diff --git "a/related_53K/test_related_long_2404.17762v1.json" "b/related_53K/test_related_long_2404.17762v1.json" new file mode 100644--- /dev/null +++ "b/related_53K/test_related_long_2404.17762v1.json" @@ -0,0 +1,8600 @@ +[ + { + "url": "http://arxiv.org/abs/2404.17762v1", + "title": "Large Multi-modality Model Assisted AI-Generated Image Quality Assessment", + "abstract": "Traditional deep neural network (DNN)-based image quality assessment (IQA)\nmodels leverage convolutional neural networks (CNN) or Transformer to learn the\nquality-aware feature representation, achieving commendable performance on\nnatural scene images. However, when applied to AI-Generated images (AGIs),\nthese DNN-based IQA models exhibit subpar performance. This situation is\nlargely due to the semantic inaccuracies inherent in certain AGIs caused by\nuncontrollable nature of the generation process. Thus, the capability to\ndiscern semantic content becomes crucial for assessing the quality of AGIs.\nTraditional DNN-based IQA models, constrained by limited parameter complexity\nand training data, struggle to capture complex fine-grained semantic features,\nmaking it challenging to grasp the existence and coherence of semantic content\nof the entire image. To address the shortfall in semantic content perception of\ncurrent IQA models, we introduce a large Multi-modality model Assisted\nAI-Generated Image Quality Assessment (MA-AGIQA) model, which utilizes\nsemantically informed guidance to sense semantic information and extract\nsemantic vectors through carefully designed text prompts. Moreover, it employs\na mixture of experts (MoE) structure to dynamically integrate the semantic\ninformation with the quality-aware features extracted by traditional DNN-based\nIQA models. Comprehensive experiments conducted on two AI-generated content\ndatasets, AIGCQA-20k and AGIQA-3k show that MA-AGIQA achieves state-of-the-art\nperformance, and demonstrate its superior generalization capabilities on\nassessing the quality of AGIs. Code is available at\nhttps://github.com/wangpuyi/MA-AGIQA.", + "authors": "Puyi Wang, Wei Sun, Zicheng Zhang, Jun Jia, Yanwei Jiang, Zhichao Zhang, Xiongkuo Min, Guangtao Zhai", + "published": "2024-04-27", + "updated": "2024-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Original Paper", + "paper_cat": "Mixture AND of AND Experts", + "gt": "Traditional IQA models. In the field of No-Reference Image Quality Assessment (NR-IQA) [53], traditional models primarily fall into two categories: handcrafted feature-based and DNN-based. Models based on handcrafted features, such as BRISQUE [24], ILNIQE [54], and NIQE [25], primarily utilize natural scene statistics (NSS) [24, 25] derived from natural images. These models are adept at detecting domain variations introduced by synthetic distortions, including spatial [24, 25, 54], gradient [24], discrete cosine transform (DCT) [36], and wavelet-based distortions [26]. However, despite their effectiveness on datasets with type-specific distortions, these handcrafted feature-based approaches exhibit limited capabilities in modeling real-world distortions. With the advent of deep learning, CNNs have revolutionized many tasks in computer vision. [13] is pioneer in applying deep convolutional neural networks to NR-IQA. Its methodology employs CNNs to directly learn representations of image quality from raw image patches, bypassing the need for handcrafted features or a reference image. 
Following this, DBCNN [55] introduces a deep bilinear CNN for blind image quality assessment (BIQA) [53], innovatively merging two CNN streams to address both synthetic and authentic image distortions separately. Furthermore, HyperIQA [39], a self-adaptive hyper network, evaluates the quality of authentically distorted images through a novel three-stage process: content understanding, perception rule learning, and quality prediction. The success of Vision Transformers (ViT) [5] in various computer vision tasks has led to significant advancements. In the realm of IQA, IQT [52] leverages the combination of reference and distorted image features, extracted by CNNs, as inputs for a Transformerbased quality prediction task. MUSIQ [14] utilizes a Transformer to encode distortion image features across three scales, addressing the challenge of varying input image sizes during training and testing. TReS introduces relative ranking and self-consistency loss to capitalize on the abundant self-supervisory information available, aiming to decrease the network\u2019s sensitivity. What\u2019s more, MANIQA [49] explored multi-dimensional feature interaction, utilizing spatial and channel structural information to calculate a non-local representation of the image, enhancing the model\u2019s ability to assess image quality comprehensively. LMMs for IQA. Recent methodologies employing LMMs for IQA either utilize LMMs in isolation or combine them with DNNs as feature extractors to enhance performance. [30] introduces an innovative image-prompt fusion module, along with a specially designed quality assessment token, aiming to learn comprehensive representations for AGIs, providing insights from image-prompt alignment. However, the evaluation of AGIs in practical scenarios often does not involve prompts and image-prompt alignment is more significant for assessing the capabilities of generative models rather than images quality. CLIPIQA [42] signifies a breakthrough in assessing image quality and perception by harnessing the strengths of CLIP [31] models. This method bridges the divide between measurable image quality attributes and subjective perceptions of quality without necessitating extensive labeling efforts. Nonetheless, their [30, 42] dependence on visual-text similarity for quality score prediction often constrains their performance, rendering it marginally less effective compared to methods that exclusively focus on visual analysis. What\u2019s more, Q-Bench [43] innovates with a softmax strategy, allowing LMMs to deduce quantifiable quality scores. This is achieved by extracting results from softmax pooling on logits corresponding to five quality-related tokens. And Q-Align [45] employs strategic alignment techniques to foster accuracy. Expanding further, [47] delves into enhancing the assessment of AGIs by focusing on optimizing individual text prompts to leverage the intrinsic capabilities of LMMs, aiming to provide a more nuanced understanding and evaluation of image quality of AGIs. However, these methods, while notable, fall short of achieving satisfying efficacy, leaving considerable room for improvement.", + "pre_questions": [], + "main_content": "INTRODUCTION The rapid advancement of artificial intelligence (AI) has led to a proliferation of AI-generated images (AGIs) on the Internet. However, current AI-driven image generation systems often produce multiple images, necessitating manual selection by users to identify the best ones. 
This labor-intensive process is not only time-consuming but also a significant barrier to fully automating image processing pipelines. Visual quality, as an important factor to select attractive AGIs, has gained lots of attention in recent years [17, 20]. In this paper, we focus on how to evaluate the visual quality of AGIs, which on the one hand can be used to filter high-quality images from generation systems and on the other hand, can sever as reward function to optimize image generation models [2], propelling progress in the field of AI-based image generation techniques. arXiv:2404.17762v1 [cs.CV] 27 Apr 2024 Puyi Wang, Wei Sun, Zicheng Zhang, Jun Jia, Yanwei Jiang, Zhichao Zhang, Xiongkuo Min, Guangtao Zhai While a substantial number of deep neural network (DNN)-based image quality assessment (IQA) models, such as HyperIQA [39], MANIQA [49], DBCNN [55], etc., have been developed, these models were specifically designed for and trained on natural scene images. When applied directly to AGIs, these models often exhibit poor performance. This is due to the fact that quality assessment of natural images primarily targets issues such as blur, noise, and other forms of degradation caused by photography equipment or techniques, which are not applicable to AGIs as they do not undergo such degradation during the generation process. Therefore, overemphasizing factors like blur or noise during the evaluation of AGIs is inappropriate. As shown in Figure 1, AI-generated images, derived from advanced image generative models such as generative adversarial networks (GANs) [23], diffision [10] and related variant [4, 6, 11, 29, 32\u2013 34, 48], often exhibit issues not commonly found in naturally captured images. Visual quality of AGIs depends not only on basic visual features such as noise, blur [18, 38, 58], etc., but also on more intricate semantic perception [17], such as existence of reasonable semantic content, scene plausibility, and the coherence among objects [19, 43, 44, 46, 57]. Although re-training existing IQA models on AGIs datasets leads to improved outcomes, it fails to achieve optimal performance. One reason is that traditional DNN models, especially early convolutional neural networks (CNNs), despite their notable achievements in tasks like image recognition and classification [9, 37, 41], still struggle to grasp the fine-grained semantic content of images [56]. What\u2019s more, traditional DNN-based IQA models fail to capture the intrinsic characteristics essential for assessing image quality and thus exhibit poor generalization abilities. Hence, we argue that the quality assessment models of AGIs are still in their infancy and need further exploration. To address the issue of semantic awareness, we resort to large multi-modality models (LMMs). Because LMMs is typically pretrained on large-scale datasets and has already learned a rich set of joint visual and language knowledge, it can effectively capture the fine-grained semantic features relevant to input prompts. However, LMMs perform excellently in high-level visual understanding tasks [1, 16], yet they do not perform well on tasks that are relatively simple for humans, such as identifying structural and textural distortions, color differences, and geometric transformations [47]. In contrast, traditional deep learning networks excel at perceiving low-dimensional features and can fit better to the data distribution of specific task [12]. 
Therefore, the idea of combining LMMs with traditional deep learning networks is a natural progression. In this paper, we introduce a large Multi-modality model Assisted AI-Generated Image Quality Assessment (MA-AGIQA) framework, which enhances the capacity of traditional DNN-based IQA models to understand semantic content by incorporating LMM. Our approach initially repurposes a DNN, MANIQA [49], as an extractor for quality-aware features and establishes it as the training backbone for the MA-AGIQA framework. Subsequently, we guide a LMM, mPLUG-Owl2 [50], to focus on fine-grained semantic information through meticulously crafted prompts. We then extract and store the last-layer hidden vector from mPLUG-Owl2, merging it with features extracted by MANIQA to infuse the model with rich semantic insights. Finally, we employ a MoE to dynamically integrate quality-aware features with fine-grained semantic features, Grainy Images Subset SRCC: 0.2545 \u2013 MANIQA SRCC: 0.8364 \u2013 Ours Whole AIGCQA-20k SRCC: 0.8507 \u2013 MANIQA SRCC: 0.8644 \u2013 Ours Figure 2: For the subset of grainy images (extracted from prompts containing \u201cdigital\u201d and generated by LCM_Pixart in AIGCQA-20k) that include semantic content, MANIQA achieves an SRCC of 0.2545, which is 70.0% lower than the overall SRCC of 0.8507. In contrast, our MA-AGIQA model achieves an SRCC of 0.8364. It demonstrates that our model possesses a significantly enhanced understanding of AGIs, particularly those whose quality is deeply intertwined with semantic elements. catering to the unique focal points of different images. As demonstrated in Figure 2, our approach surpasses MANIQA in terms of SRCC, particularly within subsets comprising semantically rich images overflowing with graininess, indicating that our methodology shows remarkable congruence with the human visual system\u2019s (HVS) perceptual capabilities. MA-AGIQA achieves SRCC values of 0.8939 and 0.8644 on the AGIQA-3k and AIGCQA-20k datasets, respectively, exceeding the state-of-the-art models by 2.03% and 1.37%, and also demonstrates superior cross-dataset performance. Our contributions are three-fold: \u2022 We systematically analyze the issue of traditional DNNbased IQA lacking the ability to understand the semantic content of AGIs, emphasizing the importance of incorporating semantic information into traditional DNN-based IQA models. \u2022 We introduce the MA-AGIQA model, which incorporates LMM to extract fine-grained semantic features and dynamically integrates these features with traditional DNN-based IQA models. \u2022 We evaluate the MA-AGIQA model on two AI-generated IQA datasets. Experimental results demonstrate that our model surpasses current state-of-the-art methods without extra training data and also showcases superior crossdataset performance. Extensive ablation studies further validate the effectiveness of each component. Large Multi-modality Model Assisted AI-Generated Image Quality Assessment As depicted in Figure 3, framework of MA-AGIQA is structured into three sections. Section 3.1 introduces our adoption of a DNN, specifically MANIQA [49], tailored for the AGIs quality assessment task, serving as our primary training backbone. In Section 3.2, we incorporate the LMM mPLUG-Owl2 [50] as a feature extractor. This component is crucial for acquiring fine-grained semantic features via carefully crafted text prompts. Lastly, Section 3.3 addresses the variability in focal points across different images. 
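To make the three-stage design concrete, the following is a minimal PyTorch-style sketch of the overall scoring flow, assuming the quality-aware backbone outputs a 784-dimensional feature and the two semantic vectors from the LMM are pre-computed and cached offline; the class and argument names are illustrative rather than the authors' implementation.

```python
import torch.nn as nn

class MAAGIQASketch(nn.Module):
    """Illustrative end-to-end flow: quality-aware feature plus two cached
    semantic vectors -> adaptive fusion -> scalar quality score."""
    def __init__(self, backbone, fusion, dim=784):
        super().__init__()
        self.backbone = backbone            # modified MANIQA, returns a [B, 784] feature
        self.fusion = fusion                # adaptive fusion module (sketched in Sec. 3.3)
        self.regressor = nn.Linear(dim, 1)  # simple Fc regression head

    def forward(self, image, sem_feat_a, sem_feat_b):
        f1 = self.backbone(image)                     # quality-aware feature
        g = self.fusion(f1, sem_feat_a, sem_feat_b)   # fused representation
        return self.regressor(g).squeeze(-1)          # predicted quality score
```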
To adaptively integrate the feature vectors during training, we utilize a MoE structure for feature fusion. This approach ensures that the most salient features are emphasized. Further details are elaborated below. 3.1 Quality-aware Feature Extraction To leverage the capability of DNNs to adapt to the data distribution of specific tasks, we employ MANIQA [49] as a quality-aware feature extractor. MANIQA enhances the evaluation of image quality by applying attention mechanisms across both the channel and spatial dimensions, thereby increasing the interaction among various regions of the image, both globally and locally. This approach generates a weight projection W and a score projection S for a given image, and the final rating of the whole image is determined as the weighted average of the score vector S with weights W, which can be illustrated as Equation (1): (S, W) = T([image]), rating = \u2211(S \u00d7 W) / \u2211W, (1) where S and W are one-dimensional vectors. However, directly applying MANIQA to the quality assessment of AGIs presents challenges, as illustrated in Figure 4. Image (a) displays a complex, symmetrical pattern, devoid of meaningful semantic content. Image (b) features incoherent areas, such as two grey holes in the sky that are inconsistent with common sense. The blurriness and fuzziness along the edges of the man\u2019s face in image (c) significantly impair human perception. Conversely, image (d), despite its severe graininess, retains its semantic integrity, representing an appealing artistic form. Traditional DNN-based models like MANIQA, lacking the capacity to comprehend semantic content, tend to overestimate the quality of images (a), (b), and (c), resulting in scores much higher than the ground truth. However, these images should be rated as low quality due to the poor viewing
For image (d), traditional DNN-based models focus excessively on the graininess, mistaking it for a flaw, and assign a score significantly lower than the ground truth. This highlights the critical need for incorporating semantic information into the quality assessment of AGIs by traditional DNN-based models. To address this issue, modifications were made so that the generated S and W no longer produce a rating. Instead, they yield a quality-aware feature f1, setting the stage for the subsequent fusion with features extracted by LMM. f1 is generated as: f1 = S \u00d7 W. (2) During the training phase, the parameters of modified MANIQA are continuously updated. This refinement process ensures that MANIQA can extract features more relevant to the quality of AGIs. Furthermore, the training process facilitates a more seamless integration between MANIQA and LMM, leading to superior outcomes. 3.2 Fine-grained Semantic Feature Extraction LMMs are capable of understanding and analyzing the semantic content of images and their relationship with human cognition. They assess whether different parts of an image form a cohesive whole and evaluate whether the elements within the picture are semantically coherent [7, 21, 28]. mPLUG-Owl2 [50] employs a modality-adaptive language decoder to handle different modalities within distinct modules, which mitigates the issue of modality interference. Given the importance of effectively guiding the model through textual prompts to elicit the desired output, we have selected mPLUG-Owl2 as our feature extractor. We consider the application of mPLUG-Owl2 in the following aspects of semantic content: Large Multi-modality Model Assisted AI-Generated Image Quality Assessment \u2022 Existence of Semantic Content. The importance of semantic content in an image lies in its ability to convey a clear and meaningful message to the viewer. An image lacking in semantic content may be difficult to understand, fail to effectively convey its intended message, reducing audience engagement and satisfaction. \u2022 Coherence of Semantic Content. The coherence of semantic content in an image relates to whether the generated image can provide a coherent, logically sound visual experience for human viewers. When the various parts of an image are semantically consistent, it is better able to convey a clear story, emotion, or message. In contrast, any inconsistency in the primary focus of images will greatly detract from their quality and convey a significantly negative impression. Consequently, we try to propose the rational design of prompts leading LMMs to obtain those image semantic content. mPLUGOwl2 possess the ability to understand fine-grained semantic contents, but without carefully designed input prompts, some prompts, such as \"Please evaluate if the image quality is compromised due to violations of common human sense or logic?\" although it expresses the desire for the model to assess whether the semantic content of the image contradicts human perception, would lead to unsatisfactory results. To better utilize mPLUG-Owl2 for the task of evaluating AGIs, we meticulously designed prompts to guide the LMM. 
Specifically, we designed two prompts, denoted as \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc4eand \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc4f respctively, \u2022 \"Evaluate the input image to determine if its quality is compromised due to a lack of meaningful semantic content.\" \u2022 \"Evaluate if the image quality is compromised due to violations of coherence.\" corresponding to the existence of semantic content and the coherence of semantic content in images, respectively. Test results, as shown in Figure 5 using the mPLUG-Owl2 official demo1, have proven these questions to be effective. However, the textual output from mPLUG-Owl2 is not immediately conducive to being utilized by MANIQA to impart semantic insights. To bridge this gap, it\u2019s essential to obtain the information provided by mPLUG-Owl2 into a format that MANIQA can easily leverage. SO we extract features from the final layer of mPLUGOwl2\u2019s hidden layers, achieving an accessible embedded representation of the LMM\u2019s output. This output is a tensor with dimensions of [token_length, hidden_size], where \"token_length\" represents the number of output tokens, and \"hidden_size\" denotes the dimensionality of the hidden layer representations associated with each token. For mPLUG-Owl2, the hidden_size is set to 4096. Subsequently, we conduct an averaging operation across the token dimension, yielding a vector with dimensions 1x4096. This vector then serves as the basis for further feature fusion procedures. The process can be represented as Equation (3) : (m1 \ud835\udc56, m2 \ud835\udc56, \u00b7 \u00b7 \u00b7 , m\ud835\udc5b \ud835\udc56) = M([\ud835\udc56\ud835\udc5a\ud835\udc4e\ud835\udc54\ud835\udc52], [\ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc56])[\u22121], \ud835\udc53\ud835\udc56= Average(m1 \ud835\udc56, m2 \ud835\udc56, \u00b7, m\ud835\udc5b \ud835\udc56), where \ud835\udc56\u2208{\ud835\udc4e,\ud835\udc4f}, (3) 1https://modelscope.cn/studios/iic/mPLUG-Owl2/summary Evaluate if the image quality is compromised due to violations of coherence. Coherence of Semantic Content Yes, the image quality is compromised due to the violations of coherence. The image features a blurry cityscape with a ghostly appearance, but the blurriness and the presence of ghostly elements make it difficult to discern the actual details of the cityscape. The image is not clear, and the ghostly appearance might be distracting or confusing for the viewer. Evaluate the input image to determine if its quality is compromised due to a lack of meaningful semantic content. Existence of Semantic Content The input image is a close-up of a patterned surface, possibly a tiled floor or a decorative piece, The pattern is intricate and features a combination of blue, green, and yellow colors. However, the image lacks meaningful semantic content, as it does not depict any recognizable objects, people, or scenes. The focus is solely on the pattern, which might be visually appealing, but does not provide any context or information. Figure 5: Presentation of mPLUG-Owl2\u2019s answers to two prompts. where m\ud835\udc58 \ud835\udc56represents a hidden vector of token \ud835\udc58corresponding to \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc56, and M denotes mPLUG-Owl2. It is important to note that throughout the entire training and testing process, the parameters of mPLUG-Owl2 are fixed. 
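The extraction step of Equation (3) amounts to one forward pass through the frozen LMM followed by token averaging. A hedged sketch is shown below; it assumes a HuggingFace-style multimodal interface that exposes hidden states, and mPLUG-Owl2's actual loading and preprocessing API may differ in detail.

```python
import torch

# The two fixed prompts, one per semantic aspect (existence / coherence).
PROMPT_A = ("Evaluate the input image to determine if its quality is "
            "compromised due to a lack of meaningful semantic content.")
PROMPT_B = "Evaluate if the image quality is compromised due to violations of coherence."

@torch.no_grad()
def extract_semantic_feature(model, processor, image, prompt):
    """Eq. (3): average the last-layer hidden states of the frozen LMM over
    the token dimension, yielding a 1 x 4096 semantic vector."""
    inputs = processor(images=image, text=prompt, return_tensors="pt")
    outputs = model(**inputs, output_hidden_states=True)
    last_hidden = outputs.hidden_states[-1]   # [1, token_length, 4096]
    return last_hidden.mean(dim=1)            # [1, 4096], cached to disk in advance
```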
Because mPLUG-Owl2 is typically pre-trained on large-scale datasets and has already learned a rich set of joint visual and language knowledge, it can effectively capture the fine-grained semantic information relevant to input prompts, even with fixed parameters. Additionally, fine-tuning LMMs in every training iteration would significantly increase training time. Using it solely as a feature extractor significantly reduces computational costs, making the training process more efficient. So, we pre-obtain and save the semantic content features of each image in advance. 3.3 Adaptive Fusion Module Given the complex influence of color, composition, details, semantic content, and other factors on image quality, simply concatenating the extracted features may not always yield the best results. To dynamically fuse a variety of complementary features, we propose the adaptive fusion module (AFM) for organic feature integration. Puyi Wang, Wei Sun, Zicheng Zhang, Jun Jia, Yanwei Jiang, Zhichao Zhang, Xiongkuo Min, Guangtao Zhai This process can be divided into two main parts. The first part involves transforming the extracted features into a unified vector space of the same dimension, allowing for vector fusion operations. Specifically, for features extracted by MANIQA, this transformation block applies a fully connected (Fc) layer, transforming them to the same dimension as the original features (1x784) to provide a richer combination. For features derived from mPLUG-Owl2, it uses a Fc layer to project them onto a 1x784 dimension, followed by a relu activation layer and a dropout layer to enhance the network\u2019s expressive power and generalization. The second part employs a MoE to dynamically fuse the three features. The MoE\u2019s gating network takes the transformed three features as input and outputs dynamic weights \ud835\udf36, corresponding to the three features\u2019 contributions to image quality. Structurally, this gating network comprises a Fc layer and a sigmoid layer. The final image quality representation vector g can be obtained through a weighted sum of the three feature vectors. Following the denotation which sign the three features as \ud835\udc531, \ud835\udc53\ud835\udc4e, \ud835\udc53\ud835\udc4f, this process can be represented as: f \u2032 \ud835\udc56= F \ud835\udc61\ud835\udc5f\ud835\udc4e\ud835\udc5b\ud835\udc60 \ud835\udc56 (f\ud835\udc56), where f \u2032 \ud835\udc56\u2208R\ud835\udc51, \ud835\udf36= F \ud835\udc54\ud835\udc4e\ud835\udc61\ud835\udc52(Concat(f \u2032 1, f \u2032 \ud835\udc4e, f \u2032 \ud835\udc4f)), where \ud835\udf36\u2208R3, g = \u2211\ufe013 \ud835\udc56=1 f \u2032 \ud835\udc56\u00b7 \ud835\udefc\ud835\udc56, where g \u2208R\ud835\udc51, \ud835\udc56\u2208{1,\ud835\udc4e,\ud835\udc4f}, (4) where F \ud835\udc61\ud835\udc5f\ud835\udc4e\ud835\udc5b\ud835\udc60 \ud835\udc56 is the transformation block of feature \ud835\udc56, and F \ud835\udc54\ud835\udc4e\ud835\udc61\ud835\udc52is the gating network\u2019s mapping function, \ud835\udc53\ud835\udc56is the original extracted feature and \ud835\udc53\u2032 \ud835\udc56is the transformed feature. R\ud835\udc51is the dimension space of \ud835\udc53\u2032 \ud835\udc56. Finally, we obtain the final image quality score output through a simple regression layer, consisting of a Fc layer. 4 EXPERIMENTS 4.1 Dataset and Evaluation Metrics Dataset. Our model is evaluated on two AI-Generated image datasets, including AIGCQA-20k [17] and AGIQA-3k [20]. 
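For reference, the adaptive fusion module described in Section 3.3 can be written as a compact PyTorch sketch: per-feature transformation blocks, a sigmoid gating network over the concatenated features, and the weighted sum of Equation (4). The 784/4096 dimensions follow the description above, while the dropout rate and initialization are assumptions of this illustration.

```python
import torch
import torch.nn as nn

class AdaptiveFusionModule(nn.Module):
    """Sketch of AFM (Eq. 4): transformation blocks, a sigmoid gate over the
    concatenated features, and a weighted sum of the three feature vectors."""
    def __init__(self, dim=784, lmm_dim=4096, p_drop=0.1):
        super().__init__()
        self.trans_quality = nn.Linear(dim, dim)           # MANIQA feature -> 784
        self.trans_sem_a = nn.Sequential(                  # LMM feature -> 784
            nn.Linear(lmm_dim, dim), nn.ReLU(), nn.Dropout(p_drop))
        self.trans_sem_b = nn.Sequential(
            nn.Linear(lmm_dim, dim), nn.ReLU(), nn.Dropout(p_drop))
        self.gate = nn.Sequential(nn.Linear(3 * dim, 3), nn.Sigmoid())

    def forward(self, f1, fa, fb):
        feats = torch.stack([self.trans_quality(f1),
                             self.trans_sem_a(fa),
                             self.trans_sem_b(fb)], dim=1)  # [B, 3, 784]
        alpha = self.gate(feats.flatten(1)).unsqueeze(-1)   # dynamic weights, [B, 3, 1]
        return (feats * alpha).sum(dim=1)                   # fused representation g
```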
Specifically, AIGCQA20k contains 20k images, but at the time of writing, only 14k images have been published. Our experiments are conducted on these 14k images. The MOS for AIGCQA-20k images are distributed between 0-5, with higher scores indicating better image quality. Images in AIGCQA-20k are generated by 15 models, including DALLE2 [32], DALLE3 [32], Dream [6], IF [4], LCM Pixart [22], LCM SD1.5 [22], LCM SDXL [22], Midjourney [11], Pixart \ud835\udefc[3], Playground [29], SD1.4 [34], SD1.5 [34], SDXL [35] and SSD1B [8]. AGIQA-3k includes 2982 images, with MOS also distributed between 0-5, where higher values represent better quality. Images in AGIQA-3k are derived from six models, including GLIDE [27], Stable Diffusion V-1.5 [34], Stable Diffusion XL-2.2 [35], Midjourney [11], AttnGAN [48], and DALLE2 [32]. During training, we split the entire dataset into 70% for training, 10% for validation, and 20% for testing. To ensure the same set of images in each subset when testing across different models, we set the same random seed during the split to control variables and ensure reproducibility. Evaluation Metric. Spearman\u2019s Rank-Order Correlation Coefficient (SRCC), Pearson\u2019s Linear Correlation Coefficient (PLCC), the Kullback-Leibler Correlation Coefficient (KLCC), and the Root Mean Square Error (RMSE) are selected as metrics to measure monotonicity and accuracy. SRCC, PLCC, and KLCC range from -1.0 to 1.0, with larger values indicating better results. In our experiments, we employ the sum of SRCC and PLCC as the criterion for selecting the optimal validation case, and emphasize SRCC for comparing model performance. 4.2 Implementation Details Our method is implemented based on PyTorch, and all experiments are conducted on 4 NVIDIA 3090 GPUs. For all datasets, we opt for handcrafted feature-based BRISQUE [24], NIQE [25] and ILNIQE [54], deep learning (DL)-based HyperIQA [39], MANIQA [49], MUSIQ [14], DBCNN [55], StairIQA [40], BAID [51], and LMMbased CLIPIQA [42], CLIPIQA+ [42] and Q-Align [45]. During the training process of deep learning models, we use the Adam optimizer [15] with a weight decay of 1e-5, and the initial learning rate is 1e-5. The batch size is 8 during training, validation, and testing. All DL-based models are trained for 30 epochs using MSE loss and validated after each training process. The checkpoint with the highest sum of SRCC and PLCC during validation is used for testing. Handcrafted feature-based and LMM based models are used directly without training. 4.3 Comparison with SOTA methods Table 1 lists the results of MA-AGIQA and 12 other models on the AGIQA-3k and AIGCQA-20k dataset. It has been observed that LMM-based models significantly outperform those that rely on handcrafted features. This superior performance is attributed to LMMs being trained on extensive datasets, which provides them with a robust understanding of images and enhances their generalizability. However, trained DL-based models generally perform far better than the LMM-based models because DL-based models tend to fit the data distribution of specific tasks better, thereby resulting in improved performance. Among these twelve models, the ViT-based MANIQA outperforms the other eleven models, and our method still significantly surpasses it on the same training and testing split with large margins (+3.72% of SRCC, +1.73% of PLCC and +5.43% of KRCC in AGIQA-3k & +1.61% of SRCC, +2.02% of PLCC and +2.90% of KRCC in AIGCQA-20k). 
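For reference, the four evaluation criteria used throughout this section can be computed with SciPy and NumPy as below; this is a generic sketch of the metric definitions rather than the exact evaluation script (in particular, no nonlinear logistic mapping is applied before PLCC).

```python
import numpy as np
from scipy import stats

def iqa_metrics(pred, mos):
    """SRCC / PLCC / KRCC / RMSE between predicted scores and MOS labels."""
    pred, mos = np.asarray(pred, float), np.asarray(mos, float)
    return {
        "SRCC": stats.spearmanr(pred, mos).correlation,
        "PLCC": stats.pearsonr(pred, mos)[0],
        "KRCC": stats.kendalltau(pred, mos).correlation,
        "RMSE": float(np.sqrt(np.mean((pred - mos) ** 2))),
    }
```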
This demonstrates the superiority of integrating features extracted by LMM into traditional DNN, significantly improving the accuracy and consistency of prediction results. To evaluate the generalization capability of our MA-AGIQA model, we conducted cross-dataset evaluations. Table 2 shows that MA-AGIQA significantly outperforms the other two models, HyperIQA and StairIQA, which performed best on single datasets, with large margins. This superior performance can largely be attributed to the robust generalization capability of the LMM and the benefits of the MoE architecture, which excels in dynamically fusing features. 4.4 Ablation Study Necessity of Fine-grained Semantic Features. To assess the benefits of integrating features extracted by mPLUG-Owl2 [50] into MANIQA [49], we carried out comprehensive ablation studies on each component and their various combinations, as detailed in Tables 3 and 4. Our findings indicate that using either the features extracted by the LMM alone or solely relying on a traditional network does not yield the best outcomes. In contrast, integrating one Large Multi-modality Model Assisted AI-Generated Image Quality Assessment Table 1: Comparisons with SOTA (State-Of-The-Art) methods on AGIQA-3k and AIGCQA-20K-Image datasets. The up arrow \"\u2191\" means that a larger value indicates better performance. The best and second best performances are bolded and underlined, respectively. MA-AGIQA outperforms existing SOTA methods on both datasets by large margins. Note: to ensure fair comparisons, we trained and tested all deep learning based models and ours with the same dataset splitting method. Type Method AGIQA-3k AIGCQA-20K-Image SRCC\u2191 PLCC\u2191 KRCC\u2191 RMSE\u2193 SRCC\u2191 PLCC\u2191 KRCC\u2191 RMSE\u2193 Handcrafted feature-based BRISQUE [24] 0.4726 0.5612 0.3227 0.8299 0.1663 0.3580 0.1112 0.6813 NIQE [25] 0.5236 0.5668 0.3637 0.8260 0.2085 0.3378 0.1394 0.6868 ILNIQE [54] 0.6097 0.6551 0.4318 0.7576 0.3359 0.4551 0.2290 0.6497 LMM-based CLIPIQA [42] 0.6524 0.6968 0.4632 0.7191 0.4147 0.6459 0.2861 0.5570 CLIPIQA+ [42] 0.6933 0.7493 0.4957 0.664 0.4553 0.6682 0.3169 0.5428 Q-Align [45] 0.6728 0.6910 0.4728 0.7204 0.6743 0.6815 0.4808 0.5199 Traditional DNN-based HyperIQA [39] 0.8509 0.9049 0.6685 0.4134 0.8162 0.8329 0.6207 0.3902 MANIQA [49] 0.8618 0.9115 0.6839 0.4111 0.8507 0.8870 0.6612 0.3273 DBCNN [55] 0.8263 0.8900 0.6393 0.4533 0.8054 0.8483 0.6121 0.3726 StairIQA [40] 0.8343 0.8933 0.6485 0.4510 0.7899 0.8428 0.6053 0.3927 BAID [51] 0.1304 0.2030 0.0854 0.9487 0.1652 0.1483 0.1279 0.7297 MUSIQ [14] 0.8261 0.8657 0.6400 0.4907 0.8329 0.8646 0.6403 0.3634 DL with LMM MA-AGIQA 0.8939 0.9273 0.7211 0.3756 0.8644 0.9050 0.6804 0.3104 Table 2: Cross-dataset performance comparison for M-AIGQQA, HyperIQA, and StairIQA. \u201cDirection\u201d from A to B means training with train subset of dataset A and testing on test subset of dataset B. The best result is bolded. direction SRCC \u2191 PLCC \u2191 KRCC \u2191 RMSE \u2193 MA-AGIQA 20k\u21923k 0.8053 0.8430 0.6083 0.5399 3k\u219220k 0.7722 0.8314 0.5777 0.4055 HyperIQA 20k\u21923k 0.6820 0.6806 0.4806 0.7352 3k\u219220k 0.6374 0.6547 0.4577 0.5414 StairIQA 20k\u21923k 0.4335 0.5234 0.3294 0.8549 3k\u219220k 0.6495 0.6895 0.4644 0.5285 fine-grained semantic feature with the original MANIQA network can enhance the network\u2019s performance. 
However, the optimal results were achieved by combining two features extracted by the LMM with MANIQA, which led to significant improvements on the AGIQA-3k dataset (increases of 1.57%, 0.83%, and 2.56% in SRCC, PLCC, and KRCC, respectively) and on the AIGCQA-20k dataset (enhancements of 2.72%, 1.94%, and 4.35%). The marked enhancements achieved by incorporating two finegrained semantic features suggest that LMM is adept at capturing nuanced, complex features that traditional models might overlook, fostering a more thorough understanding and assessment of AGIs quality. The results from these ablation experiments highlight the significant contribution of fine-grained semantic features. Contribution of MoE. Table 5 demonstrates that incorporating the MoE structure, rather than simply concatenating three vectors, does indeed improve network performance, albeit marginally. Specifically, on the AGIQA-3k dataset, we observed increases of 0.20%, 0.17%, and 0.16% in SRCC, PLCC, and KRCC, respectively. Table 3: Ablation studies of different component combinations in the MA-AGIQA model on AGIQA-3k. SRCC, PLCC and KRCC are reported. The best result is bolded. Note: \"semantic feature\" and \"coherence feature\" denote features extracted by mPLUG-Owl2 through \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc4eand \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc4frespectively. MANIQA Semantic Feature Coherence Feature SRCC\u2191 PLCC\u2191 KRCC\u2191 \u2713 0.8800 0.9196 0.7031 \u2713 0.8662 0.9082 0.6823 \u2713 0.8661 0.9084 0.6821 \u2713 \u2713 0.8685 0.9108 0.6853 \u2713 \u2713 0.8820 0.9197 0.7090 \u2713 \u2713 0.8699 0.9102 0.6867 \u2713 \u2713 \u2713 0.8939 0.9273 0.7211 For the AIGCQA-20k dataset, the improvements were 0.67%, 0.95%, and 1.37%. The gains, although seemingly modest, highlight the potential of MoE structure in complex systems where integrating diverse expertise can yield better decision-making and predictive outcomes. 4.5 Visualization To vividly demonstrate the efficacy of the MA-AGIQA framework, we selected 300 images from the AIGCQA-20k and AGIQA-3k datasets where MANIQA had the poorest performance. These images primarily exhibit issues in semantic content. We computed the absolute values of the differences between the model scores and the image ground truth, and illustrated these differences in Puyi Wang, Wei Sun, Zicheng Zhang, Jun Jia, Yanwei Jiang, Zhichao Zhang, Xiongkuo Min, Guangtao Zhai Table 4: Ablation studies of different component combinations in the MA-AGIQA model on AIGCQA-20k. SRCC, PLCC and KRCC are reported. The best result is bolded. Note: \"semantic feature\" and \"coherence feature\" denote features extracted by mPLUG-Owl2 through \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc4eand \ud835\udc5d\ud835\udc5f\ud835\udc5c\ud835\udc5a\ud835\udc5d\ud835\udc61\ud835\udc4frespectively. MANIQA Semantic Feature Coherence Feature SRCC\u2191 PLCC\u2191 KRCC\u2191 \u2713 0.8415 0.8877 0.6520 \u2713 0.8184 0.8345 0.6323 \u2713 0.8181 0.8343 0.6320 \u2713 \u2713 0.8540 0.8975 0.6671 \u2713 \u2713 0.8596 0.9016 0.6738 \u2713 \u2713 0.8180 0.8323 0.6312 \u2713 \u2713 \u2713 0.8644 0.9050 0.6804 Table 5: Ablation studies on the MoE structure in the AFM demonstrate that compositions integrating MoE yield superior results on both AGIQA-3k and AIGCQA-20k datasets.The better result is bolded. 
dataset MoE SRCC \u2191 PLCC \u2191 KRCC \u2191 RMSE \u2193 3k \u2717 0.8921 0.9257 0.7199 0.3797 \u2713 0.8939 0.9273 0.7211 0.3756 20k \u2717 0.8586 0.8964 0.6712 0.3234 \u2713 0.8644 0.9050 0.6804 0.3104 Absolute Difference Density Absolute Difference Lower Absolute Difference Figure 6: Comparative Density Distributions of Absolute Differences for MANQA and MA-AGIQA on AGIQA-3k and AIGQA-20k Datasets Figure 6, using 0.1 as the bin size for plotting the quality score distribution. The results clearly show that our MA-AGIQA model are more closely aligned with human perception, with a noticeable shift in the difference distribution toward zero and a marked reduction in peak values. Figure 7 presents a collection of images where the assessments from the MANIQA model were mostly off the mark. Scores assigned by MANIQA alongside those given by the proposed MA-AGIQA model and the ground truth are listed, which reveal that the MAAGIQA model markedly enhances alignment with the ground truth in contrast to MANIQA. For instance, in the first image of the top row, MANIQA\u2019s score is 3.50, which diverging substantially from MANIQA MA-AGIQA Ground Truth 3.50 2.97 3.09 3.73 2.98 2.41 2.49 3.43 1.50 1.62 1.45 2.32 MANIQA MA-AGIQA Ground Truth 1.43 2.85 2.13 2.28 1.84 2.65 1.79 2.19 2.83 0.88 1.49 1.12 Figure 7: Comparative Analysis of Image Quality Assessment Models: Evaluating MANIQA versus MA-AGIQA Against Ground Truth Scores the ground truth score of 1.50. However, MA-AGIQA\u2019s score is 2.98, demonstrating a much closer approximation to the ground truth. This pattern is consistent across the images shown, with MAAGIQA consistently producing scores that are closer to the ground truth, reflecting a more accurate assessment of image quality. 5 CONCLUSION To mitigate the shortcomings of traditional DNNs in capturing semantic content in AGIs, this study explored the integration of LMMs with traditional DNNs and introduced the MA-AGIQA network. Leveraging mPLUG-Owl2 [50], our network efficiently extracts semantic features to enhance MANIQA [49] for quality assessment. The MA-AGIQA network\u2019s ability to dynamically integrate finegrained semantic features with quality-aware features enables it to effectively handle the varied quality aspects of AGIs. Experiment results across two prominent AGIs datasets confirm our model\u2019s superior performance. Through thorough ablation studies, the indispensable role of each component within our framework has been validated. This research aspires to catalyze further exploration into the fusion of LMMs within AI-generated content quality assessment and envisions broader application potentials for such methodology.", + "additional_info": [ + [ + { + "url": "http://arxiv.org/abs/2212.08066v1", + "title": "Mod-Squad: Designing Mixture of Experts As Modular Multi-Task Learners", + "abstract": "Optimization in multi-task learning (MTL) is more challenging than\nsingle-task learning (STL), as the gradient from different tasks can be\ncontradictory. When tasks are related, it can be beneficial to share some\nparameters among them (cooperation). However, some tasks require additional\nparameters with expertise in a specific type of data or discrimination\n(specialization). To address the MTL challenge, we propose Mod-Squad, a new\nmodel that is Modularized into groups of experts (a 'Squad'). This structure\nallows us to formalize cooperation and specialization as the process of\nmatching experts and tasks. 
We optimize this matching process during the\ntraining of a single model. Specifically, we incorporate mixture of experts\n(MoE) layers into a transformer model, with a new loss that incorporates the\nmutual dependence between tasks and experts. As a result, only a small set of\nexperts are activated for each task. This prevents the sharing of the entire\nbackbone model between all tasks, which strengthens the model, especially when\nthe training set size and the number of tasks scale up. More interestingly, for\neach task, we can extract the small set of experts as a standalone model that\nmaintains the same performance as the large model. Extensive experiments on the\nTaskonomy dataset with 13 vision tasks and the PASCAL-Context dataset with 5\nvision tasks show the superiority of our approach.", + "authors": "Zitian Chen, Yikang Shen, Mingyu Ding, Zhenfang Chen, Hengshuang Zhao, Erik Learned-Miller, Chuang Gan", + "published": "2022-12-15", + "updated": "2022-12-15", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "label": "Original Paper", + "paper_cat": "Mixture AND of AND Experts", + "gt": "Multi-task Learning. Multi-task learning jointly learns multiple tasks by sharing parameters among tasks. One common approach is to manually design the architecture, sharing the bottom layers of a model across tasks [2,4,14]. Some works [34] design the architecture according to task af\ufb01nity. Others [1, 2, 32] leverage Neural Architecture Search or a routing network [30] to learn sharing patterns across tasks and automatically learn the architecture. Recently, transformer-based MTL architectures [36] have been explored and have shown advantages over CNN-based models. In comparison, we customize MoE layers into vision transformers; each MoE module constructs a minimum part of the model that can be distributed to a subset of all tasks instead of all tasks. As a result, our model is \ufb02exible in its creation of cooperation and specialization. Mixture of Experts (MoE). The MoE was \ufb01rst proposed by Jacobs et al. [12] as a technique to combine a series of sub-models and perform conditional computation. Recent work [31] in NLP proposes sparse MoE to reduce computation cost, and some works [8, 15] train gigantic models with trillions of parameters based on the sparse model. Some have used the MoE technique to train huge models in vision [29, 35] or multi-modal applications [26]. These works typically focused on combining the Feed-Forward Network layer with the MoE or develop a better routing strategy [16, 27]. MoA [40] proposes a new module that combines the attention network with the MoE while having a low computational cost and the same parameter budget as a regular attention network. More recently, M3ViT [18] uses MoE techniques to design a multi-task learning model that is computationally ef\ufb01cient during training. Compared to these previous methods, we demonstrate a MoE model that is not only computationally ef\ufb01cient, but is also \ufb02exible as a modularized multi-task learner that can easily induce both cooperation and specialization. Although M3ViT [18] also use MoE in their approach, the experts in their model share between all tasks and cannot be specialized for tasks. Pruning. Pruning refers to the process of removing components of a larger model to produce a smaller model for inference, with the goal of maintaining as much accuracy as possible while improving runtime computation ef\ufb01ciency. 
Generally, pruning is categorized into unstructured pruning [10], which removes individual weights that have a minimal contribution to accuracy and structured pruning [11, 17], which ranks \ufb01lters or blocks and prunes these based on some criterion. Usually, extra \ufb01ne-tuning is conducted for the pruned network to help maintain the performance [20, 28, 37]. Most of pruning is for single task and very few of them consider the case in multi-task learning. In this work, our proposed model has a unique property that a series of small sub-network for each task can be extracted from it with no performance drop and no additional \ufb01netuning. This is somehow similar to pruning but more likely to be an advantage of our model rather than a new way of pruning.", + "pre_questions": [], + "main_content": "Introduction Computer vision involves a great number of tasks including recognition, depth estimation, edge detection, etc. Some of them have a clear and strong relationship: they are likely to bene\ufb01t from shared features. An example would be a task to classify cars and pedestrians and a task to segment the same classes. Other tasks appear to be less related: it is not clear what features they would share. An example could be tumor detection in medical images and face recognition. Multi-task learning (MTL) aims to model the relationships among tasks and build a uni\ufb01ed model for a diverse set of tasks. On the one hand, tasks often bene\ufb01t by sharMod-Squad MoE ViT Experts Expert Group Tasks Expert Group Task Pool Task Pool Shared Experts Figure 1. A comparison between Mod-Squad and MoE ViT. Our key motivation is that experts should leverage commonalities in some tasks (cooperation) but focus on a subset of tasks that require speci\ufb01c features and do not interfere with each other (specialization). ing parameters, i.e., cooperation. On the other hand, some tasks may require specialized expertise that only bene\ufb01ts that single task, i.e., specialization. A good MTL system should be \ufb02exible to optimize experts for the dual purposes of cooperation and specialization. There are two well-known challenges in MTL: (1) gradient con\ufb02icts across tasks [5, 38]; and (2) how to design architectures that have both high accuracy and computational ef\ufb01ciency. Previous efforts include manually designing architectures [4] or conducting neural architecture search [1] to induce cooperation and specialization in different parts of the model. However, these methods either require heavy manual customization, reducing generality and limiting applicability, or require very long training times. To address these challenges, we introduce ModSquad, a new model that constructs a Mixture of Experts (MoE) [31] to be modularized multi-task learners (a squad). Our design allows experts to cooperate on tasks when it is helpful, rather than penalizing experts that do not participate in every task. At the same time, some experts naturally develop a deep specialization in particular tasks, improving performance. The left \ufb01gure in Fig. 1 shows an arXiv:2212.08066v1 [cs.CV] 15 Dec 2022 example of the specialization and cooperation of experts in Mod-Squad. A further and important side benefit, discussed below, is that this sparsification of experts allows our model to be decomposed into much smaller single-task models that perform extremely well. We achieve these goals by first integrating mixture of experts (MoE) layers into our vision transformer [6] backbone network. 
The motivation is to divide the model into groups of experts, and for each expert to construct a minimum part of the model that can be shared among tasks or be specialized for one task. The experts can have any network structure (e.g., MLP or attention network [40]) so that we can incorporate advanced model designs. Our modular design allows cooperation and specialization via the distribution of tasks to experts and also experts to tasks. Below, we formalize this idea mathematically by analyzing the probability distribution over tasks and experts, and using a novel loss function to induce a specific structure on this distribution. Many previous MoE works [29, 31, 40] use a loadbalancing loss that encourages the frequency of expert usage (across all tasks and batches) to be highly similar. Some MoE methods [18, 26] directly apply this loss after the forward pass of each task on the multi-task scenario so that each task evenly uses all experts. However, this approach may force experts to set parameters on conflicting tasks with learning gradients that counteract each other. In other words, while an expert may benefit from being shared among certain pairs of tasks, it may be harmed by being forced to share among other pairs of tasks. This is an explanation for the difficulty of training multi-task models under such an expert-balancing loss. In comparison, we contend that experts should leverage commonalities in some tasks (cooperation) but also create a subset of experts that learn specific features (as needed by some tasks) and do not interfere with each other (specialization). Such an assignment of tasks to experts can be represented via a sparse but strong dependence between experts and tasks. Fig. 1 illustrates this key difference between our model and previous MoE work, showing how our model induces a sparser structure in the assignment of experts to tasks. To implement this idea, we add a loss term to maximize the mutual information between experts and tasks. This induces a strong dependency between experts and tasks, with each task heavily related to a small set of experts and vice versa. Interestingly, we find that our model converges to a state in which, after training, most experts are never or rarely used for many tasks (evidence of specialization), but the experts are still balanced in their activation frequency. This property enables us to extract a compact sub-network from the giant model for each task. The small networks extracted in this fashion work independently as standalone models for individual tasks with no performance drop. This property enables us to train a giant, sparse model in a scaledup multi-task learning scenario and later get compact subnetworks for each task with high performance. Our main contributions can be summarized as follows: \u2022 Modular multi-task learner. We propose a new modular backbone model, Mod-Squad, that is composed of a large group of attention and feed-forward experts. The experts can be flexibly assigned a subset of tasks to achieve specialization and cooperation. \u2022 Optimizing the joint distribution over tasks and experts. Mod-Squad includes a new loss term that encourages a sparse but strong dependence between experts and tasks. This is done by measuring and maximizing the mutual information between tasks and experts. \u2022 Effective and Efficient multi-task learners at scale. Experiment results show that Mod-Squad achieves stateof-the-art performance on two major multi-task datasets while maintaining its computational efficiency. 
\u2022 Extracting small sets of experts as standalone models with no performance drop. We further show that ModSquad can be effectively pruned for a designated task without sacrificing performance. We start with the definition of multi-task learning. Suppose we have M tasks T1, T2, ..., TM and Q images I1, I2, ..., IQ. We define a task T as a function that maps image Iq to T(Iq). Our dataset D contains for each task Ti a set of training pairs (Iq; Ti(Iq)), e.g. (image; depthMap). Here, for simplicity, we assume that every task contains a training pair for every one of the Q images, but note that our approach can be extended to the case in which every task contains a different subset of images in its training pairs. 3.1. Preliminaries Mixture of Experts. A Mixture of Experts (MoE) layer typically contains a set of expert networks E1, E2, ..., EN along with a routing network G. The output of a MoE layer is the weighted sum of the output Ek(x) from every expert. The routing network model G calculates the weight Gk for each expert given input x. Formally, the output of a MoE layer is y = N \ufffd k=1 \ufffd k=1 Gk(x)Ek(x). (1) The routing network G is a Noisy Top-K Routing network [31] with parameters Wg and Wnoise. It models P(Ek|x) as the probability of using expert Ek and selects the Top-K to contribute to the final output. The whole process is shown as follows: G(x) = TopK(Softmax(xWg + N(0, 1) Softplus(xWnoise))), (2) where TopK(\u00b7, k) sets all elements in the vector to zero except the elements with the largest K values, Softplus is the smooth approximation to the ReLU function: Softplus(x) =log (1 + exp (x)) . (3) 3.2. Mod-Squad Mod-Squad is a multi-task model with the vision transformer as the backbone network and several parallel taskspecific heads. As shown in Fig. 2, a key design in our model is customizing MoE into the vision transformer so that each expert can construct a minimum part of the model that can be either shared between tasks or specialized for tasks. Specifically, we customize the MoE attention block (MoA) [40] and MoE MLP block [31] into the transformer layer. Each MoE block consists of N experts E1, E2, ..., EN which can be either an attention head or an MLP layer along with M task-specific routing networks G1, G2, ..., GM that select experts conditioned on input tokens. Note that each routing network Gi has its own parameters \ufffd W i g, W i noise \ufffd . We also add a learnable task embedding to the hidden input state so that each expert is aware of the target task. Thus, in Mod-Squad, the output of each MoE layer is y = N \ufffd k=1 \ufffd k=1 Gk i (x) \u00b7 Ek (x + ei) , (4) where i is the task id and ei is the respective task embedding. 3.3. A jointprobability model over tasks and experts In order to model cooperation and specialization, we define a probability model over tasks T and experts E. We assume that when our trained network is deployed, it will be assigned a random task T according to a global distribution over tasks P(T). (Typically we assume this distribution to be uniform over tasks.) Subsequently, it will be given a random image X according to P(X|T). For a given MoE layer, we model the probability P(Ei|Tj) of using expert Ei with task Tj as the frequency with which Ei is assigned to task Tj by the routing network. For example, for 100 images in task Tj, if the routing network assigns 30 of them to expert Ei, then P(Ei|Tj) = 0.3. 
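A compact PyTorch sketch of one Mod-Squad MoE block, following Eqs. (2)-(4), is given below: each task has its own noisy top-K router over the shared experts, and a learnable task embedding is added to the expert input. MLP experts, zero-initialized router weights, and noise applied only at training time are assumptions of this illustration, not details taken from the authors' code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ModSquadMoE(nn.Module):
    """One MoE layer: N experts shared by all tasks, M task-specific routers."""
    def __init__(self, dim, num_experts, num_tasks, k=2, hidden=None):
        super().__init__()
        hidden = hidden or 4 * dim
        self.k = k
        self.experts = nn.ModuleList([
            nn.Sequential(nn.Linear(dim, hidden), nn.GELU(), nn.Linear(hidden, dim))
            for _ in range(num_experts)])
        self.w_gate = nn.Parameter(torch.zeros(num_tasks, dim, num_experts))
        self.w_noise = nn.Parameter(torch.zeros(num_tasks, dim, num_experts))
        self.task_emb = nn.Parameter(torch.zeros(num_tasks, dim))

    def forward(self, x, task_id):
        # Eq. (2): noisy logits -> softmax -> keep only the top-K experts.
        logits = x @ self.w_gate[task_id]
        if self.training:
            logits = logits + torch.randn_like(logits) * F.softplus(x @ self.w_noise[task_id])
        probs = logits.softmax(dim=-1)
        topk_val, topk_idx = probs.topk(self.k, dim=-1)
        gates = torch.zeros_like(probs).scatter_(-1, topk_idx, topk_val)
        # Eq. (4): weighted sum of expert outputs on the task-conditioned input.
        x_task = x + self.task_emb[task_id]
        out = torch.zeros_like(x)
        for e, expert in enumerate(self.experts):
            weight = gates[..., e:e + 1]
            if (weight > 0).any():
                out = out + weight * expert(x_task)
        return out, gates   # gates can be accumulated into the P(E | T) statistics below
```

Summing the returned gates over the images of each task yields the soft counts used to estimate P(Ei|Tj).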
Since the routing network does not make hard assignments of experts to tasks, but rather assigns weights resulting Tasks Unactivated / Activated Expert / Model Pool C. Prune & Get All Models A. Multi-Task Training with Mod-Squad Task Layer Norm Input Image Router MoEAttn Layer Attn MoE MLP Layer MLP Router Layer Norm x N Task Task B. Sparse and strong dependence between Experts and Tasks Task Pool Router Experts Router MoE Layer X Router MoE Layer X MoE Layer X Experts Experts Here, \u2018MoE Layer X\u2019 includes MoE Attn Layer and MoE MLP Layer. Task Task Figure 2. The pipeline of our multi-task foundation model. Each transformer block in Mod-Squad consists a MoE attention network (MoE attn.) and a MoE MLP network. The multi-task model Mod-Squad is trained with our proposed mutual information loss. Mod-Squad develops a strong dependence between experts and tasks. Then we can extract a small sub-network from Mod-Squad for each task with no performance drop. from a softmax function to each expert, we sum these soft weights to measure the frequency: P(Ei|Tj) = QTi X k=1 GEi Tj (xk), where GEi Tj gives the weight for expert Ei for task Tj on the input xk from image Ik. QTi is the number of images for task Ti. Given this de\ufb01nition of conditional probability, the joint probability P(E, T) = P(E|T)P(T), and of course, we can obtain P(E) = P T P(E, T). A key intuition in our work is that experts should be dependent on tasks, that is, experts should specialize in speci\ufb01c tasks, at least to some extent. This notion can be captured by measuring the mutual information (MI) between tasks and experts, using the probability model de\ufb01ned above: I(T; E) = M X i=1 N X j=1 P(Ti, Ej) log P(Ti, Ej) P(Ti)P(Ej). (5) If experts are assigned with equal frequency to all tasks, then the mutual information will be 0. If each expert is assigned to exactly one task (when M = N), then the dependence (and hence the mutual information) is maximized. 3.4. Maximize mutual information between experts and tasks To understand what mutual information do, we break down the Equation. 5 as following: I(T; E) = M X i=1 K X j=1 P(Ti, Ej) log P(Ti, Ej) \u2212 M X i=1 P(Ti) log P(Ti) \u2212 K X j=1 P(Ej) log P(Ej). (6) In Eq. 6, the \ufb01rst term is the negative entropy of P(Ti, Ej) = P(Ei|Tj)P(Tj). Maximizing this term encourages the sharpness of the conditional distributions P(Ei|Tj), since P(Tj) is a constant decided by data distribution, and is not affected by model parameters. The second term is the entropy of P(Ti) which, again, is a constant and can be ignored. The third term is the entropy of P(Ej). Maximizing the term encourages a high-entropy or \ufb02at distribution of P(Ej), encouraging the experts to be evenly used across the entire dataset. In practice, we add \u2212I(T; EY ) to our total loss for each MoE layer Y with a weight parameter wMI where EY represents all the experts in Y . We follow [13] to learn an auto-balancing weight wT for each task T and add the taskspeci\ufb01c loss LT for all tasks. So the total loss is L = M X i=1 wTiLTi \u2212wMI X \u2200MoE layers Y I(T; EY ). (7) 3.5. Train Once and Get All In previous MoE works [18,26], they use a subset of the experts for one input image but all the experts for each task. In comparison, Mod-Squad activates a subset of the experts when forwarding both single image and multiple images from the same task. Further, all the experts are evenly used in Mod-Squad when forwarding the whole multi-task dataset. 
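Given those accumulated routing statistics, the mutual-information regularizer of Eqs. (5)-(7) reduces to a few tensor operations. The sketch below assumes usage[t, e] holds the summed soft routing weights of expert e on the images of task t for one MoE layer, and that the task prior P(T) is uniform; it is illustrative rather than the authors' implementation.

```python
import torch

def mutual_information_loss(usage, eps=1e-8):
    """Returns -I(T; E) for one MoE layer, to be added to the total loss
    with weight w_MI (Eq. 7).

    usage: [M, N] tensor of soft counts, usage[t, e] = sum of routing
    weights assigned to expert e over the images of task t (Sec. 3.3).
    """
    p_e_given_t = usage / usage.sum(dim=1, keepdim=True).clamp_min(eps)
    p_t = torch.full((usage.shape[0], 1), 1.0 / usage.shape[0], device=usage.device)
    p_te = p_e_given_t * p_t                      # joint P(T, E)
    p_e = p_te.sum(dim=0, keepdim=True)           # marginal P(E)
    mi = (p_te * (p_te.clamp_min(eps).log()
                  - p_t.log() - p_e.clamp_min(eps).log())).sum()
    return -mi
```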
This even usage of experts guarantees that the capacity of Mod-Squad is fully utilized and not wasted. A typical relation between tasks and experts is demonstrated in Sec. 4.3. Benefiting from the constant sparsity of Mod-Squad at both the image level and the task level, unused or rarely used experts can be removed from each MoE module when doing single-task inference. This can be done by counting the usage frequency of each expert for the task and removing those experts whose frequency is smaller than a threshold θ. Note that some tasks may use more experts and others fewer in each MoE layer. For example, a low-level task may require more experts in the first few layers of the network, while a high-level task may require more experts in the last few layers. Mod-Squad is capable of dynamically self-organizing its architecture and selecting experts according to the requirements of each task, which provides some freedom in the architecture and extra flexibility in allocating model capacity. After removing experts, our pruned model can be directly deployed for the respective task. Since the removed experts are never or rarely used, the pruned model achieves the same level of performance as the original model but with a much smaller number of parameters and without any fine-tuning. In the case where we set θ = 0 and keep all the experts that have ever been used, we observe no drop in performance while still effectively pruning a large portion of the model. This expert-removal process is similar to pruning, but we simply adopt a thresh-then-remove strategy, and no additional training is needed as in some pruning work [3]. Once trained, a series of small sub-networks can be extracted for all tasks. This property enables us to build a very large model that benefits from all tasks, yet requires only a fraction of the model capacity for single-task inference or fine-tuning.

4. Experiment

4.1. Experiments Settings. Datasets and Tasks. We evaluate on two multi-task datasets: PASCAL-Context [25] and Taskonomy [39]. PASCAL-Context includes 10,103 training images and 9,637 testing images with annotations for five tasks: edge detection (Edge), semantic segmentation (Seg.), human parts segmentation (H.Parts), surface normals (Norm.), and saliency detection (Sal.). The Taskonomy benchmark includes 3,793k training images and 600k testing images with 16 types of annotation. We use 13 of these annotations as our multi-task targets: object classification, scene classification, depth estimation with euclidean depth, depth estimation with z-buffer depth, surface normals, curvature estimation, reshading, edge detection in 2D and 3D, keypoint detection in 2D and 3D, and unsupervised segmentation in 2D and 2.5D. Details of these tasks can be found in [39]. Loss Functions and Evaluation Metrics. Classification tasks and semantic segmentation use cross-entropy loss and pixel-wise cross-entropy loss, respectively. The surface-normal task uses the inverse of the cosine similarity between the l2-normalized prediction and the ground truth. Curvature estimation uses L2 loss. All other tasks use L1 loss. We follow previous work [23] and use $\Delta t_i$ to evaluate an MTL model $m$ as the average drop for task $T_i$ with respect to the baseline model $b$: $\Delta t_i = (-1)^{s_i} (M_{m,i} - M_{b,i}) / M_{b,i}$, where $M_{m,i}$ and $M_{b,i}$ are the metrics of task $T_i$ for models $m$ and $b$ respectively, and $s_i$ is 1 if the metric is lower-is-better and 0 otherwise.
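As a small worked example of the metric just defined, the sketch below (the function name and the percentage scaling are ours) computes $\Delta t_i$ and reproduces one entry of Table 2 from numbers reported later in this section.

```python
def delta_t(metric_mtl: float, metric_stl: float, lower_is_better: bool) -> float:
    """Per-task relative improvement over the single-task baseline, in percent.

    Implements delta_t_i = (-1)^{s_i} * (M_{m,i} - M_{b,i}) / M_{b,i}, where s_i = 1
    when the metric is lower-is-better (e.g. rmse, L1) and 0 otherwise (e.g. accuracy).
    """
    sign = -1.0 if lower_is_better else 1.0
    return 100.0 * sign * (metric_mtl - metric_stl) / metric_stl

# Example with the Taskonomy object-classification accuracies reported below
# (STL 56.5 vs. Mod-Squad 59.0): delta_t(59.0, 56.5, lower_is_better=False) ~ +4.4,
# matching the "+4.4" entry for Object Cls. in Table 2.
```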
We also report \u2206t as the average of \u2206ti on all tasks. For here, the baseline model b is the vanilla single-task learning model. On the taskonomy, for depth estimation, we also report root mean square error (rmse), absolute and relative errors between the prediction and the ground truth as well as the percentage of pixels whose prediction is within the thresholds of 1.25, 1.252, 1.253 to the ground truth following [7]. We also report accuracy (Acc) for classi\ufb01cation, L2 distance for curvature estimation, and L1 distance for all other tasks. These metrics are used to calculate \u2206ti and note that depth estimation use rmse only. On the PASCAL-Context, we follow [18] and report mean intersection over union (mIoU) for semantic and human parts segmentation, and saliency; mean error (mErr) for normals estimation, root mean square error (rmse) for depth estimation; and optimal dataset F-measure (odsF) for edge detection. Baselines and Competitors. We compare with the following baselines. STL: vanilla single-task learning baseline that trains its own model on each task independently. MTL: vanilla multi-task learning baseline that all tasks share the backbone model but have separate prediction heads. For our proposed model, we also have MLP and Attn (in Table. 2) that represent only MoE MLP and only MoE attention networks are customized into the transformer layer respectively. Mod-Squad w/ pruning (or pruning in Ta1Due to corrupt annotation for some samples, we discard three types of annotation (points, non\ufb01xated matches, and semantic segmentation). Model Obj. Cls. Scene Cls. Depth Euc. Normal Curvature Reshading Edge3D Keyp.2D Segm.2D Acc(%) \u2191Acc(%) \u2191RMSE \u2193 Error \u2193 \u03b4, within \u2191 L1 dis. \u2193L2 dis. \u2193 L1 dis. \u2193 L1 dis. \u2193L1 dis. \u2193L1 dis. \u2193 Abs. Rel. 1.25 1.252 1.253 STL 56.5 60.0 6.94 0.089 1.77 92.8 96.9 98.7 0.403 1.12 0.184 0.119 0.0312 0.171 MTL 57.3 64.9 6.75 0.084 1.26 93.0 97.0 98.9 0.386 1.06 0.170 0.127 0.0284 0.166 M3V iT [18] 58.0 65.6 6.69 0.083 1.26 93.2 97.2 98.9 0.383 1.05 0.174 0.126 0.0289 0.164 Mod-Squad 59.0 66.8 6.59 0.082 1.25 93.3 97.2 99.0 0.374 1.02 0.167 0.123 0.0275 0.161 Table 1. Metric for each task on the taskonomy dataset. For each task, we use different metrics to evaluate its performance. More results on other tasks can be found in the supplementary. Method STL MTL M3ViT MLP Attn Ours Pruning Params(M) 86.4 90.0 176.4 176.4 105.6 201.3 116.9 FLOPs(G) 17.7 18.5 19.7 19.7 19.7 19.7 18.4 Object Cls. 0.0 +1.4 +2.6 +3.0 +3.0 +4.4 +4.4 Scene Cls. 0.0 +8.1 +9.3 +10.0 +9.6 +11.3 +11.3 Depth Euc. 0.0 +2.7 +3.6 +3.9 +4.4 +5.0 +5.0 Depth Zbu. 0.0 +2.1 +2.4 +2.6 +2.4 +2.8 +2.8 Normal 0.0 +3.5 +4.2 +4.5 +4.5 +6.5 +6.5 Curvature 0.0 +5.3 +6.2 +7.1 +6.2 +8.9 +8.9 Reshading 0.0 +7.6 +5.4 +5.9 +8.1 +9.2 +9.2 Edge2D 0.0 +0.6 +2.0 +1.8 +1.2 +3.6 +3.6 Edge3D 0.0 -6.7 -5.8 -4.2 -5.8 -3.3 -3.3 Keyp.2D 0.0 +5.3 +3.6 +3.6 +6.3 +8.3 +8.3 Keyp.3D 0.0 +1.3 +2.7 +4.1 +2.7 +5.5 +5.5 Segm. 2D. 0.0 +2.9 +4.0 +5.2 +3.5 +5.8 +5.8 Segm. 2.5D 0.0 +1.9 +3.2 +3.8 +3.2 +5.1 +5.1 Mean 0.0 +2.8 +3.3 +3.9 +3.8 +5.6 +5.6 Table 2. Comparison of \u2206t between MTL methods on the Taskonomy. We report their average drop for each task with respect to the vanilla single-task model. MLP and Attn represent using only MoE MLP and MoE attention network in the backbone respectively. ble. 2) is Mod-Squad with experts removing for each speci\ufb01c task and we report the maximum FLOPs and Params over all tasks. 
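As an aside on the depth-estimation measures listed above (rmse, absolute and relative errors, and the δ-threshold accuracies following [7]), a hedged NumPy sketch is given below; the array handling and masking conventions are assumptions on our part, not taken from the evaluation code.

```python
import numpy as np

def depth_metrics(pred: np.ndarray, gt: np.ndarray) -> dict:
    """rmse, absolute/relative errors and delta-threshold accuracies for one depth map.

    Assumes pred and gt are positive depth values of the same shape, with invalid
    pixels masked out beforehand.
    """
    pred = pred.reshape(-1).astype(np.float64)
    gt = gt.reshape(-1).astype(np.float64)
    abs_err = np.abs(pred - gt)
    ratio = np.maximum(pred / gt, gt / pred)
    return {
        "rmse": float(np.sqrt(np.mean(abs_err ** 2))),
        "abs_err": float(np.mean(abs_err)),
        "rel_err": float(np.mean(abs_err / gt)),
        "delta_1.25": float(np.mean(ratio < 1.25)),
        "delta_1.25^2": float(np.mean(ratio < 1.25 ** 2)),
        "delta_1.25^3": float(np.mean(ratio < 1.25 ** 3)),
    }
```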
We also compare with M 3V iT [18] and several state-of-the-art MTL models: MTAN [19], CrossStitch [24] and NDDR-CNN [9]. Further, we compare with modi\ufb01ed-MoE: it has the same architecture as Mod-Squad but without our mutual information loss. It applies the standard balanced loss [40] after forward propagation of all tasks for each image instead of one task. As a result, experts will be evenly used for all tasks instead of for every task. Implementation. We use ViT-base [6] and ViT-small as backbone networks on the Taskonomy and the PASCALContext respectively. We introduce MoA and MoE MLP into ViT every two layers. For MoA, we follow [40] to design the block and use 15 experts with top-k as 6 for ViTsmall and 24 experts with top-k as 12 for ViT-base. For MoE MLP, we use 16 experts with top-k as 4. The taskSTL MTL \ud835\udc40!\ud835\udc49\ud835\udc56\ud835\udc47 STL MTL \ud835\udc40!\ud835\udc49\ud835\udc56\ud835\udc47 Figure 3. Ablation study on pruning. We explore two ways of pruning: (1) thresh then remove with \u03b8 (2) Keep the top H% experts that have the highest used frequency in each MoE module. For the \ufb01rst way of pruning, we report results with \u03b8 as 90%, 50%, 20%, 5%, 0.1%, and 0.0% (no pruning). For the second way of pruning, we report results with H% as 30%, 40%, 60%, 80%, and 100% (no pruning). We also compare our pruning with applying the same pruning strategy on modi\ufb01ed-MoE (m-MoE). speci\ufb01c heads are single linear layers on the Taskonomy and multiple layers network same as [18] on the PASCALContext. We set wMI = 0.001 and removed threshold \u03b8 = 1.0%. On the PASCAL-Context, the hyperparameters are the same as in M 3V iT [18]. On the Taskonomy, we set the base learning rate to 2e \u22124 with a batch size of 1, 440 and AdamW [22] as the optimizer. The weight decay is 0.05. We use 10 warmup epochs with 100 total training epochs and the model converges in 80 hours with 240 NVIDIA V100 GPUs. Cosine decay [21] is used for the learning rate schedule. 4.2. Results on MTL Ef\ufb01cacy. We demonstrate the ef\ufb01cacy of our model in performance, computation cost, and model capacity. The results on the Taskonomy and the PASCAL-Context are shown in Table. 2 and Table. 3 respectively. Speci\ufb01c metrics for each task on the Taskonomy is shown in Table. A1. In terms of performance, our method signi\ufb01cantly outperforms other baselines and competitors on both datasets: we beat MTL and M3ViT for over 2 points in mean \u2206t Method Backbone Seg. Norm. H. Parts Sal. Edge \u2206t FLOPs Params mIoU\u2191mErr\u2193 mIoU\u2191 mIoU\u2191odsF\u2191 (%)\u2191 (G)\u2193 (M)\u2193 STL ResNet-18 66.2 13.9 59.9 66.3 68.8 0.00 1.8 11 MTL ResNet-18 63.8 14.9 58.6 65.1 69.2 \u22122.86 1.8 11 MTAN [19] ResNet-18 63.7 14.8 58.9 65.4 69.6 \u22122.39 1.8 11 Cross-Stitch [24] ResNet-18 66.1 13.9 60.6 66.8 69.9 +0.60 1.8 11 NDDR-CNN [9] ResNet-18 65.4 13.9 60.5 66.8 69.8 +0.39 1.8 11 MTL ViT-small 70.7 15.5 58.7 64.9 68.8 \u22121.77 4.6 21 M 3V iT [18] MoE ViT-small 72.8 14.5 62.1 66.3 71.7 +2.71 5.2 42 Mod-Squad MoE ViT-small 74.1 13.7 62.7 66.9 72.0 +4.72 5.2 50 Mod-Squad w/ Pruning MoE ViT-small 74.1 13.7 62.6 66.9 71.9 +4.65 5.2 22 Table 3. Quantitative Results on the PASCAL-Context. Mod-Squad constantly outperform other MTL methods on all tasks. 1 2 3 4 5 Proportion of Dataset (%) 15 20 25 30 35 Acc (%) FSL on Cls. Object 1 2 3 4 5 Proportion of Dataset (%) 15 20 25 30 35 Acc (%) FSL on Cls. Scene Ours Task LR finetune Figure 4. 
Router \ufb01ne-tuning can quickly learn new tasks by selecting proper experts. We train our model on the other 11 tasks from the Taskonomy and transfer to cls. object and cls.scene with few training samples. We compare the few-shot classi\ufb01cation accuracy with the following three baselines. (1) Fine-tuning: We \ufb01ne-tune the whole model on the few training samples. (2) Task: we freeze the backbone model and only train the new task-speci\ufb01c head. (3) LR: the state-of-the-art few-shot learning method [33] based on logistic regression. We report the test accuracy when training with 0.5%, 1%, 2%, and 5% of the training set. on the two datasets. On Taskonomy, we defeat MTL on all tasks, which proves the improvement is consistent. In terms of computation cost and model capacity, our model with ViT-Base backbone has a very low computation cost (19.7G FLOPs) while bene\ufb01ting from a huge model capacity (201.3M). In comparison, MTL baselines with ViTBase use 18.5G FLOPs with 86.4M parameters. Furthermore, our standalone pruned model keeps the same performance as Mod-Squad for each individual task when having the same level of computation cost and model capacity as STL: 18.4 FLOPs vs. 17.7 FLOPs and 116.9M vs. 86.4M. The extra computation cost is mainly from the lightweight routing network and the extra parameters can be further removed with a higher \u03b8 as will be shown later. Ablation study on MoE Mlp and MoE Attention. As shown in Table. 2, we report results (MLP and Attn in Table. 2) where we only introduce MoE into MLP and attention networks. Both ways of adding experts can improve > 1.0% in \u2206t compared to MTL. By combining them, Mod-Squad gets the best result and further boost 2 points in \u2206t. This demonstrates that introducing MoE and increasing model capacity in both attention and MLP network can increase the performance. 4.3. Experts, Tasks, and Pruning Relation between experts and tasks. As shown in Fig. 5, we visualize the frequency of experts being selected for each task. The x-axis and y-axis represent experts and tasks respectively. Experiments are conducted on the Taskonomy with all 13 tasks using MoE ViT-Small as the backbone. The visualization is for the MoE attention module in the 6th transformer block. We also compare with modi\ufb01ed-MoE and Normal MoE which have different MoE losses but the exact model architecture. From the \ufb01gure, we observe that our expert activation map is sharper and more sparse than the two comparisons, which aligns with our key motivation: a sparse but strong dependence between experts and tasks helps MTL. Extracting sub-network for an individual task. As introduced in Sec. 3.5, we extract a small sub-network from Mod-Squad for an individual task. Speci\ufb01cally, we explore two ways of removing experts as follows. (1) Thresh and remove: we simply remove all experts that have an usage frequency lower than \u03b8 for the speci\ufb01c task. Note that some MoE modules could have fewer than Top-K experts after removing if most of the experts have a low usage frequency. In that case, we reduce the top-k of that MoE module to the number of experts it keeps. (2) Keep the top: we keep the top H% experts in each MoE module that have the highest usage frequency. The results are shown in Fig. 3. For the \ufb01rst way of removing experts, we try \u03b8 as 90%, 50%, 20%, 5%, 0.1%, and 0% (no removing). For the second way, we try H% as 50%, 20%, 5%, and 0% (no removing). For both removing strategies, we compare with STL, MTL, and M3ViT. 
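The two expert-removal strategies compared in Fig. 3 amount to a few lines over the per-task usage frequencies of one MoE module. The sketch below uses illustrative names and assumes the frequencies have already been counted as described in Sec. 3.5.

```python
import torch

def prune_by_threshold(usage_freq: torch.Tensor, theta: float) -> torch.Tensor:
    """(1) Thresh and remove: keep experts whose usage frequency for the task >= theta.

    usage_freq: (num_experts,) frequencies for one MoE module and one task. If fewer
    than the module's top-k experts survive, top-k is reduced accordingly.
    """
    return (usage_freq >= theta).nonzero(as_tuple=True)[0]

def prune_keep_top(usage_freq: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    """(2) Keep the top: keep the H% most frequently used experts of the module."""
    num_keep = max(1, int(round(keep_ratio * usage_freq.numel())))
    return usage_freq.topk(num_keep).indices

# A tiny theta keeps every expert that is ever used for the task, which the paper
# reports causes no performance drop; keep_ratio = 0.4 keeps the top 40% of experts
# per module, which Fig. 3 shows retains essentially the best delta_t.
```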
From the \ufb01gure, we notice several interesting observations: (1) Mod-Squad m-MoE Normal MoE Figure 5. Visualization of the frequency that experts being selected for each task. We visualize the activation frequency of a MoE attention module in the 6-th transformer block with 15 experts and top-k as 6. The y-axis represents the tasks and the x-axis represents the 15 experts. We compare the visualization of ModSquad to m-MoE and normal MoE. All three methods have the exact same MoE module but with different MoE losses. Our frequency map is much sharp and sparse than other methods. Mod-Squad can remove the majority of extra experts than a normal ViT-Base (116.9M vs. 90.0M in model parameters) with a tiny performance lost (< 0.3% in \u03b4t) and still better than competitors. (2) Only keeping the top 40% of experts still give us the same performance (5.5% in \u03b4t while the best is 5.6%). (3) The performance of modi\ufb01ed-MoE signi\ufb01cantly drops when removing more experts, which prove the effectiveness of our mutual information loss. Fine-tuning the router network. Another interesting property of Mod-Squad is that we can quickly adapt to new tasks by only tuning the lightweight routing network and the task-speci\ufb01c head with all other parameters frozen. We refer to this technique as router \ufb01ne-tuning. Router \ufb01netuning can be generalized to any MoE network when they need lightweight tuning with limited budgets in dataset size, computation cost, or training time. As shown in Fig. 4, we explore router \ufb01ne-tuning. We \ufb01rst pre-train our model on 11 tasks on the Taskonomy except for cls. object and cls. scene as the target of new tasks. We compare different ways of \ufb01ne-tuning with limited training examples. We report performance using 0.5%, 1%, 2%, and 5% of the dataset to learn the new tasks. The router \ufb01ne-tuning strategy is compared with several baselines as follows. (1) Fine-tuning: \ufb01ne-tune the whole model and learn the new task-speci\ufb01c head. (2) Task: freeze the backbone model and only learn the new task heads. (3) We follow the state-of-the-art few-shot learning method [33] based on logistic regression to \ufb01ne-tune the model. From the \ufb01gure, we \ufb01nd that the router \ufb01ne-tuning strategy surpasses other baselines constantly on both tasks with different proportions of the training set. These results show that Mod-Squad can be quickly adapted for various purposes with router \ufb01ne-tuning. Task Relation. Mod-Squad can not only model the task relation implicitly like other multi-task models but also visualize it explicitly. We de\ufb01ne the similarity between tasks Cls. object Cls. scene Edge2D Keyp. 2d Segm. 2d Segm. 25d Normal Reshading Depth euc. Depth zbu. Edge3D Keyp. 3d Curvature Cls. object Cls. scene Edge2D Keyp. 2d Segm. 2d Segm. 25d Normal Reshading Depth euc. Depth zbu. Edge3D Keyp. 
3d Curvature 1 0.51 0.3 0.25 0.42 0.49 0.48 0.41 0.4 0.4 0.41 0.49 0.49 0.51 1 0.42 0.34 0.4 0.46 0.49 0.5 0.52 0.48 0.55 0.45 0.54 0.3 0.42 1 0.7 0.35 0.29 0.26 0.25 0.31 0.32 0.25 0.34 0.26 0.25 0.34 0.7 1 0.35 0.27 0.29 0.27 0.32 0.32 0.28 0.39 0.34 0.42 0.4 0.35 0.35 1 0.7 0.43 0.52 0.44 0.41 0.48 0.43 0.48 0.49 0.46 0.29 0.27 0.7 1 0.58 0.64 0.57 0.55 0.6 0.5 0.59 0.48 0.49 0.26 0.29 0.43 0.58 1 0.7 0.62 0.65 0.64 0.61 0.67 0.41 0.5 0.25 0.27 0.52 0.64 0.7 1 0.8 0.76 0.79 0.64 0.68 0.4 0.52 0.31 0.32 0.44 0.57 0.62 0.8 1 0.93 0.73 0.58 0.71 0.4 0.48 0.32 0.32 0.41 0.55 0.65 0.76 0.93 1 0.72 0.59 0.66 0.41 0.55 0.25 0.28 0.48 0.6 0.64 0.79 0.73 0.72 1 0.66 0.68 0.49 0.45 0.34 0.39 0.43 0.5 0.61 0.64 0.58 0.59 0.66 1 0.63 0.49 0.54 0.26 0.34 0.48 0.59 0.67 0.68 0.71 0.66 0.68 0.63 1 0.0 0.2 0.4 0.6 0.8 1.0 Figure 6. Task relation from Mod-Squad. We evaluate the similarity between tasks as the mean of the percentage of experts that they are sharing with the same input. as the mean of the percentage of experts that they are sharing given the same input. If two tasks are sharing more experts than other pairs of tasks, they are considered to be more related. This de\ufb01nition may not be perfectly accurate but is based on one simple rule: related tasks are more likely to share experts than unrelated tasks. As shown in Fig. 6, Mod-Squad visualizes task relations in a correlation matrix with our new de\ufb01nition of task similarity. We notice that some of the structures among tasks are interesting: the 3D tasks including Normal, Reshading, two depth tasks, Edge3D, Keyp. 3D and curvature are grouped together; closed relation exists among two segmentations tasks and among two two depth tasks; Edge2D and Edge3D are not closed in the visualization. It demonstrates ModSquad can also be used as a visualization tool to explore the structure among tasks. 5. Conclusion In this work, we propose Mod-Squad, a modular multitask learner based on mixture-of-experts and a novel loss to address the gradient con\ufb02icts among tasks. We demonstrate its potential to scale up in both model capacity and target task numbers while keeping the computation cost low. It is noteworthy that Mod-Squad can be scaled down in model size with no performance drop for speci\ufb01c purposes. Future work could extend Mod-Squad to a large variety of tasks and scenes not only in the vision domain but also in other modalities (e.g., text and audio). We hope Mod-Squad will become an important building block of future ef\ufb01cient and modular foundation models." + }, + { + "url": "http://arxiv.org/abs/2103.16716v1", + "title": "BASE Layers: Simplifying Training of Large, Sparse Models", + "abstract": "We introduce a new balanced assignment of experts (BASE) layer for large\nlanguage models that greatly simplifies existing high capacity sparse layers.\nSparse layers can dramatically improve the efficiency of training and inference\nby routing each token to specialized expert modules that contain only a small\nfraction of the model parameters. However, it can be difficult to learn\nbalanced routing functions that make full use of the available experts;\nexisting approaches typically use routing heuristics or auxiliary\nexpert-balancing loss functions. In contrast, we formulate token-to-expert\nallocation as a linear assignment problem, allowing an optimal assignment in\nwhich each expert receives an equal number of tokens. 
This optimal assignment\nscheme improves efficiency by guaranteeing balanced compute loads, and also\nsimplifies training by not requiring any new hyperparameters or auxiliary\nlosses. Code is publicly released at https://github.com/pytorch/fairseq/", + "authors": "Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, Luke Zettlemoyer", + "published": "2021-03-30", + "updated": "2021-03-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.05144v1", + "title": "Mixture of Attention Heads: Selecting Attention Heads Per Token", + "abstract": "Mixture-of-Experts (MoE) networks have been proposed as an efficient way to\nscale up model capacity and implement conditional computing. However, the study\nof MoE components mostly focused on the feedforward layer in Transformer\narchitecture. This paper proposes the Mixture of Attention Heads (MoA), a new\narchitecture that combines multi-head attention with the MoE mechanism. MoA\nincludes a set of attention heads that each has its own set of parameters.\nGiven an input, a router dynamically selects a subset of $k$ attention heads\nper token. This conditional computation schema allows MoA to achieve stronger\nperformance than the standard multi-head attention layer. Furthermore, the\nsparsely gated MoA can easily scale up the number of attention heads and the\nnumber of parameters while preserving computational efficiency. In addition to\nthe performance improvements, MoA also automatically differentiates heads'\nutilities, providing a new perspective to discuss the model's interpretability.\nWe conducted experiments on several important tasks, including Machine\nTranslation and Masked Language Modeling. Experiments have shown promising\nresults on several tasks against strong baselines that involve large and very\ndeep models.", + "authors": "Xiaofeng Zhang, Yikang Shen, Zeyu Huang, Jie Zhou, Wenge Rong, Zhang Xiong", + "published": "2022-10-11", + "updated": "2022-10-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1812.08928v1", + "title": "Slimmable Neural Networks", + "abstract": "We present a simple and general method to train a single neural network\nexecutable at different widths (number of channels in a layer), permitting\ninstant and adaptive accuracy-efficiency trade-offs at runtime. Instead of\ntraining individual networks with different width configurations, we train a\nshared network with switchable batch normalization. At runtime, the network can\nadjust its width on the fly according to on-device benchmarks and resource\nconstraints, rather than downloading and offloading different models. Our\ntrained networks, named slimmable neural networks, achieve similar (and in many\ncases better) ImageNet classification accuracy than individually trained models\nof MobileNet v1, MobileNet v2, ShuffleNet and ResNet-50 at different widths\nrespectively. We also demonstrate better performance of slimmable models\ncompared with individual ones across a wide range of applications including\nCOCO bounding-box object detection, instance segmentation and person keypoint\ndetection without tuning hyper-parameters. Lastly we visualize and discuss the\nlearned features of slimmable networks. 
Code and models are available at:\nhttps://github.com/JiahuiYu/slimmable_networks", + "authors": "Jiahui Yu, Linjie Yang, Ning Xu, Jianchao Yang, Thomas Huang", + "published": "2018-12-21", + "updated": "2018-12-21", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1911.12423v2", + "title": "AdaShare: Learning What To Share For Efficient Deep Multi-Task Learning", + "abstract": "Multi-task learning is an open and challenging problem in computer vision.\nThe typical way of conducting multi-task learning with deep neural networks is\neither through handcrafted schemes that share all initial layers and branch out\nat an adhoc point, or through separate task-specific networks with an\nadditional feature sharing/fusion mechanism. Unlike existing methods, we\npropose an adaptive sharing approach, called AdaShare, that decides what to\nshare across which tasks to achieve the best recognition accuracy, while taking\nresource efficiency into account. Specifically, our main idea is to learn the\nsharing pattern through a task-specific policy that selectively chooses which\nlayers to execute for a given task in the multi-task network. We efficiently\noptimize the task-specific policy jointly with the network weights, using\nstandard back-propagation. Experiments on several challenging and diverse\nbenchmark datasets with a variable number of tasks well demonstrate the\nefficacy of our approach over state-of-the-art methods. Project page:\nhttps://cs-people.bu.edu/sunxm/AdaShare/project.html.", + "authors": "Ximeng Sun, Rameswar Panda, Rogerio Feris, Kate Saenko", + "published": "2019-11-27", + "updated": "2020-11-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1711.01239v2", + "title": "Routing Networks: Adaptive Selection of Non-linear Functions for Multi-Task Learning", + "abstract": "Multi-task learning (MTL) with neural networks leverages commonalities in\ntasks to improve performance, but often suffers from task interference which\nreduces the benefits of transfer. To address this issue we introduce the\nrouting network paradigm, a novel neural network and training algorithm. A\nrouting network is a kind of self-organizing neural network consisting of two\ncomponents: a router and a set of one or more function blocks. A function block\nmay be any neural network - for example a fully-connected or a convolutional\nlayer. Given an input the router makes a routing decision, choosing a function\nblock to apply and passing the output back to the router recursively,\nterminating when a fixed recursion depth is reached. In this way the routing\nnetwork dynamically composes different function blocks for each input. We\nemploy a collaborative multi-agent reinforcement learning (MARL) approach to\njointly train the router and function blocks. We evaluate our model against\ncross-stitch networks and shared-layer baselines on multi-task settings of the\nMNIST, mini-imagenet, and CIFAR-100 datasets. Our experiments demonstrate a\nsignificant improvement in accuracy, with sharper convergence. In addition,\nrouting networks have nearly constant per-task training cost while cross-stitch\nnetworks scale linearly with the number of tasks. 
On CIFAR-100 (20 tasks) we\nobtain cross-stitch performance levels with an 85% reduction in training time.", + "authors": "Clemens Rosenbaum, Tim Klinger, Matthew Riemer", + "published": "2017-11-03", + "updated": "2017-12-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV", + "cs.NE" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1808.06866v1", + "title": "Soft Filter Pruning for Accelerating Deep Convolutional Neural Networks", + "abstract": "This paper proposed a Soft Filter Pruning (SFP) method to accelerate the\ninference procedure of deep Convolutional Neural Networks (CNNs). Specifically,\nthe proposed SFP enables the pruned filters to be updated when training the\nmodel after pruning. SFP has two advantages over previous works: (1) Larger\nmodel capacity. Updating previously pruned filters provides our approach with\nlarger optimization space than fixing the filters to zero. Therefore, the\nnetwork trained by our method has a larger model capacity to learn from the\ntraining data. (2) Less dependence on the pre-trained model. Large capacity\nenables SFP to train from scratch and prune the model simultaneously. In\ncontrast, previous filter pruning methods should be conducted on the basis of\nthe pre-trained model to guarantee their performance. Empirically, SFP from\nscratch outperforms the previous filter pruning methods. Moreover, our approach\nhas been demonstrated effective for many advanced CNN architectures. Notably,\non ILSCRC-2012, SFP reduces more than 42% FLOPs on ResNet-101 with even 0.2%\ntop-5 accuracy improvement, which has advanced the state-of-the-art. Code is\npublicly available on GitHub: https://github.com/he-y/soft-filter-pruning", + "authors": "Yang He, Guoliang Kang, Xuanyi Dong, Yanwei Fu, Yi Yang", + "published": "2018-08-21", + "updated": "2018-08-21", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1510.00149v5", + "title": "Deep Compression: Compressing Deep Neural Networks with Pruning, Trained Quantization and Huffman Coding", + "abstract": "Neural networks are both computationally intensive and memory intensive,\nmaking them difficult to deploy on embedded systems with limited hardware\nresources. To address this limitation, we introduce \"deep compression\", a three\nstage pipeline: pruning, trained quantization and Huffman coding, that work\ntogether to reduce the storage requirement of neural networks by 35x to 49x\nwithout affecting their accuracy. Our method first prunes the network by\nlearning only the important connections. Next, we quantize the weights to\nenforce weight sharing, finally, we apply Huffman coding. After the first two\nsteps we retrain the network to fine tune the remaining connections and the\nquantized centroids. Pruning, reduces the number of connections by 9x to 13x;\nQuantization then reduces the number of bits that represent each connection\nfrom 32 to 5. On the ImageNet dataset, our method reduced the storage required\nby AlexNet by 35x, from 240MB to 6.9MB, without loss of accuracy. Our method\nreduced the size of VGG-16 by 49x from 552MB to 11.3MB, again with no loss of\naccuracy. This allows fitting the model into on-chip SRAM cache rather than\noff-chip DRAM memory. Our compression method also facilitates the use of\ncomplex neural networks in mobile applications where application size and\ndownload bandwidth are constrained. 
Benchmarked on CPU, GPU and mobile GPU,\ncompressed network has 3x to 4x layerwise speedup and 3x to 7x better energy\nefficiency.", + "authors": "Song Han, Huizi Mao, William J. Dally", + "published": "2015-10-01", + "updated": "2016-02-15", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.NE" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2101.03961v3", + "title": "Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity", + "abstract": "In deep learning, models typically reuse the same parameters for all inputs.\nMixture of Experts (MoE) defies this and instead selects different parameters\nfor each incoming example. The result is a sparsely-activated model -- with\noutrageous numbers of parameters -- but a constant computational cost. However,\ndespite several notable successes of MoE, widespread adoption has been hindered\nby complexity, communication costs and training instability -- we address these\nwith the Switch Transformer. We simplify the MoE routing algorithm and design\nintuitive improved models with reduced communication and computational costs.\nOur proposed training techniques help wrangle the instabilities and we show\nlarge sparse models may be trained, for the first time, with lower precision\n(bfloat16) formats. We design models based off T5-Base and T5-Large to obtain\nup to 7x increases in pre-training speed with the same computational resources.\nThese improvements extend into multilingual settings where we measure gains\nover the mT5-Base version across all 101 languages. Finally, we advance the\ncurrent scale of language models by pre-training up to trillion parameter\nmodels on the \"Colossal Clean Crawled Corpus\" and achieve a 4x speedup over the\nT5-XXL model.", + "authors": "William Fedus, Barret Zoph, Noam Shazeer", + "published": "2021-01-11", + "updated": "2022-06-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2106.05974v1", + "title": "Scaling Vision with Sparse Mixture of Experts", + "abstract": "Sparsely-gated Mixture of Experts networks (MoEs) have demonstrated excellent\nscalability in Natural Language Processing. In Computer Vision, however, almost\nall performant networks are \"dense\", that is, every input is processed by every\nparameter. We present a Vision MoE (V-MoE), a sparse version of the Vision\nTransformer, that is scalable and competitive with the largest dense networks.\nWhen applied to image recognition, V-MoE matches the performance of\nstate-of-the-art networks, while requiring as little as half of the compute at\ninference time. Further, we propose an extension to the routing algorithm that\ncan prioritize subsets of each input across the entire batch, leading to\nadaptive per-image compute. This allows V-MoE to trade-off performance and\ncompute smoothly at test-time. 
Finally, we demonstrate the potential of V-MoE\nto scale vision models, and train a 15B parameter model that attains 90.35% on\nImageNet.", + "authors": "Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, Andr\u00e9 Susano Pinto, Daniel Keysers, Neil Houlsby", + "published": "2021-06-10", + "updated": "2021-06-10", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1609.02132v1", + "title": "UberNet: Training a `Universal' Convolutional Neural Network for Low-, Mid-, and High-Level Vision using Diverse Datasets and Limited Memory", + "abstract": "In this work we introduce a convolutional neural network (CNN) that jointly\nhandles low-, mid-, and high-level vision tasks in a unified architecture that\nis trained end-to-end. Such a universal network can act like a `swiss knife'\nfor vision tasks; we call this architecture an UberNet to indicate its\noverarching nature.\n We address two main technical challenges that emerge when broadening up the\nrange of tasks handled by a single CNN: (i) training a deep architecture while\nrelying on diverse training sets and (ii) training many (potentially unlimited)\ntasks with a limited memory budget. Properly addressing these two problems\nallows us to train accurate predictors for a host of tasks, without\ncompromising accuracy.\n Through these advances we train in an end-to-end manner a CNN that\nsimultaneously addresses (a) boundary detection (b) normal estimation (c)\nsaliency estimation (d) semantic segmentation (e) human part segmentation (f)\nsemantic boundary detection, (g) region proposal generation and object\ndetection. We obtain competitive performance while jointly addressing all of\nthese tasks in 0.7 seconds per frame on a single GPU. A demonstration of this\nsystem can be found at http://cvn.ecp.fr/ubernet/.", + "authors": "Iasonas Kokkinos", + "published": "2016-09-07", + "updated": "2016-09-07", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2204.09636v3", + "title": "Residual Mixture of Experts", + "abstract": "Mixture of Experts (MoE) is able to scale up vision transformers effectively.\nHowever, it requires prohibiting computation resources to train a large MoE\ntransformer. In this paper, we propose Residual Mixture of Experts (RMoE), an\nefficient training pipeline for MoE vision transformers on downstream tasks,\nsuch as segmentation and detection. RMoE achieves comparable results with the\nupper-bound MoE training, while only introducing minor additional training cost\nthan the lower-bound non-MoE training pipelines. The efficiency is supported by\nour key observation: the weights of an MoE transformer can be factored into an\ninput-independent core and an input-dependent residual. Compared with the\nweight core, the weight residual can be efficiently trained with much less\ncomputation resource, e.g., finetuning on the downstream data. We show that,\ncompared with the current MoE training pipeline, we get comparable results\nwhile saving over 30% training cost. 
When compared with state-of-the-art non-\nMoE transformers, such as Swin-T / CvT-13 / Swin-L, we get +1.1 / 0.9 / 1.0\nmIoU gain on ADE20K segmentation and +1.4 / 1.6 / 0.6 AP gain on MS-COCO object\ndetection task with less than 3% additional training cost.", + "authors": "Lemeng Wu, Mengchen Liu, Yinpeng Chen, Dongdong Chen, Xiyang Dai, Lu Yuan", + "published": "2022-04-20", + "updated": "2022-10-04", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2003.02389v1", + "title": "Comparing Rewinding and Fine-tuning in Neural Network Pruning", + "abstract": "Many neural network pruning algorithms proceed in three steps: train the\nnetwork to completion, remove unwanted structure to compress the network, and\nretrain the remaining structure to recover lost accuracy. The standard\nretraining technique, fine-tuning, trains the unpruned weights from their final\ntrained values using a small fixed learning rate. In this paper, we compare\nfine-tuning to alternative retraining techniques. Weight rewinding (as proposed\nby Frankle et al., (2019)), rewinds unpruned weights to their values from\nearlier in training and retrains them from there using the original training\nschedule. Learning rate rewinding (which we propose) trains the unpruned\nweights from their final values using the same learning rate schedule as weight\nrewinding. Both rewinding techniques outperform fine-tuning, forming the basis\nof a network-agnostic pruning algorithm that matches the accuracy and\ncompression ratios of several more network-specific state-of-the-art\ntechniques.", + "authors": "Alex Renda, Jonathan Frankle, Michael Carbin", + "published": "2020-03-05", + "updated": "2020-03-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1908.08932v2", + "title": "Learning Filter Basis for Convolutional Neural Network Compression", + "abstract": "Convolutional neural networks (CNNs) based solutions have achieved\nstate-of-the-art performances for many computer vision tasks, including\nclassification and super-resolution of images. Usually the success of these\nmethods comes with a cost of millions of parameters due to stacking deep\nconvolutional layers. Moreover, quite a large number of filters are also used\nfor a single convolutional layer, which exaggerates the parameter burden of\ncurrent methods. Thus, in this paper, we try to reduce the number of parameters\nof CNNs by learning a basis of the filters in convolutional layers. For the\nforward pass, the learned basis is used to approximate the original filters and\nthen used as parameters for the convolutional layers. We validate our proposed\nsolution for multiple CNN architectures on image classification and image\nsuper-resolution benchmarks and compare favorably to the existing\nstate-of-the-art in terms of reduction of parameters and preservation of\naccuracy.", + "authors": "Yawei Li, Shuhang Gu, Luc Van Gool, Radu Timofte", + "published": "2019-08-23", + "updated": "2019-12-23", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.14793v1", + "title": "M$^3$ViT: Mixture-of-Experts Vision Transformer for Efficient Multi-task Learning with Model-Accelerator Co-design", + "abstract": "Multi-task learning (MTL) encapsulates multiple learned tasks in a single\nmodel and often lets those tasks learn better jointly. 
However, when deploying\nMTL onto those real-world systems that are often resource-constrained or\nlatency-sensitive, two prominent challenges arise: (i) during training,\nsimultaneously optimizing all tasks is often difficult due to gradient\nconflicts across tasks; (ii) at inference, current MTL regimes have to activate\nnearly the entire model even to just execute a single task. Yet most real\nsystems demand only one or two tasks at each moment, and switch between tasks\nas needed: therefore such all tasks activated inference is also highly\ninefficient and non-scalable. In this paper, we present a model-accelerator\nco-design framework to enable efficient on-device MTL. Our framework, dubbed\nM$^3$ViT, customizes mixture-of-experts (MoE) layers into a vision transformer\n(ViT) backbone for MTL, and sparsely activates task-specific experts during\ntraining. Then at inference with any task of interest, the same design allows\nfor activating only the task-corresponding sparse expert pathway, instead of\nthe full model. Our new model design is further enhanced by hardware-level\ninnovations, in particular, a novel computation reordering scheme tailored for\nmemory-constrained MTL that achieves zero-overhead switching between tasks and\ncan scale to any number of experts. When executing single-task inference,\nM$^{3}$ViT achieves higher accuracies than encoder-focused MTL methods, while\nsignificantly reducing 88% inference FLOPs. When implemented on a hardware\nplatform of one Xilinx ZCU104 FPGA, our co-design framework reduces the memory\nrequirement by 2.4 times, while achieving energy efficiency up to 9.23 times\nhigher than a comparable FPGA baseline. Code is available at:\nhttps://github.com/VITA-Group/M3ViT.", + "authors": "Hanxue Liang, Zhiwen Fan, Rishov Sarkar, Ziyu Jiang, Tianlong Chen, Kai Zou, Yu Cheng, Cong Hao, Zhangyang Wang", + "published": "2022-10-26", + "updated": "2022-10-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1810.05270v2", + "title": "Rethinking the Value of Network Pruning", + "abstract": "Network pruning is widely used for reducing the heavy inference cost of deep\nmodels in low-resource settings. A typical pruning algorithm is a three-stage\npipeline, i.e., training (a large model), pruning and fine-tuning. During\npruning, according to a certain criterion, redundant weights are pruned and\nimportant weights are kept to best preserve the accuracy. In this work, we make\nseveral surprising observations which contradict common beliefs. For all\nstate-of-the-art structured pruning algorithms we examined, fine-tuning a\npruned model only gives comparable or worse performance than training that\nmodel with randomly initialized weights. For pruning algorithms which assume a\npredefined target network architecture, one can get rid of the full pipeline\nand directly train the target network from scratch. Our observations are\nconsistent for multiple network architectures, datasets, and tasks, which imply\nthat: 1) training a large, over-parameterized model is often not necessary to\nobtain an efficient final model, 2) learned \"important\" weights of the large\nmodel are typically not useful for the small pruned model, 3) the pruned\narchitecture itself, rather than a set of inherited \"important\" weights, is\nmore crucial to the efficiency in the final model, which suggests that in some\ncases pruning can be useful as an architecture search paradigm. 
Our results\nsuggest the need for more careful baseline evaluations in future research on\nstructured pruning methods. We also compare with the \"Lottery Ticket\nHypothesis\" (Frankle & Carbin 2019), and find that with optimal learning rate,\nthe \"winning ticket\" initialization as used in Frankle & Carbin (2019) does not\nbring improvement over random initialization.", + "authors": "Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, Trevor Darrell", + "published": "2018-10-11", + "updated": "2019-03-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1904.02920v5", + "title": "Branched Multi-Task Networks: Deciding What Layers To Share", + "abstract": "In the context of multi-task learning, neural networks with branched\narchitectures have often been employed to jointly tackle the tasks at hand.\nSuch ramified networks typically start with a number of shared layers, after\nwhich different tasks branch out into their own sequence of layers.\nUnderstandably, as the number of possible network configurations is\ncombinatorially large, deciding what layers to share and where to branch out\nbecomes cumbersome. Prior works have either relied on ad hoc methods to\ndetermine the level of layer sharing, which is suboptimal, or utilized neural\narchitecture search techniques to establish the network design, which is\nconsiderably expensive. In this paper, we go beyond these limitations and\npropose an approach to automatically construct branched multi-task networks, by\nleveraging the employed tasks' affinities. Given a specific budget, i.e. number\nof learnable parameters, the proposed approach generates architectures, in\nwhich shallow layers are task-agnostic, whereas deeper ones gradually grow more\ntask-specific. Extensive experimental analysis across numerous, diverse\nmulti-tasking datasets shows that, for a given budget, our method consistently\nyields networks with the highest performance, while for a certain performance\nthreshold it requires the least amount of learnable parameters.", + "authors": "Simon Vandenhende, Stamatios Georgoulis, Bert De Brabandere, Luc Van Gool", + "published": "2019-04-05", + "updated": "2020-08-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1909.04860v1", + "title": "Deep Elastic Networks with Model Selection for Multi-Task Learning", + "abstract": "In this work, we consider the problem of instance-wise dynamic network model\nselection for multi-task learning. To this end, we propose an efficient\napproach to exploit a compact but accurate model in a backbone architecture for\neach instance of all tasks. The proposed method consists of an estimator and a\nselector. The estimator is based on a backbone architecture and structured\nhierarchically. It can produce multiple different network models of different\nconfigurations in a hierarchical structure. The selector chooses a model\ndynamically from a pool of candidate models given an input instance. The\nselector is a relatively small-size network consisting of a few layers, which\nestimates a probability distribution over the candidate models when an input\ninstance of a task is given. Both estimator and selector are jointly trained in\na unified learning framework in conjunction with a sampling-based learning\nstrategy, without additional computation steps. 
We demonstrate the proposed\napproach for several image classification tasks compared to existing approaches\nperforming model selection or learning multiple tasks. Experimental results\nshow that our approach gives not only outstanding performance compared to other\ncompetitors but also the versatility to perform instance-wise model selection\nfor multiple tasks.", + "authors": "Chanho Ahn, Eunwoo Kim, Songhwai Oh", + "published": "2019-09-11", + "updated": "2019-09-11", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1701.06538v1", + "title": "Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer", + "abstract": "The capacity of a neural network to absorb information is limited by its\nnumber of parameters. Conditional computation, where parts of the network are\nactive on a per-example basis, has been proposed in theory as a way of\ndramatically increasing model capacity without a proportional increase in\ncomputation. In practice, however, there are significant algorithmic and\nperformance challenges. In this work, we address these challenges and finally\nrealize the promise of conditional computation, achieving greater than 1000x\nimprovements in model capacity with only minor losses in computational\nefficiency on modern GPU clusters. We introduce a Sparsely-Gated\nMixture-of-Experts layer (MoE), consisting of up to thousands of feed-forward\nsub-networks. A trainable gating network determines a sparse combination of\nthese experts to use for each example. We apply the MoE to the tasks of\nlanguage modeling and machine translation, where model capacity is critical for\nabsorbing the vast quantities of knowledge available in the training corpora.\nWe present model architectures in which a MoE with up to 137 billion parameters\nis applied convolutionally between stacked LSTM layers. On large language\nmodeling and machine translation benchmarks, these models achieve significantly\nbetter results than state-of-the-art at lower computational cost.", + "authors": "Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean", + "published": "2017-01-23", + "updated": "2017-01-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "cs.NE", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1908.09597v1", + "title": "Stochastic Filter Groups for Multi-Task CNNs: Learning Specialist and Generalist Convolution Kernels", + "abstract": "The performance of multi-task learning in Convolutional Neural Networks\n(CNNs) hinges on the design of feature sharing between tasks within the\narchitecture. The number of possible sharing patterns are combinatorial in the\ndepth of the network and the number of tasks, and thus hand-crafting an\narchitecture, purely based on the human intuitions of task relationships can be\ntime-consuming and suboptimal. In this paper, we present a probabilistic\napproach to learning task-specific and shared representations in CNNs for\nmulti-task learning. Specifically, we propose \"stochastic filter groups''\n(SFG), a mechanism to assign convolution kernels in each layer to \"specialist''\nor \"generalist'' groups, which are specific to or shared across different\ntasks, respectively. 
The SFG modules determine the connectivity between layers\nand the structures of task-specific and shared representations in the network.\nWe employ variational inference to learn the posterior distribution over the\npossible grouping of kernels and network parameters. Experiments demonstrate\nthat the proposed method generalises across multiple tasks and shows improved\nperformance over baseline methods.", + "authors": "Felix J. S. Bragman, Ryutaro Tanno, Sebastien Ourselin, Daniel C. Alexander, M. Jorge Cardoso", + "published": "2019-08-26", + "updated": "2019-08-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2312.00968v2", + "title": "Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts", + "abstract": "Large multi-modal models (LMMs) exhibit remarkable performance across\nnumerous tasks. However, generalist LMMs often suffer from performance\ndegradation when tuned over a large collection of tasks. Recent research\nsuggests that Mixture of Experts (MoE) architectures are useful for instruction\ntuning, but for LMMs of parameter size around O(50-100B), the prohibitive cost\nof replicating and storing the expert models severely limits the number of\nexperts we can use. We propose Omni-SMoLA, an architecture that uses the Soft\nMoE approach to (softly) mix many multimodal low rank experts, and avoids\nintroducing a significant number of new parameters compared to conventional MoE\nmodels. The core intuition here is that the large model provides a foundational\nbackbone, while different lightweight experts residually learn specialized\nknowledge, either per-modality or multimodally. Extensive experiments\ndemonstrate that the SMoLA approach helps improve the generalist performance\nacross a broad range of generative vision-and-language tasks, achieving new\nSoTA generalist performance that often matches or outperforms single\nspecialized LMM baselines, as well as new SoTA specialist performance.", + "authors": "Jialin Wu, Xia Hu, Yaqing Wang, Bo Pang, Radu Soricut", + "published": "2023-12-01", + "updated": "2024-04-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2305.03288v2", + "title": "Demystifying Softmax Gating Function in Gaussian Mixture of Experts", + "abstract": "Understanding the parameter estimation of softmax gating Gaussian mixture of\nexperts has remained a long-standing open problem in the literature. It is\nmainly due to three fundamental theoretical challenges associated with the\nsoftmax gating function: (i) the identifiability only up to the translation of\nparameters; (ii) the intrinsic interaction via partial differential equations\nbetween the softmax gating and the expert functions in the Gaussian density;\n(iii) the complex dependence between the numerator and denominator of the\nconditional density of softmax gating Gaussian mixture of experts. We resolve\nthese challenges by proposing novel Voronoi loss functions among parameters and\nestablishing the convergence rates of maximum likelihood estimator (MLE) for\nsolving parameter estimation in these models. 
When the true number of experts\nis unknown and over-specified, our findings show a connection between the\nconvergence rate of the MLE and a solvability problem of a system of polynomial\nequations.", + "authors": "Huy Nguyen, TrungTin Nguyen, Nhat Ho", + "published": "2023-05-05", + "updated": "2023-10-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "math.ST", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.11706v1", + "title": "Mixture of ELM based experts with trainable gating network", + "abstract": "Mixture of experts method is a neural network based ensemble learning that\nhas great ability to improve the overall classification accuracy. This method\nis based on the divide and conquer principle, in which the problem space is\ndivided between several experts by supervisition of gating network. In this\npaper, we propose an ensemble learning method based on mixture of experts which\nis named mixture of ELM based experts with trainable gating network (MEETG) to\nimprove the computing cost and to speed up the learning process of ME. The\nstructure of ME consists of multi layer perceptrons (MLPs) as base experts and\ngating network, in which gradient-based learning algorithm is applied for\ntraining the MLPs which is an iterative and time consuming process. In order to\novercome on these problems, we use the advantages of extreme learning machine\n(ELM) for designing the structure of ME. ELM as a learning algorithm for single\nhidden-layer feed forward neural networks provides much faster learning process\nand better generalization ability in comparision with some other traditional\nlearning algorithms. Also, in the proposed method a trainable gating network is\napplied to aggregate the outputs of the experts dynamically according to the\ninput sample. Our experimental results and statistical analysis on 11 benchmark\ndatasets confirm that MEETG has an acceptable performance in classification\nproblems. Furthermore, our experimental results show that the proposed approach\noutperforms the original ELM on prediction stability and classification\naccuracy.", + "authors": "Laleh Armi, Elham Abbasi, Jamal Zarepour-Ahmadabadi", + "published": "2021-05-25", + "updated": "2021-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.14703v1", + "title": "Improving Expert Specialization in Mixture of Experts", + "abstract": "Mixture of experts (MoE), introduced over 20 years ago, is the simplest gated\nmodular neural network architecture. There is renewed interest in MoE because\nthe conditional computation allows only parts of the network to be used during\neach inference, as was recently demonstrated in large scale natural language\nprocessing models. MoE is also of potential interest for continual learning, as\nexperts may be reused for new tasks, and new experts introduced. The gate in\nthe MoE architecture learns task decompositions and individual experts learn\nsimpler functions appropriate to the gate's decomposition. 
In this paper: (1)\nwe show that the original MoE architecture and its training method do not\nguarantee intuitive task decompositions and good expert utilization, indeed\nthey can fail spectacularly even for simple data such as MNIST and\nFashionMNIST; (2) we introduce a novel gating architecture, similar to\nattention, that improves performance and results in a lower entropy task\ndecomposition; and (3) we introduce a novel data-driven regularization that\nimproves expert specialization. We empirically validate our methods on MNIST,\nFashionMNIST and CIFAR-100 datasets.", + "authors": "Yamuna Krishnamurthy, Chris Watkins, Thomas Gaertner", + "published": "2023-02-28", + "updated": "2023-02-28", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2110.04260v3", + "title": "Taming Sparsely Activated Transformer with Stochastic Experts", + "abstract": "Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can\neasily scale to have outrageously large amounts of parameters without\nsignificant increase in computational cost. However, SAMs are reported to be\nparameter inefficient such that larger models do not always lead to better\nperformance. While most on-going research focuses on improving SAMs models by\nexploring methods of routing inputs to experts, our analysis reveals that such\nresearch might not lead to the solution we expect, i.e., the commonly-used\nrouting methods based on gating mechanisms do not work better than randomly\nrouting inputs to experts. In this paper, we propose a new expert-based model,\nTHOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models,\nsuch as the Switch Transformer, experts in THOR are randomly activated for each\ninput during training and inference. THOR models are trained using a\nconsistency regularized loss, where experts learn not only from training data\nbut also from other experts as teachers, such that all the experts make\nconsistent predictions. We validate the effectiveness of THOR on machine\ntranslation tasks. Results show that THOR models are more parameter efficient\nin that they significantly outperform the Transformer and MoE models across\nvarious settings. For example, in multilingual translation, THOR outperforms\nthe Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as\nthat of a state-of-the-art MoE model that is 18 times larger. Our code is\npublicly available at:\nhttps://github.com/microsoft/Stochastic-Mixture-of-Experts.", + "authors": "Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Tuo Zhao, Jianfeng Gao", + "published": "2021-10-08", + "updated": "2022-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1312.4314v3", + "title": "Learning Factored Representations in a Deep Mixture of Experts", + "abstract": "Mixtures of Experts combine the outputs of several \"expert\" networks, each of\nwhich specializes in a different part of the input space. This is achieved by\ntraining a \"gating\" network that maps each input to a distribution over the\nexperts. Such models show promise for building larger networks that are still\ncheap to compute at test time, and more parallelizable at training time. In\nthis this work, we extend the Mixture of Experts to a stacked model, the Deep\nMixture of Experts, with multiple sets of gating and experts. 
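The classic construction summarized just above, in which a gating network maps each input to a distribution over experts and the layer output is the gate-weighted combination of the expert outputs, can be sketched in a few lines. Below is a minimal NumPy illustration with randomly initialized parameters; the two-layer tanh experts and all sizes are assumptions made for the sketch, not details taken from any of the papers listed here.

```python
import numpy as np

rng = np.random.default_rng(0)

def softmax(z, axis=-1):
    z = z - z.max(axis=axis, keepdims=True)  # subtract max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

class DenseMoE:
    """Softmax-gated mixture of experts: y = sum_i g_i(x) * expert_i(x)."""

    def __init__(self, d_in, d_hidden, d_out, n_experts):
        self.W_gate = rng.normal(scale=0.1, size=(d_in, n_experts))
        # Each expert is a small two-layer MLP (an assumption made for this sketch).
        self.experts = [
            (rng.normal(scale=0.1, size=(d_in, d_hidden)),
             rng.normal(scale=0.1, size=(d_hidden, d_out)))
            for _ in range(n_experts)
        ]

    def __call__(self, x):
        gates = softmax(x @ self.W_gate)               # (batch, n_experts)
        outs = np.stack([np.tanh(x @ W1) @ W2          # (n_experts, batch, d_out)
                         for W1, W2 in self.experts])
        # Gate-weighted combination of expert outputs, computed per example.
        return np.einsum("be,ebd->bd", gates, outs)

x = rng.normal(size=(4, 16))            # a batch of 4 inputs
y = DenseMoE(16, 32, 8, n_experts=3)(x)
print(y.shape)                          # (4, 8)
```

Sparse variants keep the same structure but zero out all gate entries except the top-scoring ones per input before combining.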
This\nexponentially increases the number of effective experts by associating each\ninput with a combination of experts at each layer, yet maintains a modest model\nsize. On a randomly translated version of the MNIST dataset, we find that the\nDeep Mixture of Experts automatically learns to develop location-dependent\n(\"where\") experts at the first layer, and class-specific (\"what\") experts at\nthe second layer. In addition, we see that the different combinations are in\nuse when the model is applied to a dataset of speech monophones. These\ndemonstrate effective use of all expert combinations.", + "authors": "David Eigen, Marc'Aurelio Ranzato, Ilya Sutskever", + "published": "2013-12-16", + "updated": "2014-03-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2207.09094v1", + "title": "MoEC: Mixture of Expert Clusters", + "abstract": "Sparsely Mixture of Experts (MoE) has received great interest due to its\npromising scaling capability with affordable computational overhead. MoE\nconverts dense layers into sparse experts, and utilizes a gated routing network\nto make experts conditionally activated. However, as the number of experts\ngrows, MoE with outrageous parameters suffers from overfitting and sparse data\nallocation. Such problems are especially severe on tasks with limited data,\nthus hindering the progress for MoE models to improve performance by scaling\nup. In this work, we propose Mixture of Expert Clusters - a general approach to\nenable expert layers to learn more diverse and appropriate knowledge by\nimposing variance-based constraints on the routing stage. We further propose a\ncluster-level expert dropout strategy specifically designed for the expert\ncluster structure. Our experiments reveal that MoEC could improve performance\non machine translation and natural language understanding tasks, and raise the\nperformance upper bound for scaling up experts under limited data. We also\nverify that MoEC plays a positive role in mitigating overfitting and sparse\ndata allocation.", + "authors": "Yuan Xie, Shaohan Huang, Tianyu Chen, Furu Wei", + "published": "2022-07-19", + "updated": "2022-07-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.02813v1", + "title": "Towards Understanding Mixture of Experts in Deep Learning", + "abstract": "The Mixture-of-Experts (MoE) layer, a sparsely-activated model controlled by\na router, has achieved great success in deep learning. However, the\nunderstanding of such architecture remains elusive. In this paper, we formally\nstudy how the MoE layer improves the performance of neural network learning and\nwhy the mixture model will not collapse into a single model. Our empirical\nresults suggest that the cluster structure of the underlying problem and the\nnon-linearity of the expert are pivotal to the success of MoE. To further\nunderstand this, we consider a challenging classification problem with\nintrinsic cluster structures, which is hard to learn using a single expert. Yet\nwith the MoE layer, by choosing the experts as two-layer nonlinear\nconvolutional neural networks (CNNs), we show that the problem can be learned\nsuccessfully. 
Furthermore, our theory shows that the router can learn the\ncluster-center features, which helps divide the input complex problem into\nsimpler linear classification sub-problems that individual experts can conquer.\nTo our knowledge, this is the first result towards formally understanding the\nmechanism of the MoE layer for deep learning.", + "authors": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, Yuanzhi Li", + "published": "2022-08-04", + "updated": "2022-08-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.03994v1", + "title": "Video Relationship Detection Using Mixture of Experts", + "abstract": "Machine comprehension of visual information from images and videos by neural\nnetworks faces two primary challenges. Firstly, there exists a computational\nand inference gap in connecting vision and language, making it difficult to\naccurately determine which object a given agent acts on and represent it\nthrough language. Secondly, classifiers trained by a single, monolithic neural\nnetwork often lack stability and generalization. To overcome these challenges,\nwe introduce MoE-VRD, a novel approach to visual relationship detection\nutilizing a mixture of experts. MoE-VRD identifies language triplets in the\nform of < subject, predicate, object> tuples to extract relationships from\nvisual processing. Leveraging recent advancements in visual relationship\ndetection, MoE-VRD addresses the requirement for action recognition in\nestablishing relationships between subjects (acting) and objects (being acted\nupon). In contrast to single monolithic networks, MoE-VRD employs multiple\nsmall models as experts, whose outputs are aggregated. Each expert in MoE-VRD\nspecializes in visual relationship learning and object tagging. By utilizing a\nsparsely-gated mixture of experts, MoE-VRD enables conditional computation and\nsignificantly enhances neural network capacity without increasing computational\ncomplexity. Our experimental results demonstrate that the conditional\ncomputation capabilities and scalability of the mixture-of-experts approach\nlead to superior performance in visual relationship detection compared to\nstate-of-the-art methods.", + "authors": "Ala Shaabana, Zahra Gharaee, Paul Fieguth", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.14976v4", + "title": "MoCaE: Mixture of Calibrated Experts Significantly Improves Object Detection", + "abstract": "Combining the strengths of many existing predictors to obtain a Mixture of\nExperts which is superior to its individual components is an effective way to\nimprove the performance without having to develop new architectures or train a\nmodel from scratch. However, surprisingly, we find that na\\\"ively combining\nexpert object detectors in a similar way to Deep Ensembles, can often lead to\ndegraded performance. We identify that the primary cause of this issue is that\nthe predictions of the experts do not match their performance, a term referred\nto as miscalibration. Consequently, the most confident detector dominates the\nfinal predictions, preventing the mixture from leveraging all the predictions\nfrom the experts appropriately. 
To address this, when constructing the Mixture\nof Experts, we propose to combine their predictions in a manner which reflects\nthe individual performance of the experts; an objective we achieve by first\ncalibrating the predictions before filtering and refining them. We term this\napproach the Mixture of Calibrated Experts and demonstrate its effectiveness\nthrough extensive experiments on 5 different detection tasks using a variety of\ndetectors, showing that it: (i) improves object detectors on COCO and instance\nsegmentation methods on LVIS by up to $\\sim 2.5$ AP; (ii) reaches\nstate-of-the-art on COCO test-dev with $65.1$ AP and on DOTA with $82.62$\n$\\mathrm{AP_{50}}$; (iii) outperforms single models consistently on recent\ndetection tasks such as Open Vocabulary Object Detection.", + "authors": "Kemal Oksuz, Selim Kuzucu, Tom Joy, Puneet K. Dokania", + "published": "2023-09-26", + "updated": "2024-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.10768v1", + "title": "Memory Augmented Language Models through Mixture of Word Experts", + "abstract": "Scaling up the number of parameters of language models has proven to be an\neffective approach to improve performance. For dense models, increasing model\nsize proportionally increases the model's computation footprint. In this work,\nwe seek to aggressively decouple learning capacity and FLOPs through\nMixture-of-Experts (MoE) style models with large knowledge-rich vocabulary\nbased routing functions and experts. Our proposed approach, dubbed Mixture of\nWord Experts (MoWE), can be seen as a memory augmented model, where a large set\nof word-specific experts play the role of a sparse memory. We demonstrate that\nMoWE performs significantly better than the T5 family of models with similar\nnumber of FLOPs in a variety of NLP tasks. Additionally, MoWE outperforms\nregular MoE models on knowledge intensive tasks and has similar performance to\nmore complex memory augmented approaches that often require to invoke custom\nmechanisms to search the sparse memory.", + "authors": "Cicero Nogueira dos Santos, James Lee-Thorp, Isaac Noble, Chung-Ching Chang, David Uthus", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2206.00277v2", + "title": "Task-Specific Expert Pruning for Sparse Mixture-of-Experts", + "abstract": "The sparse Mixture-of-Experts (MoE) model is powerful for large-scale\npre-training and has achieved promising results due to its model capacity.\nHowever, with trillions of parameters, MoE is hard to be deployed on cloud or\nmobile environment. The inference of MoE requires expert parallelism, which is\nnot hardware-friendly and communication expensive. Especially for\nresource-limited downstream tasks, such sparse structure has to sacrifice a lot\nof computing efficiency for limited performance gains. In this work, we observe\nmost experts contribute scarcely little to the MoE fine-tuning and inference.\nWe further propose a general method to progressively drop the non-professional\nexperts for the target downstream task, which preserves the benefits of MoE\nwhile reducing the MoE model into one single-expert dense model. 
Our\nexperiments reveal that the fine-tuned single-expert model could preserve 99.3%\nbenefits from MoE across six different types of tasks while enjoying 2x\ninference speed with free communication cost.", + "authors": "Tianyu Chen, Shaohan Huang, Yuan Xie, Binxing Jiao, Daxin Jiang, Haoyi Zhou, Jianxin Li, Furu Wei", + "published": "2022-06-01", + "updated": "2022-06-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.04377v2", + "title": "Convergence Rates for Gaussian Mixtures of Experts", + "abstract": "We provide a theoretical treatment of over-specified Gaussian mixtures of\nexperts with covariate-free gating networks. We establish the convergence rates\nof the maximum likelihood estimation (MLE) for these models. Our proof\ntechnique is based on a novel notion of \\emph{algebraic independence} of the\nexpert functions. Drawing on optimal transport theory, we establish a\nconnection between the algebraic independence and a certain class of partial\ndifferential equations (PDEs). Exploiting this connection allows us to derive\nconvergence rates and minimax lower bounds for parameter estimation.", + "authors": "Nhat Ho, Chiao-Yu Yang, Michael I. Jordan", + "published": "2019-07-09", + "updated": "2022-03-08", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "cs.LG", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.07816v1", + "title": "Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", + "abstract": "We investigate efficient methods for training Large Language Models (LLMs) to\npossess capabilities in multiple specialized domains, such as coding, math\nreasoning and world knowledge. Our method, named Branch-Train-MiX (BTX), starts\nfrom a seed model, which is branched to train experts in embarrassingly\nparallel fashion with high throughput and reduced communication cost. After\nindividual experts are asynchronously trained, BTX brings together their\nfeedforward parameters as experts in Mixture-of-Expert (MoE) layers and\naverages the remaining parameters, followed by an MoE-finetuning stage to learn\ntoken-level routing. BTX generalizes two special cases, the Branch-Train-Merge\nmethod, which does not have the MoE finetuning stage to learn routing, and\nsparse upcycling, which omits the stage of training experts asynchronously.\nCompared to alternative approaches, BTX achieves the best accuracy-efficiency\ntradeoff.", + "authors": "Sainbayar Sukhbaatar, Olga Golovneva, Vasu Sharma, Hu Xu, Xi Victoria Lin, Baptiste Rozi\u00e8re, Jacob Kahn, Daniel Li, Wen-tau Yih, Jason Weston, Xian Li", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.05346v1", + "title": "A Modular Task-oriented Dialogue System Using a Neural Mixture-of-Experts", + "abstract": "End-to-end Task-oriented Dialogue Systems (TDSs) have attracted a lot of\nattention for their superiority (e.g., in terms of global optimization) over\npipeline modularized TDSs. Previous studies on end-to-end TDSs use a\nsingle-module model to generate responses for complex dialogue contexts.\nHowever, no model consistently outperforms the others in all cases. 
We propose\na neural Modular Task-oriented Dialogue System(MTDS) framework, in which a few\nexpert bots are combined to generate the response for a given dialogue context.\nMTDS consists of a chair bot and several expert bots. Each expert bot is\nspecialized for a particular situation, e.g., one domain, one type of action of\na system, etc. The chair bot coordinates multiple expert bots and adaptively\nselects an expert bot to generate the appropriate response. We further propose\na Token-level Mixture-of-Expert (TokenMoE) model to implement MTDS, where the\nexpert bots predict multiple tokens at each timestamp and the chair bot\ndetermines the final generated token by fully taking into consideration the\noutputs of all expert bots. Both the chair bot and the expert bots are jointly\ntrained in an end-to-end fashion. To verify the effectiveness of TokenMoE, we\ncarry out extensive experiments on a benchmark dataset. Compared with the\nbaseline using a single-module model, our TokenMoE improves the performance by\n8.1% of inform rate and 0.8% of success rate.", + "authors": "Jiahuan Pei, Pengjie Ren, Maarten de Rijke", + "published": "2019-07-10", + "updated": "2019-07-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2303.06318v2", + "title": "A Hybrid Tensor-Expert-Data Parallelism Approach to Optimize Mixture-of-Experts Training", + "abstract": "Mixture-of-Experts (MoE) is a neural network architecture that adds sparsely\nactivated expert blocks to a base model, increasing the number of parameters\nwithout impacting computational costs. However, current distributed deep\nlearning frameworks are limited in their ability to train high-quality MoE\nmodels with large base models. In this work, we present DeepSpeed-TED, a novel,\nthree-dimensional, hybrid parallel algorithm that combines data, tensor, and\nexpert parallelism to enable the training of MoE models with 4 to 8x larger\nbase models than the current state-of-the-art. We also describe memory\noptimizations in the optimizer step, and communication optimizations that\neliminate unnecessary data movement. We implement our approach in DeepSpeed and\nachieve speedups of 26% over a baseline (i.e. without our communication\noptimizations) when training a 40 billion parameter MoE model (6.7 billion base\nmodel with 16 experts) on 128 V100 GPUs.", + "authors": "Siddharth Singh, Olatunji Ruwase, Ammar Ahmad Awan, Samyam Rajbhandari, Yuxiong He, Abhinav Bhatele", + "published": "2023-03-11", + "updated": "2023-05-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.DC", + "cs.PF" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2004.03751v4", + "title": "Robust Fitting of Mixture Models using Weighted Complete Estimating Equations", + "abstract": "Mixture modeling, which considers the potential heterogeneity in data, is\nwidely adopted for classification and clustering problems. Mixture models can\nbe estimated using the Expectation-Maximization algorithm, which works with the\ncomplete estimating equations conditioned by the latent membership variables of\nthe cluster assignment based on the hierarchical expression of mixture models.\nHowever, when the mixture components have light tails such as a normal\ndistribution, the mixture model can be sensitive to outliers. 
This study\nproposes a method of weighted complete estimating equations (WCE) for the\nrobust fitting of mixture models. Our WCE introduces weights to complete\nestimating equations such that the weights can automatically downweight the\noutliers. The weights are constructed similarly to the density power divergence\nfor mixture models, but in our WCE, they depend only on the component\ndistributions and not on the whole mixture. A novel\nexpectation-estimating-equation (EEE) algorithm is also developed to solve the\nWCE. For illustrative purposes, a multivariate Gaussian mixture, a mixture of\nexperts, and a multivariate skew normal mixture are considered, and how our EEE\nalgorithm can be implemented for these specific models is described. The\nnumerical performance of the proposed robust estimation method was examined\nusing simulated and real datasets.", + "authors": "Shonosuke Sugasawa, Genya Kobayashi", + "published": "2020-04-08", + "updated": "2022-03-17", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08753v1", + "title": "Table-based Fact Verification with Self-adaptive Mixture of Experts", + "abstract": "The table-based fact verification task has recently gained widespread\nattention and yet remains to be a very challenging problem. It inherently\nrequires informative reasoning over natural language together with different\nnumerical and logical reasoning on tables (e.g., count, superlative,\ncomparative). Considering that, we exploit mixture-of-experts and present in\nthis paper a new method: Self-adaptive Mixture-of-Experts Network (SaMoE).\nSpecifically, we have developed a mixture-of-experts neural network to\nrecognize and execute different types of reasoning -- the network is composed\nof multiple experts, each handling a specific part of the semantics for\nreasoning, whereas a management module is applied to decide the contribution of\neach expert network to the verification result. A self-adaptive method is\ndeveloped to teach the management module combining results of different experts\nmore efficiently without external knowledge. The experimental results\nillustrate that our framework achieves 85.1% accuracy on the benchmark dataset\nTabFact, comparable with the previous state-of-the-art models. We hope our\nframework can serve as a new baseline for table-based verification. Our code is\navailable at https://github.com/THUMLP/SaMoE.", + "authors": "Yuxuan Zhou, Xien Liu, Kaiyin Zhou, Ji Wu", + "published": "2022-04-19", + "updated": "2022-04-19", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.02043v1", + "title": "mixdistreg: An R Package for Fitting Mixture of Experts Distributional Regression with Adaptive First-order Methods", + "abstract": "This paper presents a high-level description of the R software package\nmixdistreg to fit mixture of experts distributional regression models. The\nproposed framework is implemented in R using the deepregression software\ntemplate, which is based on TensorFlow and follows the neural structured\nadditive learning principle. The software comprises various approaches as\nspecial cases, including mixture density networks and mixture regression\napproaches. 
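As a concrete reference point for the mixture-of-experts distributional regression family described above (with mixture density networks as a special case), the conditional density takes the form p(y | x) = sum_k pi_k(x) * N(y; mu_k(x), sigma_k(x)^2): a gating network supplies the mixture weights and each expert supplies the parameters of one component. A minimal NumPy evaluation of that density with linear component functions is sketched below; the linear parameterization and the random parameters are illustrative assumptions, not the package's actual implementation.

```python
import numpy as np

rng = np.random.default_rng(1)

def softmax(z):
    z = z - z.max(axis=-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

def moe_density(y, x, W_gate, W_mu, W_logsig):
    """p(y | x) for a Gaussian mixture of experts with linear component functions.

    pi_k(x)    = softmax(x @ W_gate)_k     (gating network)
    mu_k(x)    = (x @ W_mu)_k              (per-component mean)
    sigma_k(x) = exp((x @ W_logsig)_k)     (per-component scale)
    """
    pi = softmax(x @ W_gate)                           # (n, K) mixture weights
    mu = x @ W_mu                                      # (n, K) component means
    sigma = np.exp(x @ W_logsig)                       # (n, K) component scales
    comp = np.exp(-0.5 * ((y[:, None] - mu) / sigma) ** 2) / (
        np.sqrt(2 * np.pi) * sigma)                    # Gaussian pdf per component
    return (pi * comp).sum(axis=1)                     # mix with the gating weights

n, d, K = 5, 3, 2
x = rng.normal(size=(n, d))
y = rng.normal(size=n)
dens = moe_density(y, x, rng.normal(size=(d, K)),
                   rng.normal(size=(d, K)), rng.normal(scale=0.1, size=(d, K)))
print(dens)   # one conditional density value per observation
```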
Various code examples are given to demonstrate the package's\nfunctionality.", + "authors": "David R\u00fcgamer", + "published": "2023-02-04", + "updated": "2023-02-04", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1605.01652v1", + "title": "LSTM-based Mixture-of-Experts for Knowledge-Aware Dialogues", + "abstract": "We introduce an LSTM-based method for dynamically integrating several\nword-prediction experts to obtain a conditional language model which can be\ngood simultaneously at several subtasks. We illustrate this general approach\nwith an application to dialogue where we integrate a neural chat model, good at\nconversational aspects, with a neural question-answering model, good at\nretrieving precise information from a knowledge-base, and show how the\nintegration combines the strengths of the independent components. We hope that\nthis focused contribution will attract attention on the benefits of using such\nmixtures of experts in NLP.", + "authors": "Phong Le, Marc Dymetman, Jean-Michel Renders", + "published": "2016-05-05", + "updated": "2016-05-05", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1904.09948v1", + "title": "PLUME: Polyhedral Learning Using Mixture of Experts", + "abstract": "In this paper, we propose a novel mixture of expert architecture for learning\npolyhedral classifiers. We learn the parameters of the classifierusing an\nexpectation maximization algorithm. Wederive the generalization bounds of the\nproposedapproach. Through an extensive simulation study, we show that the\nproposed method performs comparably to other state-of-the-art approaches.", + "authors": "Kulin Shah, P. S. Sastry, Naresh Manwani", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.16710v1", + "title": "Prediction Sets for High-Dimensional Mixture of Experts Models", + "abstract": "Large datasets make it possible to build predictive models that can capture\nheterogenous relationships between the response variable and features. The\nmixture of high-dimensional linear experts model posits that observations come\nfrom a mixture of high-dimensional linear regression models, where the mixture\nweights are themselves feature-dependent. In this paper, we show how to\nconstruct valid prediction sets for an $\\ell_1$-penalized mixture of experts\nmodel in the high-dimensional setting. We make use of a debiasing procedure to\naccount for the bias induced by the penalization and propose a novel strategy\nfor combining intervals to form a prediction set with coverage guarantees in\nthe mixture setting. 
Synthetic examples and an application to the prediction of\ncritical temperatures of superconducting materials show our method to have\nreliable practical performance.", + "authors": "Adel Javanmard, Simeng Shao, Jacob Bien", + "published": "2022-10-30", + "updated": "2022-10-30", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.04894v1", + "title": "DAMEX: Dataset-aware Mixture-of-Experts for visual understanding of mixture-of-datasets", + "abstract": "Construction of a universal detector poses a crucial question: How can we\nmost effectively train a model on a large mixture of datasets? The answer lies\nin learning dataset-specific features and ensembling their knowledge but do all\nthis in a single model. Previous methods achieve this by having separate\ndetection heads on a common backbone but that results in a significant increase\nin parameters. In this work, we present Mixture-of-Experts as a solution,\nhighlighting that MoEs are much more than a scalability tool. We propose\nDataset-Aware Mixture-of-Experts, DAMEX where we train the experts to become an\n`expert' of a dataset by learning to route each dataset tokens to its mapped\nexpert. Experiments on Universal Object-Detection Benchmark show that we\noutperform the existing state-of-the-art by average +10.2 AP score and improve\nover our non-MoE baseline by average +2.0 AP score. We also observe consistent\ngains while mixing datasets with (1) limited availability, (2) disparate\ndomains and (3) divergent label sets. Further, we qualitatively show that DAMEX\nis robust against expert representation collapse.", + "authors": "Yash Jain, Harkirat Behl, Zsolt Kira, Vibhav Vineet", + "published": "2023-11-08", + "updated": "2023-11-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12656v2", + "title": "HyperMoE: Paying Attention to Unselected Experts in Mixture of Experts via Dynamic Transfer", + "abstract": "The Mixture of Experts (MoE) for language models has been proven effective in\naugmenting the capacity of models by dynamically routing each input token to a\nspecific subset of experts for processing. Despite the success, most existing\nmethods face a challenge for balance between sparsity and the availability of\nexpert knowledge: enhancing performance through increased use of expert\nknowledge often results in diminishing sparsity during expert selection. To\nmitigate this contradiction, we propose HyperMoE, a novel MoE framework built\nupon Hypernetworks. This framework integrates the computational processes of\nMoE with the concept of knowledge transferring in multi-task learning. Specific\nmodules generated based on the information of unselected experts serve as\nsupplementary information, which allows the knowledge of experts not selected\nto be used while maintaining selection sparsity. 
Our comprehensive empirical\nevaluations across multiple datasets and backbones establish that HyperMoE\nsignificantly outperforms existing MoE methods under identical conditions\nconcerning the number of experts.", + "authors": "Hao Zhao, Zihan Qiu, Huijia Wu, Zili Wang, Zhaofeng He, Jie Fu", + "published": "2024-02-20", + "updated": "2024-02-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05526v1", + "title": "Buffer Overflow in Mixture of Experts", + "abstract": "Mixture of Experts (MoE) has become a key ingredient for scaling large\nfoundation models while keeping inference costs steady. We show that expert\nrouting strategies that have cross-batch dependencies are vulnerable to\nattacks. Malicious queries can be sent to a model and can affect a model's\noutput on other benign queries if they are grouped in the same batch. We\ndemonstrate this via a proof-of-concept attack in a toy experimental setting.", + "authors": "Jamie Hayes, Ilia Shumailov, Itay Yona", + "published": "2024-02-08", + "updated": "2024-02-08", + "primary_cat": "cs.CR", + "cats": [ + "cs.CR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.04693v2", + "title": "GraphMETRO: Mitigating Complex Graph Distribution Shifts via Mixture of Aligned Experts", + "abstract": "Graph data are inherently complex and heterogeneous, leading to a high\nnatural diversity of distributional shifts. However, it remains unclear how to\nbuild machine learning architectures that generalize to complex non-synthetic\ndistributional shifts naturally occurring in the real world. Here we develop\nGraphMETRO, a Graph Neural Network architecture, that reliably models natural\ndiversity and captures complex distributional shifts. GraphMETRO employs a\nMixture-of-Experts (MoE) architecture with a gating model and multiple expert\nmodels, where each expert model targets a specific distributional shift to\nproduce a shift-invariant representation, and the gating model identifies shift\ncomponents. Additionally, we design a novel objective that aligns the\nrepresentations from different expert models to ensure smooth optimization.\nGraphMETRO achieves state-of-the-art results on four datasets from GOOD\nbenchmark comprised of complex and natural real-world distribution shifts,\nimproving by 67% and 4.2% on WebKB and Twitch datasets.", + "authors": "Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, Jure Leskovec", + "published": "2023-12-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.01899v1", + "title": "MiCE: Mixture of Contrastive Experts for Unsupervised Image Clustering", + "abstract": "We present Mixture of Contrastive Experts (MiCE), a unified probabilistic\nclustering framework that simultaneously exploits the discriminative\nrepresentations learned by contrastive learning and the semantic structures\ncaptured by a latent mixture model. Motivated by the mixture of experts, MiCE\nemploys a gating function to partition an unlabeled dataset into subsets\naccording to the latent semantics and multiple experts to discriminate distinct\nsubsets of instances assigned to them in a contrastive learning manner. 
To\nsolve the nontrivial inference and learning problems caused by the latent\nvariables, we further develop a scalable variant of the\nExpectation-Maximization (EM) algorithm for MiCE and provide proof of the\nconvergence. Empirically, we evaluate the clustering performance of MiCE on\nfour widely adopted natural image datasets. MiCE achieves significantly better\nresults than various previous methods and a strong contrastive learning\nbaseline.", + "authors": "Tsung Wei Tsai, Chongxuan Li, Jun Zhu", + "published": "2021-05-05", + "updated": "2021-05-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1905.12969v1", + "title": "Enriched Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts probabilistically divide the input space into regions,\nwhere the assumptions of each expert, or conditional model, need only hold\nlocally. Combined with Gaussian process (GP) experts, this results in a\npowerful and highly flexible model. We focus on alternative mixtures of GP\nexperts, which model the joint distribution of the inputs and targets\nexplicitly. We highlight issues of this approach in multi-dimensional input\nspaces, namely, poor scalability and the need for an unnecessarily large number\nof experts, degrading the predictive performance and increasing uncertainty. We\nconstruct a novel model to address these issues through a nested partitioning\nscheme that automatically infers the number of components at both levels.\nMultiple response types are accommodated through a generalised GP framework,\nwhile multiple input types are included through a factorised exponential family\nstructure. We show the effectiveness of our approach in estimating a\nparsimonious probabilistic description of both synthetic data of increasing\ndimension and an Alzheimer's challenge dataset.", + "authors": "Charles W. L. Gadd, Sara Wade, Alexis Boukouvalas", + "published": "2019-05-30", + "updated": "2019-05-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.15961v1", + "title": "Mixture of Tokens: Efficient LLMs through Cross-Example Aggregation", + "abstract": "Despite the promise of Mixture of Experts (MoE) models in increasing\nparameter counts of Transformer models while maintaining training and inference\ncosts, their application carries notable drawbacks. The key strategy of these\nmodels is to, for each processed token, activate at most a few experts -\nsubsets of an extensive feed-forward layer. But this approach is not without\nits challenges. The operation of matching experts and tokens is discrete, which\nmakes MoE models prone to issues like training instability and uneven expert\nutilization. Existing techniques designed to address these concerns, such as\nauxiliary losses or balance-aware matching, result either in lower model\nperformance or are more difficult to train. In response to these issues, we\npropose Mixture of Tokens, a fully-differentiable model that retains the\nbenefits of MoE architectures while avoiding the aforementioned difficulties.\nRather than routing tokens to experts, this approach mixes tokens from\ndifferent examples prior to feeding them to experts, enabling the model to\nlearn from all token-expert combinations. Importantly, this mixing can be\ndisabled to avoid mixing of different sequences during inference. 
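The mixing step described here (blend tokens from a group with learned soft weights, let each expert process one blended input, then redistribute the expert outputs back to the tokens with the same weights) can be made concrete with a short NumPy sketch. The shapes, the single linear map per expert, and the redistribution rule below are simplifying assumptions for illustration, not the authors' implementation.

```python
import numpy as np

rng = np.random.default_rng(2)

def softmax(z, axis=0):
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

group, d, n_experts = 8, 16, 4            # tokens per group, width, experts
X = rng.normal(size=(group, d))           # tokens pooled from different examples
Phi = rng.normal(scale=0.1, size=(d, n_experts))    # mixing parameters, one vector per expert
W = rng.normal(scale=0.1, size=(n_experts, d, d))   # one linear expert per slot (assumption)

# 1) Soft mixing weights: how much each token contributes to each expert's input.
mix = softmax(X @ Phi, axis=0)            # (group, n_experts), each column sums to 1
mixed = mix.T @ X                         # (n_experts, d): one blended input per expert

# 2) Each expert processes its blended token.
expert_out = np.einsum("ed,edf->ef", mixed, W)      # (n_experts, d)

# 3) Redistribute expert outputs back to tokens using the same soft weights.
Y = mix @ expert_out                      # (group, d)
print(Y.shape)
```

Because the mixing weights are continuous, every token-expert combination contributes to the gradient, which is the fully differentiable property the abstract emphasizes.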
Crucially,\nthis method is fully compatible with both masked and causal Large Language\nModel training and inference.", + "authors": "Szymon Antoniak, Sebastian Jaszczur, Micha\u0142 Krutul, Maciej Pi\u00f3ro, Jakub Krajewski, Jan Ludziejewski, Tomasz Odrzyg\u00f3\u017ad\u017a, Marek Cygan", + "published": "2023-10-24", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.15969v2", + "title": "Routers in Vision Mixture of Experts: An Empirical Study", + "abstract": "Mixture-of-Experts (MoE) models are a promising way to scale up model\ncapacity without significantly increasing computational cost. A key component\nof MoEs is the router, which decides which subset of parameters (experts)\nprocess which feature embeddings (tokens). In this paper, we present a\ncomprehensive study of routers in MoEs for computer vision tasks. We introduce\na unified MoE formulation that subsumes different MoEs with two parametric\nrouting tensors. This formulation covers both sparse MoE, which uses a binary\nor hard assignment between experts and tokens, and soft MoE, which uses a soft\nassignment between experts and weighted combinations of tokens. Routers for\nsparse MoEs can be further grouped into two variants: Token Choice, which\nmatches experts to each token, and Expert Choice, which matches tokens to each\nexpert. We conduct head-to-head experiments with 6 different routers, including\nexisting routers from prior work and new ones we introduce. We show that (i)\nmany routers originally developed for language modeling can be adapted to\nperform strongly in vision tasks, (ii) in sparse MoE, Expert Choice routers\ngenerally outperform Token Choice routers, and (iii) soft MoEs generally\noutperform sparse MoEs with a fixed compute budget. These results provide new\ninsights regarding the crucial role of routers in vision MoE models.", + "authors": "Tianlin Liu, Mathieu Blondel, Carlos Riquelme, Joan Puigcerver", + "published": "2024-01-29", + "updated": "2024-04-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.13934v1", + "title": "Functional mixture-of-experts for classification", + "abstract": "We develop a mixtures-of-experts (ME) approach to the multiclass\nclassification where the predictors are univariate functions. It consists of a\nME model in which both the gating network and the experts network are\nconstructed upon multinomial logistic activation functions with functional\ninputs. We perform a regularized maximum likelihood estimation in which the\ncoefficient functions enjoy interpretable sparsity constraints on targeted\nderivatives. We develop an EM-Lasso like algorithm to compute the regularized\nMLE and evaluate the proposed approach on simulated and real data.", + "authors": "Nhat Thien Pham, Faicel Chamroukhi", + "published": "2022-02-28", + "updated": "2022-02-28", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.00893v1", + "title": "MoDE: A Mixture-of-Experts Model with Mutual Distillation among the Experts", + "abstract": "The application of mixture-of-experts (MoE) is gaining popularity due to its\nability to improve model's performance. 
In an MoE structure, the gate layer\nplays a significant role in distinguishing and routing input features to\ndifferent experts. This enables each expert to specialize in processing their\ncorresponding sub-tasks. However, the gate's routing mechanism also gives rise\nto narrow vision: the individual MoE's expert fails to use more samples in\nlearning the allocated sub-task, which in turn limits the MoE to further\nimprove its generalization ability. To effectively address this, we propose a\nmethod called Mixture-of-Distilled-Expert (MoDE), which applies moderate mutual\ndistillation among experts to enable each expert to pick up more features\nlearned by other experts and gain more accurate perceptions on their original\nallocated sub-tasks. We conduct plenty experiments including tabular, NLP and\nCV datasets, which shows MoDE's effectiveness, universality and robustness.\nFurthermore, we develop a parallel study through innovatively constructing\n\"expert probing\", to experimentally prove why MoDE works: moderate distilling\nknowledge can improve each individual expert's test performances on their\nassigned tasks, leading to MoE's overall performance improvement.", + "authors": "Zhitian Xie, Yinger Zhang, Chenyi Zhuang, Qitao Shi, Zhining Liu, Jinjie Gu, Guannan Zhang", + "published": "2024-01-31", + "updated": "2024-01-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2011.01613v1", + "title": "Towards a Universal Gating Network for Mixtures of Experts", + "abstract": "The combination and aggregation of knowledge from multiple neural networks\ncan be commonly seen in the form of mixtures of experts. However, such\ncombinations are usually done using networks trained on the same tasks, with\nlittle mention of the combination of heterogeneous pre-trained networks,\nespecially in the data-free regime. This paper proposes multiple data-free\nmethods for the combination of heterogeneous neural networks, ranging from the\nutilization of simple output logit statistics, to training specialized gating\nnetworks. The gating networks decide whether specific inputs belong to specific\nnetworks based on the nature of the expert activations generated. The\nexperiments revealed that the gating networks, including the universal gating\napproach, constituted the most accurate approach, and therefore represent a\npragmatic step towards applications with heterogeneous mixtures of experts in a\ndata-free regime. The code for this project is hosted on github at\nhttps://github.com/cwkang1998/network-merging.", + "authors": "Chen Wen Kang, Chua Meng Hong, Tomas Maul", + "published": "2020-11-03", + "updated": "2020-11-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.01778v1", + "title": "Hierarchical mixture of discriminative Generalized Dirichlet classifiers", + "abstract": "This paper presents a discriminative classifier for compositional data. This\nclassifier is based on the posterior distribution of the Generalized Dirichlet\nwhich is the discriminative counterpart of Generalized Dirichlet mixture model.\nMoreover, following the mixture of experts paradigm, we proposed a hierarchical\nmixture of this classifier. In order to learn the models parameters, we use a\nvariational approximation by deriving an upper-bound for the Generalized\nDirichlet mixture. 
To the best of our knownledge, this is the first time this\nbound is proposed in the literature. Experimental results are presented for\nspam detection and color space identification.", + "authors": "Elvis Togban, Djemel Ziou", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.09179v3", + "title": "On the Representation Collapse of Sparse Mixture of Experts", + "abstract": "Sparse mixture of experts provides larger model capacity while requiring a\nconstant computational overhead. It employs the routing mechanism to distribute\ninput tokens to the best-matched experts according to their hidden\nrepresentations. However, learning such a routing mechanism encourages token\nclustering around expert centroids, implying a trend toward representation\ncollapse. In this work, we propose to estimate the routing scores between\ntokens and experts on a low-dimensional hypersphere. We conduct extensive\nexperiments on cross-lingual language model pre-training and fine-tuning on\ndownstream tasks. Experimental results across seven multilingual benchmarks\nshow that our method achieves consistent gains. We also present a comprehensive\nanalysis on the representation and routing behaviors of our models. Our method\nalleviates the representation collapse issue and achieves more consistent\nrouting than the baseline mixture-of-experts methods.", + "authors": "Zewen Chi, Li Dong, Shaohan Huang, Damai Dai, Shuming Ma, Barun Patra, Saksham Singhal, Payal Bajaj, Xia Song, Xian-Ling Mao, Heyan Huang, Furu Wei", + "published": "2022-04-20", + "updated": "2022-10-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2010.14260v2", + "title": "Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability", + "abstract": "In this paper, we consider mixtures of two Mallows models for top-$k$\nrankings, both with the same location parameter but with different scale\nparameters, i.e., a mixture of concentric Mallows models. This situation arises\nwhen we have a heterogeneous population of voters formed by two homogeneous\npopulations, one of which is a subpopulation of expert voters while the other\nincludes the non-expert voters. We propose efficient sampling algorithms for\nMallows top-$k$ rankings. We show the identifiability of both components, and\nthe learnability of their respective parameters in this setting by, first,\nbounding the sample complexity for the Borda algorithm with top-$k$ rankings\nand second, proposing polynomial time algorithm for the separation of the\nrankings in each component. 
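For orientation, the Borda step referred to in this abstract reduces, for top-k rankings, to awarding positional points only over the observed prefix of each ballot and then ordering items by accumulated score. A tiny illustrative implementation with made-up ballots follows; the scoring convention for unranked items is an assumption here, not a detail taken from the paper.

```python
from collections import defaultdict

def borda_topk(rankings, k):
    """Aggregate top-k rankings: position i in a ballot earns (k - i) points."""
    scores = defaultdict(float)
    for ballot in rankings:
        for i, item in enumerate(ballot[:k]):
            scores[item] += k - i
    # Consensus ranking: items sorted by total Borda score, highest first.
    return sorted(scores, key=scores.get, reverse=True)

votes = [["a", "b", "c"], ["a", "c", "d"], ["b", "a", "e"], ["d", "b", "a"]]
print(borda_topk(votes, k=3))   # ['a', 'b', 'd', 'c', 'e']
```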
Finally, since the rank aggregation will suffer\nfrom a large amount of noise introduced by the non-expert voters, we adapt the\nBorda algorithm to be able to recover the ground truth consensus ranking which\nis especially consistent with the expert rankings.", + "authors": "Collas Fabien, Irurozki Ekhine", + "published": "2020-10-27", + "updated": "2020-11-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2209.13071v1", + "title": "Diversified Dynamic Routing for Vision Tasks", + "abstract": "Deep learning models for vision tasks are trained on large datasets under the\nassumption that there exists a universal representation that can be used to\nmake predictions for all samples. Whereas high complexity models are proven to\nbe capable of learning such representations, a mixture of experts trained on\nspecific subsets of the data can infer the labels more efficiently. However\nusing mixture of experts poses two new problems, namely (i) assigning the\ncorrect expert at inference time when a new unseen sample is presented. (ii)\nFinding the optimal partitioning of the training data, such that the experts\nrely the least on common features. In Dynamic Routing (DR) a novel architecture\nis proposed where each layer is composed of a set of experts, however without\naddressing the two challenges we demonstrate that the model reverts to using\nthe same subset of experts.\n In our method, Diversified Dynamic Routing (DivDR) the model is explicitly\ntrained to solve the challenge of finding relevant partitioning of the data and\nassigning the correct experts in an unsupervised approach. We conduct several\nexperiments on semantic segmentation on Cityscapes and object detection and\ninstance segmentation on MS-COCO showing improved performance over several\nbaselines.", + "authors": "Botos Csaba, Adel Bibi, Yanwei Li, Philip Torr, Ser-Nam Lim", + "published": "2022-09-26", + "updated": "2022-09-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2012.02130v4", + "title": "A similarity-based Bayesian mixture-of-experts model", + "abstract": "We present a new nonparametric mixture-of-experts model for multivariate\nregression problems, inspired by the probabilistic k-nearest neighbors\nalgorithm. Using a conditionally specified model, predictions for out-of-sample\ninputs are based on similarities to each observed data point, yielding\npredictive distributions represented by Gaussian mixtures. Posterior inference\nis performed on the parameters of the mixture components as well as the\ndistance metric using a mean-field variational Bayes algorithm accompanied with\na stochastic gradient-based optimization procedure. 
The proposed method is\nespecially advantageous in settings where inputs are of relatively high\ndimension in comparison to the data size, where input-output relationships are\ncomplex, and where predictive distributions may be skewed or multimodal.\nComputational studies on five datasets, of which two are synthetically\ngenerated, illustrate clear advantages of our mixture-of-experts method for\nhigh-dimensional inputs, outperforming competitor models both in terms of\nvalidation metrics and visual inspection.", + "authors": "Tianfang Zhang, Rasmus Bokrantz, Jimmy Olsson", + "published": "2020-12-03", + "updated": "2022-08-03", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1703.09302v1", + "title": "Speech Enhancement using a Deep Mixture of Experts", + "abstract": "In this study we present a Deep Mixture of Experts (DMoE) neural-network\narchitecture for single microphone speech enhancement. By contrast to most\nspeech enhancement algorithms that overlook the speech variability mainly\ncaused by phoneme structure, our framework comprises a set of deep neural\nnetworks (DNNs), each one of which is an 'expert' in enhancing a given speech\ntype corresponding to a phoneme. A gating DNN determines which expert is\nassigned to a given speech segment. A speech presence probability (SPP) is then\nobtained as a weighted average of the expert SPP decisions, with the weights\ndetermined by the gating DNN. A soft spectral attenuation, based on the SPP, is\nthen applied to enhance the noisy speech signal. The experts and the gating\ncomponents of the DMoE network are trained jointly. As part of the training,\nspeech clustering into different subsets is performed in an unsupervised\nmanner. Therefore, unlike previous methods, a phoneme-labeled database is not\nrequired for the training procedure. A series of experiments with different\nnoise types verified the applicability of the new algorithm to the task of\nspeech enhancement. The proposed scheme outperforms other schemes that either\ndo not consider phoneme structure or use a simpler training methodology.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2017-03-27", + "updated": "2017-03-27", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1612.06879v1", + "title": "Robust mixture of experts modeling using the skew $t$ distribution", + "abstract": "Mixture of Experts (MoE) is a popular framework in the fields of statistics\nand machine learning for modeling heterogeneity in data for regression,\nclassification and clustering. MoE for continuous data are usually based on the\nnormal distribution. However, it is known that for data with asymmetric\nbehavior, heavy tails and atypical observations, the use of the normal\ndistribution is unsuitable. We introduce a new robust non-normal mixture of\nexperts modeling using the skew $t$ distribution. The proposed skew $t$ mixture\nof experts, named STMoE, handles these issues of the normal mixtures experts\nregarding possibly skewed, heavy-tailed and noisy data. We develop a dedicated\nexpectation conditional maximization (ECM) algorithm to estimate the model\nparameters by monotonically maximizing the observed data log-likelihood. We\ndescribe how the presented model can be used in prediction and in model-based\nclustering of regression data. 
Numerical experiments carried out on simulated\ndata show the effectiveness and the robustness of the proposed model in fitting\nnon-linear regression functions as well as in model-based clustering. Then, the\nproposed model is applied to the real-world data of tone perception for musical\ndata analysis, and the one of temperature anomalies for the analysis of climate\nchange data. The obtained results confirm the usefulness of the model for\npractical data analysis applications.", + "authors": "Faicel Chamroukhi", + "published": "2016-12-09", + "updated": "2016-12-09", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG", + "stat.ML", + "62, 62F, 62H30, 62h", + "G.3; I.2.6; I.5.1" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.06966v1", + "title": "Acquiring Diverse Skills using Curriculum Reinforcement Learning with Mixture of Experts", + "abstract": "Reinforcement learning (RL) is a powerful approach for acquiring a\ngood-performing policy. However, learning diverse skills is challenging in RL\ndue to the commonly used Gaussian policy parameterization. We propose\n\\textbf{Di}verse \\textbf{Skil}l \\textbf{L}earning (Di-SkilL), an RL method for\nlearning diverse skills using Mixture of Experts, where each expert formalizes\na skill as a contextual motion primitive. Di-SkilL optimizes each expert and\nits associate context distribution to a maximum entropy objective that\nincentivizes learning diverse skills in similar contexts. The per-expert\ncontext distribution enables automatic curricula learning, allowing each expert\nto focus on its best-performing sub-region of the context space. To overcome\nhard discontinuities and multi-modalities without any prior knowledge of the\nenvironment's unknown context probability space, we leverage energy-based\nmodels to represent the per-expert context distributions and demonstrate how we\ncan efficiently train them using the standard policy gradient objective. We\nshow on challenging robot simulation tasks that Di-SkilL can learn diverse and\nperformant skills.", + "authors": "Onur Celik, Aleksandar Taranovic, Gerhard Neumann", + "published": "2024-03-11", + "updated": "2024-03-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1809.04853v2", + "title": "Bayesian shrinkage in mixture of experts models: Identifying robust determinants of class membership", + "abstract": "A method for implicit variable selection in mixture of experts frameworks is\nproposed. We introduce a prior structure where information is taken from a set\nof independent covariates. Robust class membership predictors are identified\nusing a normal gamma prior. The resulting model setup is used in a finite\nmixture of Bernoulli distributions to find homogenous clusters of women in\nMozambique based on their information sources on HIV. 
Fully Bayesian inference\nis carried out via the implementation of a Gibbs sampler.", + "authors": "Gregor Zens", + "published": "2018-09-13", + "updated": "2019-01-12", + "primary_cat": "econ.EM", + "cats": [ + "econ.EM", + "62F15, 62J07, 62H30, 90-08" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.06066v1", + "title": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", + "abstract": "In the era of large language models, Mixture-of-Experts (MoE) is a promising\narchitecture for managing computational costs when scaling up model parameters.\nHowever, conventional MoE architectures like GShard, which activate the top-$K$\nout of $N$ experts, face challenges in ensuring expert specialization, i.e.\neach expert acquires non-overlapping and focused knowledge. In response, we\npropose the DeepSeekMoE architecture towards ultimate expert specialization. It\ninvolves two principal strategies: (1) finely segmenting the experts into $mN$\nones and activating $mK$ from them, allowing for a more flexible combination of\nactivated experts; (2) isolating $K_s$ experts as shared ones, aiming at\ncapturing common knowledge and mitigating redundancy in routed experts.\nStarting from a modest scale with 2B parameters, we demonstrate that\nDeepSeekMoE 2B achieves comparable performance with GShard 2.9B, which has 1.5\ntimes the expert parameters and computation. In addition, DeepSeekMoE 2B nearly\napproaches the performance of its dense counterpart with the same number of\ntotal parameters, which set the upper bound of MoE models. Subsequently, we\nscale up DeepSeekMoE to 16B parameters and show that it achieves comparable\nperformance with LLaMA2 7B, with only about 40% of computations. Further, our\npreliminary efforts to scale up DeepSeekMoE to 145B parameters consistently\nvalidate its substantial advantages over the GShard architecture, and show its\nperformance comparable with DeepSeek 67B, using only 28.5% (maybe even 18.2%)\nof computations.", + "authors": "Damai Dai, Chengqi Deng, Chenggang Zhao, R. X. Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y. Wu, Zhenda Xie, Y. K. Li, Panpan Huang, Fuli Luo, Chong Ruan, Zhifang Sui, Wenfeng Liang", + "published": "2024-01-11", + "updated": "2024-01-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.09368v2", + "title": "Mixture-of-Experts with Expert Choice Routing", + "abstract": "Sparsely-activated Mixture-of-experts (MoE) models allow the number of\nparameters to greatly increase while keeping the amount of computation for a\ngiven token or a given sample unchanged. However, a poor expert routing\nstrategy (e.g. one resulting in load imbalance) can cause certain experts to be\nunder-trained, leading to an expert being under or over-specialized. Prior work\nallocates a fixed number of experts to each token using a top-k function\nregardless of the relative importance of different tokens. To address this, we\npropose a heterogeneous mixture-of-experts employing an expert choice method.\nInstead of letting tokens select the top-k experts, we have experts selecting\nthe top-k tokens. As a result, each token can be routed to a variable number of\nexperts and each expert can have a fixed bucket size. 
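The contrast drawn here between the two sparse routing schemes can be made concrete in a few lines of NumPy: token choice lets every token keep its k highest-scoring experts, while expert choice lets every expert keep a fixed-capacity bucket of its highest-scoring tokens, so the number of experts per token becomes variable. The shapes and the capacity value below are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(3)
n_tokens, n_experts, k, capacity = 6, 4, 2, 3
scores = rng.normal(size=(n_tokens, n_experts))   # router affinities

# Token choice: each token is routed to its k highest-scoring experts.
token_choice = np.argsort(-scores, axis=1)[:, :k]            # (n_tokens, k)

# Expert choice: each expert takes its `capacity` highest-scoring tokens,
# so a token may end up with zero, one, or several experts.
expert_choice = np.argsort(-scores, axis=0)[:capacity, :].T  # (n_experts, capacity)

print("token -> experts:", token_choice.tolist())
print("expert -> tokens:", expert_choice.tolist())
```

A full layer would also use the selected scores to weight the expert outputs; only the assignment step is shown here.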
We systematically study\npre-training speedups using the same computational resources of the Switch\nTransformer top-1 and GShard top-2 gating of prior work and find that our\nmethod improves training convergence time by more than 2x. For the same\ncomputational cost, our method demonstrates higher performance in fine-tuning\n11 selected tasks in the GLUE and SuperGLUE benchmarks. For a smaller\nactivation cost, our method outperforms the T5 dense model in 7 out of the 11\ntasks.", + "authors": "Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew Dai, Zhifeng Chen, Quoc Le, James Laudon", + "published": "2022-02-18", + "updated": "2022-10-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.07806v1", + "title": "Transformer Based Multi-Source Domain Adaptation", + "abstract": "In practical machine learning settings, the data on which a model must make\npredictions often come from a different distribution than the data it was\ntrained on. Here, we investigate the problem of unsupervised multi-source\ndomain adaptation, where a model is trained on labelled data from multiple\nsource domains and must make predictions on a domain for which no labelled data\nhas been seen. Prior work with CNNs and RNNs has demonstrated the benefit of\nmixture of experts, where the predictions of multiple domain expert classifiers\nare combined; as well as domain adversarial training, to induce a domain\nagnostic representation space. Inspired by this, we investigate how such\nmethods can be effectively applied to large pretrained transformer models. We\nfind that domain adversarial training has an effect on the learned\nrepresentations of these models while having little effect on their\nperformance, suggesting that large transformer-based models are already\nrelatively robust across domains. Additionally, we show that mixture of experts\nleads to significant performance improvements by comparing several variants of\nmixing functions, including one novel mixture based on attention. Finally, we\ndemonstrate that the predictions of large pretrained transformer based domain\nexperts are highly homogenous, making it challenging to learn effective\nfunctions for mixing their predictions.", + "authors": "Dustin Wright, Isabelle Augenstein", + "published": "2020-09-16", + "updated": "2020-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09832v3", + "title": "Merging Experts into One: Improving Computational Efficiency of Mixture of Experts", + "abstract": "Scaling the size of language models usually leads to remarkable advancements\nin NLP tasks. But it often comes with a price of growing computational cost.\nAlthough a sparse Mixture of Experts (MoE) can reduce the cost by activating a\nsmall subset of parameters (e.g., one expert) for each input, its computation\nescalates significantly if increasing the number of activated experts, limiting\nits practical utility. Can we retain the advantages of adding more experts\nwithout substantially increasing the computational costs? In this paper, we\nfirst demonstrate the superiority of selecting multiple experts and then\npropose a computation-efficient approach called \\textbf{\\texttt{Merging Experts\ninto One}} (MEO), which reduces the computation cost to that of a single\nexpert. 
Extensive experiments show that MEO significantly improves\ncomputational efficiency, e.g., FLOPS drops from 72.0G of vanilla MoE to 28.6G\n(MEO). Moreover, we propose a token-level attention block that further enhances\nthe efficiency and performance of token-level MEO, e.g., 83.3\\% (MEO) vs.\n82.6\\% (vanilla MoE) average score on the GLUE benchmark. Our code will be\nreleased upon acceptance. Code will be released at:\n\\url{https://github.com/Shwai-He/MEO}.", + "authors": "Shwai He, Run-Ze Fan, Liang Ding, Li Shen, Tianyi Zhou, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-11-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.00361v1", + "title": "AdaMoLE: Fine-Tuning Large Language Models with Adaptive Mixture of Low-Rank Adaptation Experts", + "abstract": "We introduce AdaMoLE, a novel method for fine-tuning large language models\n(LLMs) through an Adaptive Mixture of Low-Rank Adaptation (LoRA) Experts.\nMoving beyond conventional methods that employ a static top-k strategy for\nactivating experts, AdaMoLE dynamically adjusts the activation threshold using\na dedicated threshold network, adaptively responding to the varying\ncomplexities of different tasks. By replacing a single LoRA in a layer with\nmultiple LoRA experts and integrating a gating function with the threshold\nmechanism, AdaMoLE effectively selects and activates the most appropriate\nexperts based on the input context. Our extensive evaluations across a variety\nof commonsense reasoning and natural language processing tasks show that\nAdaMoLE exceeds baseline performance. This enhancement highlights the\nadvantages of AdaMoLE's adaptive selection of LoRA experts, improving model\neffectiveness without a corresponding increase in the expert count. The\nexperimental validation not only confirms AdaMoLE as a robust approach for\nenhancing LLMs but also suggests valuable directions for future research in\nadaptive expert selection mechanisms, potentially broadening the scope for\noptimizing model performance across diverse language processing tasks.", + "authors": "Zefang Liu, Jiahua Luo", + "published": "2024-05-01", + "updated": "2024-05-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1409.4698v1", + "title": "A Mixtures-of-Experts Framework for Multi-Label Classification", + "abstract": "We develop a novel probabilistic approach for multi-label classification that\nis based on the mixtures-of-experts architecture combined with recently\nintroduced conditional tree-structured Bayesian networks. Our approach captures\ndifferent input-output relations from multi-label data using the efficient\ntree-structured classifiers, while the mixtures-of-experts architecture aims to\ncompensate for the tree-structured restrictions and build a more accurate\nmodel. We develop and present algorithms for learning the model from data and\nfor performing multi-label predictions on future data instances. 
Experiments on\nmultiple benchmark datasets demonstrate that our approach achieves highly\ncompetitive results and outperforms the existing state-of-the-art multi-label\nclassification methods.", + "authors": "Charmgil Hong, Iyad Batal, Milos Hauskrecht", + "published": "2014-09-16", + "updated": "2014-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "I.2.6" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.13850v2", + "title": "Statistical Perspective of Top-K Sparse Softmax Gating Mixture of Experts", + "abstract": "Top-K sparse softmax gating mixture of experts has been widely used for\nscaling up massive deep-learning architectures without increasing the\ncomputational cost. Despite its popularity in real-world applications, the\ntheoretical understanding of that gating function has remained an open problem.\nThe main challenge comes from the structure of the top-K sparse softmax gating\nfunction, which partitions the input space into multiple regions with distinct\nbehaviors. By focusing on a Gaussian mixture of experts, we establish\ntheoretical results on the effects of the top-K sparse softmax gating function\non both density and parameter estimations. Our results hinge upon defining\nnovel loss functions among parameters to capture different behaviors of the\ninput regions. When the true number of experts $k_{\\ast}$ is known, we\ndemonstrate that the convergence rates of density and parameter estimations are\nboth parametric on the sample size. However, when $k_{\\ast}$ becomes unknown\nand the true model is over-specified by a Gaussian mixture of $k$ experts where\n$k > k_{\\ast}$, our findings suggest that the number of experts selected from\nthe top-K sparse softmax gating function must exceed the total cardinality of a\ncertain number of Voronoi cells associated with the true parameters to\nguarantee the convergence of the density estimation. Moreover, while the\ndensity estimation rate remains parametric under this setting, the parameter\nestimation rates become substantially slow due to an intrinsic interaction\nbetween the softmax gating and expert functions.", + "authors": "Huy Nguyen, Pedram Akbarian, Fanqi Yan, Nhat Ho", + "published": "2023-09-25", + "updated": "2024-02-23", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.02952v1", + "title": "On Least Squares Estimation in Softmax Gating Mixture of Experts", + "abstract": "Mixture of experts (MoE) model is a statistical machine learning design that\naggregates multiple expert networks using a softmax gating function in order to\nform a more intricate and expressive model. Despite being commonly used in\nseveral applications owing to their scalability, the mathematical and\nstatistical properties of MoE models are complex and difficult to analyze. As a\nresult, previous theoretical works have primarily focused on probabilistic MoE\nmodels by imposing the impractical assumption that the data are generated from\na Gaussian MoE model. In this work, we investigate the performance of the least\nsquares estimators (LSE) under a deterministic MoE model where the data are\nsampled according to a regression model, a setting that has remained largely\nunexplored. We establish a condition called strong identifiability to\ncharacterize the convergence behavior of various types of expert functions. 
We\ndemonstrate that the rates for estimating strongly identifiable experts, namely\nthe widely used feed forward networks with activation functions\n$\\mathrm{sigmoid}(\\cdot)$ and $\\tanh(\\cdot)$, are substantially faster than\nthose of polynomial experts, which we show to exhibit a surprising slow\nestimation rate. Our findings have important practical implications for expert\nselection.", + "authors": "Huy Nguyen, Nhat Ho, Alessandro Rinaldo", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.16610v1", + "title": "Efficient Deweather Mixture-of-Experts with Uncertainty-aware Feature-wise Linear Modulation", + "abstract": "The Mixture-of-Experts (MoE) approach has demonstrated outstanding\nscalability in multi-task learning including low-level upstream tasks such as\nconcurrent removal of multiple adverse weather effects. However, the\nconventional MoE architecture with parallel Feed Forward Network (FFN) experts\nleads to significant parameter and computational overheads that hinder its\nefficient deployment. In addition, the naive MoE linear router is suboptimal in\nassigning task-specific features to multiple experts which limits its further\nscalability. In this work, we propose an efficient MoE architecture with weight\nsharing across the experts. Inspired by the idea of linear feature modulation\n(FM), our architecture implicitly instantiates multiple experts via learnable\nactivation modulations on a single shared expert block. The proposed Feature\nModulated Expert (FME) serves as a building block for the novel\nMixture-of-Feature-Modulation-Experts (MoFME) architecture, which can scale up\nthe number of experts with low overhead. We further propose an\nUncertainty-aware Router (UaR) to assign task-specific features to different FM\nmodules with well-calibrated weights. This enables MoFME to effectively learn\ndiverse expert functions for multiple tasks. The conducted experiments on the\nmulti-deweather task show that our MoFME outperforms the baselines in the image\nrestoration quality by 0.1-0.2 dB and achieves SOTA-compatible performance\nwhile saving more than 72% of parameters and 39% inference time over the\nconventional MoE counterpart. Experiments on the downstream segmentation and\nclassification tasks further demonstrate the generalizability of MoFME to real\nopen-world applications.", + "authors": "Rongyu Zhang, Yulin Luo, Jiaming Liu, Huanrui Yang, Zhen Dong, Denis Gudovskiy, Tomoyuki Okuno, Yohei Nakata, Kurt Keutzer, Yuan Du, Shanghang Zhang", + "published": "2023-12-27", + "updated": "2023-12-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2008.09662v1", + "title": "Biased Mixtures Of Experts: Enabling Computer Vision Inference Under Data Transfer Limitations", + "abstract": "We propose a novel mixture-of-experts class to optimize computer vision\nmodels in accordance with data transfer limitations at test time. Our approach\npostulates that the minimum acceptable amount of data allowing for\nhighly-accurate results can vary for different input space partitions.\nTherefore, we consider mixtures where experts require different amounts of\ndata, and train a sparse gating function to divide the input space for each\nexpert. 
By appropriate hyperparameter selection, our approach is able to bias\nmixtures of experts towards selecting specific experts over others. In this\nway, we show that the data transfer optimization between visual sensing and\nprocessing can be solved as a convex optimization problem.To demonstrate the\nrelation between data availability and performance, we evaluate biased mixtures\non a range of mainstream computer vision problems, namely: (i) single shot\ndetection, (ii) image super resolution, and (iii) realtime video action\nclassification. For all cases, and when experts constitute modified baselines\nto meet different limits on allowed data utility, biased mixtures significantly\noutperform previous work optimized to meet the same constraints on available\ndata.", + "authors": "Alhabib Abbas, Yiannis Andreopoulos", + "published": "2020-08-21", + "updated": "2020-08-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.IV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1901.10668v2", + "title": "Doubly Sparse: Sparse Mixture of Sparse Experts for Efficient Softmax Inference", + "abstract": "Computations for the softmax function are significantly expensive when the\nnumber of output classes is large. In this paper, we present a novel softmax\ninference speedup method, Doubly Sparse Softmax (DS-Softmax), that leverages\nsparse mixture of sparse experts to efficiently retrieve top-k classes.\nDifferent from most existing methods that require and approximate a fixed\nsoftmax, our method is learning-based and can adapt softmax weights for a\nbetter inference speedup. In particular, our method learns a two-level\nhierarchy which divides entire output class space into several partially\noverlapping experts. Each expert is sparse and only contains a subset of output\nclasses. To find top-k classes, a sparse mixture enables us to find the most\nprobable expert quickly, and the sparse expert enables us to search within a\nsmall-scale softmax. We empirically conduct evaluation on several real-world\ntasks, including neural machine translation, language modeling and image\nclassification, and demonstrate that significant computation reductions can be\nachieved at no performance loss.", + "authors": "Shun Liao, Ting Chen, Tian Lin, Denny Zhou, Chong Wang", + "published": "2019-01-30", + "updated": "2019-07-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.03292v1", + "title": "Enhancing Molecular Property Prediction via Mixture of Collaborative Experts", + "abstract": "Molecular Property Prediction (MPP) task involves predicting biochemical\nproperties based on molecular features, such as molecular graph structures,\ncontributing to the discovery of lead compounds in drug development. To address\ndata scarcity and imbalance in MPP, some studies have adopted Graph Neural\nNetworks (GNN) as an encoder to extract commonalities from molecular graphs.\nHowever, these approaches often use a separate predictor for each task,\nneglecting the shared characteristics among predictors corresponding to\ndifferent tasks. In response to this limitation, we introduce the GNN-MoCE\narchitecture. It employs the Mixture of Collaborative Experts (MoCE) as\npredictors, exploiting task commonalities while confronting the homogeneity\nissue in the expert pool and the decision dominance dilemma within the expert\ngroup. 
To enhance expert diversity for collaboration among all experts, the\nExpert-Specific Projection method is proposed to assign a unique projection\nperspective to each expert. To balance decision-making influence for\ncollaboration within the expert group, the Expert-Specific Loss is presented to\nintegrate individual expert loss into the weighted decision loss of the group\nfor more equitable training. Benefiting from the enhancements of MoCE in expert\ncreation, dynamic expert group formation, and experts' collaboration, our model\ndemonstrates superior performance over traditional methods on 24 MPP datasets,\nespecially in tasks with limited data or high imbalance.", + "authors": "Xu Yao, Shuang Liang, Songqiao Han, Hailiang Huang", + "published": "2023-12-06", + "updated": "2023-12-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.MA", + "q-bio.QM" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.12830v1", + "title": "Mixtures of Gaussian Process Experts with SMC$^2$", + "abstract": "Gaussian processes are a key component of many flexible statistical and\nmachine learning models. However, they exhibit cubic computational complexity\nand high memory constraints due to the need of inverting and storing a full\ncovariance matrix. To circumvent this, mixtures of Gaussian process experts\nhave been considered where data points are assigned to independent experts,\nreducing the complexity by allowing inference based on smaller, local\ncovariance matrices. Moreover, mixtures of Gaussian process experts\nsubstantially enrich the model's flexibility, allowing for behaviors such as\nnon-stationarity, heteroscedasticity, and discontinuities. In this work, we\nconstruct a novel inference approach based on nested sequential Monte Carlo\nsamplers to simultaneously infer both the gating network and Gaussian process\nexpert parameters. This greatly improves inference compared to importance\nsampling, particularly in settings when a stationary Gaussian process is\ninappropriate, while still being thoroughly parallelizable.", + "authors": "Teemu H\u00e4rk\u00f6nen, Sara Wade, Kody Law, Lassi Roininen", + "published": "2022-08-26", + "updated": "2022-08-26", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.13750v1", + "title": "MoLE : Mixture of Language Experts for Multi-Lingual Automatic Speech Recognition", + "abstract": "Multi-lingual speech recognition aims to distinguish linguistic expressions\nin different languages and integrate acoustic processing simultaneously. In\ncontrast, current multi-lingual speech recognition research follows a\nlanguage-aware paradigm, mainly targeted to improve recognition performance\nrather than discriminate language characteristics. In this paper, we present a\nmulti-lingual speech recognition network named\nMixture-of-Language-Expert(MoLE), which digests speech in a variety of\nlanguages. Specifically, MoLE analyzes linguistic expression from input speech\nin arbitrary languages, activating a language-specific expert with a\nlightweight language tokenizer. The tokenizer not only activates experts, but\nalso estimates the reliability of the activation. Based on the reliability, the\nactivated expert and the language-agnostic expert are aggregated to represent\nlanguage-conditioned embedding for efficient speech recognition. 
Our proposed\nmodel is evaluated in 5 languages scenario, and the experimental results show\nthat our structure is advantageous on multi-lingual recognition, especially for\nspeech in low-resource language.", + "authors": "Yoohwan Kwon, Soo-Whan Chung", + "published": "2023-02-27", + "updated": "2023-02-27", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.CL", + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2212.00471v1", + "title": "Implicit Mixture of Interpretable Experts for Global and Local Interpretability", + "abstract": "We investigate the feasibility of using mixtures of interpretable experts\n(MoIE) to build interpretable image classifiers on MNIST10. MoIE uses a\nblack-box router to assign each input to one of many inherently interpretable\nexperts, thereby providing insight into why a particular classification\ndecision was made. We find that a naively trained MoIE will learn to 'cheat',\nwhereby the black-box router will solve the classification problem by itself,\nwith each expert simply learning a constant function for one particular class.\nWe propose to solve this problem by introducing interpretable routers and\ntraining the black-box router's decisions to match the interpretable router. In\naddition, we propose a novel implicit parameterization scheme that allows us to\nbuild mixtures of arbitrary numbers of experts, allowing us to study how\nclassification performance, local and global interpretability vary as the\nnumber of experts is increased. Our new model, dubbed Implicit Mixture of\nInterpretable Experts (IMoIE) can match state-of-the-art classification\naccuracy on MNIST10 while providing local interpretability, and can provide\nglobal interpretability albeit at the cost of reduced classification accuracy.", + "authors": "Nathan Elazar, Kerry Taylor", + "published": "2022-12-01", + "updated": "2022-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2308.00951v1", + "title": "From Sparse to Soft Mixtures of Experts", + "abstract": "Sparse mixture of expert architectures (MoEs) scale model capacity without\nlarge increases in training or inference costs. Despite their success, MoEs\nsuffer from a number of issues: training instability, token dropping, inability\nto scale the number of experts, or ineffective finetuning. In this work, we\nproposeSoft MoE, a fully-differentiable sparse Transformer that addresses these\nchallenges, while maintaining the benefits of MoEs. Soft MoE performs an\nimplicit soft assignment by passing different weighted combinations of all\ninput tokens to each expert. As in other MoE works, experts in Soft MoE only\nprocess a subset of the (combined) tokens, enabling larger model capacity at\nlower inference cost. In the context of visual recognition, Soft MoE greatly\noutperforms standard Transformers (ViTs) and popular MoE variants (Tokens\nChoice and Experts Choice). For example, Soft MoE-Base/16 requires 10.5x lower\ninference cost (5.7x lower wall-clock time) than ViT-Huge/14 while matching its\nperformance after similar training. 
Soft MoE also scales well: Soft MoE Huge/14\nwith 128 experts in 16 MoE layers has over 40x more parameters than ViT\nHuge/14, while inference time cost grows by only 2%, and it performs\nsubstantially better.", + "authors": "Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Neil Houlsby", + "published": "2023-08-02", + "updated": "2023-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2006.13309v4", + "title": "Fast Deep Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts have become an indispensable tool for flexible modelling\nin a supervised learning context, allowing not only the mean function but the\nentire density of the output to change with the inputs. Sparse Gaussian\nprocesses (GP) have shown promise as a leading candidate for the experts in\nsuch models, and in this article, we propose to design the gating network for\nselecting the experts from such mixtures of sparse GPs using a deep neural\nnetwork (DNN). Furthermore, a fast one pass algorithm called\nCluster-Classify-Regress (CCR) is leveraged to approximate the maximum a\nposteriori (MAP) estimator extremely quickly. This powerful combination of\nmodel and algorithm together delivers a novel method which is flexible, robust,\nand extremely efficient. In particular, the method is able to outperform\ncompeting methods in terms of accuracy and uncertainty quantification. The cost\nis competitive on low-dimensional and small data sets, but is significantly\nlower for higher-dimensional and big data sets. Iteratively maximizing the\ndistribution of experts given allocations and allocations given experts does\nnot provide significant improvement, which indicates that the algorithm\nachieves a good approximation to the local MAP estimator very fast. This\ninsight can be useful also in the context of other mixture of experts models.", + "authors": "Clement Etienam, Kody Law, Sara Wade, Vitaly Zankin", + "published": "2020-06-11", + "updated": "2023-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.02806v2", + "title": "Graph Mixture of Experts: Learning on Large-Scale Graphs with Explicit Diversity Modeling", + "abstract": "Graph neural networks (GNNs) have found extensive applications in learning\nfrom graph data. However, real-world graphs often possess diverse structures\nand comprise nodes and edges of varying types. To bolster the generalization\ncapacity of GNNs, it has become customary to augment training graph structures\nthrough techniques like graph augmentations and large-scale pre-training on a\nwider array of graphs. Balancing this diversity while avoiding increased\ncomputational costs and the notorious trainability issues of GNNs is crucial.\nThis study introduces the concept of Mixture-of-Experts (MoE) to GNNs, with the\naim of augmenting their capacity to adapt to a diverse range of training graph\nstructures, without incurring explosive computational overhead. The proposed\nGraph Mixture of Experts (GMoE) model empowers individual nodes in the graph to\ndynamically and adaptively select more general information aggregation experts.\nThese experts are trained to capture distinct subgroups of graph structures and\nto incorporate information with varying hop sizes, where those with larger hop\nsizes specialize in gathering information over longer distances. 
The\neffectiveness of GMoE is validated through a series of experiments on a diverse\nset of tasks, including graph, node, and link prediction, using the OGB\nbenchmark. Notably, it enhances ROC-AUC by $1.81\\%$ in ogbg-molhiv and by\n$1.40\\%$ in ogbg-molbbbp, when compared to the non-MoE baselines. Our code is\npublicly available at https://github.com/VITA-Group/Graph-Mixture-of-Experts.", + "authors": "Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Rao Kompella, Zhangyang Wang", + "published": "2023-04-06", + "updated": "2023-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.14800v1", + "title": "Not All Experts are Equal: Efficient Expert Pruning and Skipping for Mixture-of-Experts Large Language Models", + "abstract": "A pivotal advancement in the progress of large language models (LLMs) is the\nemergence of the Mixture-of-Experts (MoE) LLMs. Compared to traditional LLMs,\nMoE LLMs can achieve higher performance with fewer parameters, but it is still\nhard to deploy them due to their immense parameter sizes. Different from\nprevious weight pruning methods that rely on specifically designed hardware,\nthis paper mainly aims to enhance the deployment efficiency of MoE LLMs by\nintroducing plug-and-play expert-level sparsification techniques. Specifically,\nwe propose, for the first time to our best knowledge, post-training approaches\nfor task-agnostic and task-specific expert pruning and skipping of MoE LLMs,\ntailored to improve deployment efficiency while maintaining model performance\nacross a wide range of tasks. Extensive experiments show that our proposed\nmethods can simultaneously reduce model sizes and increase the inference speed,\nwhile maintaining satisfactory performance. Data and code will be available at\nhttps://github.com/Lucky-Lance/Expert_Sparsity.", + "authors": "Xudong Lu, Qi Liu, Yuhui Xu, Aojun Zhou, Siyuan Huang, Bo Zhang, Junchi Yan, Hongsheng Li", + "published": "2024-02-22", + "updated": "2024-02-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.04832v1", + "title": "Dynamic Partition Models", + "abstract": "We present a new approach for learning compact and intuitive distributed\nrepresentations with binary encoding. Rather than summing up expert votes as in\nproducts of experts, we employ for each variable the opinion of the most\nreliable expert. Data points are hence explained through a partitioning of the\nvariables into expert supports. The partitions are dynamically adapted based on\nwhich experts are active. During the learning phase we adopt a smoothed version\nof this model that uses separate mixtures for each data dimension. 
In our\nexperiments we achieve accurate reconstructions of high-dimensional data points\nwith at most a dozen experts.", + "authors": "Marc Goessling, Yali Amit", + "published": "2017-02-16", + "updated": "2017-02-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12550v1", + "title": "Multilinear Mixture of Experts: Scalable Expert Specialization through Factorization", + "abstract": "The Mixture of Experts (MoE) paradigm provides a powerful way to decompose\ninscrutable dense layers into smaller, modular computations often more amenable\nto human interpretation, debugging, and editability. A major problem however\nlies in the computational cost of scaling the number of experts to achieve\nsufficiently fine-grained specialization. In this paper, we propose the\nMultilinear Mixutre of Experts (MMoE) layer to address this, focusing on vision\nmodels. MMoE layers perform an implicit computation on prohibitively large\nweight tensors entirely in factorized form. Consequently, MMoEs both (1) avoid\nthe issues incurred through the discrete expert routing in the popular 'sparse'\nMoE models, yet (2) do not incur the restrictively high inference-time costs of\n'soft' MoE alternatives. We present both qualitative and quantitative evidence\n(through visualization and counterfactual interventions respectively) that\nscaling MMoE layers when fine-tuning foundation models for vision tasks leads\nto more specialized experts at the class-level whilst remaining competitive\nwith the performance of parameter-matched linear layer counterparts. Finally,\nwe show that learned expert specialism further facilitates manual correction of\ndemographic bias in CelebA attribute classification. Our MMoE model code is\navailable at https://github.com/james-oldfield/MMoE.", + "authors": "James Oldfield, Markos Georgopoulos, Grigorios G. Chrysos, Christos Tzelepis, Yannis Panagakis, Mihalis A. Nicolaou, Jiankang Deng, Ioannis Patras", + "published": "2024-02-19", + "updated": "2024-02-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.05185v1", + "title": "Mixture of Weak & Strong Experts on Graphs", + "abstract": "Realistic graphs contain both rich self-features of nodes and informative\nstructures of neighborhoods, jointly handled by a GNN in the typical setup. We\npropose to decouple the two modalities by mixture of weak and strong experts\n(Mowst), where the weak expert is a light-weight Multi-layer Perceptron (MLP),\nand the strong expert is an off-the-shelf Graph Neural Network (GNN). To adapt\nthe experts' collaboration to different target nodes, we propose a \"confidence\"\nmechanism based on the dispersion of the weak expert's prediction logits. The\nstrong expert is conditionally activated when either the node's classification\nrelies on neighborhood information, or the weak expert has low model quality.\nWe reveal interesting training dynamics by analyzing the influence of the\nconfidence function on loss: our training algorithm encourages the\nspecialization of each expert by effectively generating soft splitting of the\ngraph. In addition, our \"confidence\" design imposes a desirable bias toward the\nstrong expert to benefit from GNN's better generalization capability. Mowst is\neasy to optimize and achieves strong expressive power, with a computation cost\ncomparable to a single GNN. 
Empirically, Mowst shows significant accuracy\nimprovement on 6 standard node classification benchmarks (including both\nhomophilous and heterophilous graphs).", + "authors": "Hanqing Zeng, Hanjia Lyu, Diyi Hu, Yinglong Xia, Jiebo Luo", + "published": "2023-11-09", + "updated": "2023-11-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.11412v1", + "title": "Expert Composer Policy: Scalable Skill Repertoire for Quadruped Robots", + "abstract": "We propose the expert composer policy, a framework to reliably expand the\nskill repertoire of quadruped agents. The composer policy links pair of experts\nvia transitions to a sampled target state, allowing experts to be composed\nsequentially. Each expert specializes in a single skill, such as a locomotion\ngait or a jumping motion. Instead of a hierarchical or mixture-of-experts\narchitecture, we train a single composer policy in an independent process that\nis not conditioned on the other expert policies. By reusing the same composer\npolicy, our approach enables adding new experts without affecting existing\nones, enabling incremental repertoire expansion and preserving original motion\nquality. We measured the transition success rate of 72 transition pairs and\nachieved an average success rate of 99.99\\%, which is over 10\\% higher than the\nbaseline random approach, and outperforms other state-of-the-art methods. Using\ndomain randomization during training we ensure a successful transfer to the\nreal world, where we achieve an average transition success rate of 97.22\\%\n(N=360) in our experiments.", + "authors": "Guilherme Christmann, Ying-Sheng Luo, Wei-Chao Chen", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.RO", + "cats": [ + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.06327v1", + "title": "Double-Wing Mixture of Experts for Streaming Recommendations", + "abstract": "Streaming Recommender Systems (SRSs) commonly train recommendation models on\nnewly received data only to address user preference drift, i.e., the changing\nuser preferences towards items. However, this practice overlooks the long-term\nuser preferences embedded in historical data. More importantly, the common\nheterogeneity in data stream greatly reduces the accuracy of streaming\nrecommendations. The reason is that different preferences (or characteristics)\nof different types of users (or items) cannot be well learned by a unified\nmodel. To address these two issues, we propose a Variational and\nReservoir-enhanced Sampling based Double-Wing Mixture of Experts framework,\ncalled VRS-DWMoE, to improve the accuracy of streaming recommendations. In\nVRS-DWMoE, we first devise variational and reservoir-enhanced sampling to\nwisely complement new data with historical data, and thus address the user\npreference drift issue while capturing long-term user preferences. After that,\nwe propose a Double-Wing Mixture of Experts (DWMoE) model to first effectively\nlearn heterogeneous user preferences and item characteristics, and then make\nrecommendations based on them. Specifically, DWMoE contains two Mixture of\nExperts (MoE, an effective ensemble learning model) to learn user preferences\nand item characteristics, respectively. 
Moreover, the multiple experts in each\nMoE learn the preferences (or characteristics) of different types of users (or\nitems) where each expert specializes in one underlying type. Extensive\nexperiments demonstrate that VRS-DWMoE consistently outperforms the\nstate-of-the-art SRSs.", + "authors": "Yan Zhao, Shoujin Wang, Yan Wang, Hongwei Liu, Weizhe Zhang", + "published": "2020-09-14", + "updated": "2020-09-14", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.12379v4", + "title": "Mixture of Cluster-conditional LoRA Experts for Vision-language Instruction Tuning", + "abstract": "Instruction tuning of Large Vision-language Models (LVLMs) has revolutionized\nthe development of versatile models with zero-shot generalization across a wide\nrange of downstream vision-language tasks. However, the diversity of training\ntasks of different sources and formats would lead to inevitable task conflicts,\nwhere different tasks conflict for the same set of model parameters, resulting\nin sub-optimal instructionfollowing abilities. To address that, we propose the\nMixture of Clusterconditional LoRA Experts (MoCLE), a novel Mixture of Experts\n(MoE) architecture designed to activate the task-customized model parameters\nbased on the instruction clusters. A separate universal expert is further\nincorporated to improve generalization capabilities of MoCLE for novel\ninstructions. Extensive experiments on 11 zero-shot tasks demonstrate the\neffectiveness of MoCLE.", + "authors": "Yunhao Gou, Zhili Liu, Kai Chen, Lanqing Hong, Hang Xu, Aoxue Li, Dit-Yan Yeung, James T. Kwok, Yu Zhang", + "published": "2023-12-19", + "updated": "2024-03-22", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.01334v2", + "title": "Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy", + "abstract": "Sparsely activated Mixture-of-Experts (SMoE) has shown promise to scale up\nthe learning capacity of neural networks, however, they have issues like (a)\nHigh Memory Usage, due to duplication of the network layers into multiple\ncopies as experts; and (b) Redundancy in Experts, as common learning-based\nrouting policies suffer from representational collapse. Therefore, vanilla SMoE\nmodels are memory inefficient and non-scalable, especially for\nresource-constrained downstream scenarios. In this paper, we ask: Can we craft\na compact SMoE model by consolidating expert information? What is the best\nrecipe to merge multiple experts into fewer but more knowledgeable experts? Our\npilot investigation reveals that conventional model merging methods fail to be\neffective in such expert merging for SMoE. The potential reasons are: (1)\nredundant information overshadows critical experts; (2) appropriate neuron\npermutation for each expert is missing to bring all of them in alignment. To\naddress this, we propose M-SMoE, which leverages routing statistics to guide\nexpert merging. Specifically, it starts with neuron permutation alignment for\nexperts; then, dominant experts and their \"group members\" are formed; lastly,\nevery expert group is merged into a single expert by utilizing each expert's\nactivation frequency as their weight for merging, thus diminishing the impact\nof insignificant experts. 
Moreover, we observed that our proposed merging\npromotes a low dimensionality in the merged expert's weight space, naturally\npaving the way for additional compression. Hence, our final method, MC-SMoE\n(i.e., Merge, then Compress SMoE), further decomposes the merged experts into\nlow-rank and structural sparse alternatives. Extensive experiments across 8\nbenchmarks validate the effectiveness of MC-SMoE. For instance, our MC-SMoE\nachieves up to 80% memory and a 20% FLOPs reduction, with virtually no loss in\nperformance.", + "authors": "Pingzhi Li, Zhenyu Zhang, Prateek Yadav, Yi-Lin Sung, Yu Cheng, Mohit Bansal, Tianlong Chen", + "published": "2023-10-02", + "updated": "2024-03-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2102.06034v1", + "title": "Speech enhancement with mixture-of-deep-experts with clean clustering pre-training", + "abstract": "In this study we present a mixture of deep experts (MoDE) neural-network\narchitecture for single microphone speech enhancement. Our architecture\ncomprises a set of deep neural networks (DNNs), each of which is an 'expert' in\na different speech spectral pattern such as phoneme. A gating DNN is\nresponsible for the latent variables which are the weights assigned to each\nexpert's output given a speech segment. The experts estimate a mask from the\nnoisy input and the final mask is then obtained as a weighted average of the\nexperts' estimates, with the weights determined by the gating DNN. A soft\nspectral attenuation, based on the estimated mask, is then applied to enhance\nthe noisy speech signal. As a byproduct, we gain reduction at the complexity in\ntest time. We show that the experts specialization allows better robustness to\nunfamiliar noise types.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2021-02-11", + "updated": "2021-02-11", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "cs.LG", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09762v1", + "title": "Diversifying the Mixture-of-Experts Representation for Language Models with Orthogonal Optimizer", + "abstract": "The Mixture of Experts (MoE) has emerged as a highly successful technique in\ndeep learning, based on the principle of divide-and-conquer to maximize model\ncapacity without significant additional computational cost. Even in the era of\nlarge-scale language models (LLMs), MoE continues to play a crucial role, as\nsome researchers have indicated that GPT-4 adopts the MoE structure to ensure\ndiverse inference results. However, MoE is susceptible to performance\ndegeneracy, particularly evident in the issues of imbalance and homogeneous\nrepresentation among experts. While previous studies have extensively addressed\nthe problem of imbalance, the challenge of homogeneous representation remains\nunresolved. In this study, we shed light on the homogeneous representation\nproblem, wherein experts in the MoE fail to specialize and lack diversity,\nleading to frustratingly high similarities in their representations (up to 99%\nin a well-performed MoE model). This problem restricts the expressive power of\nthe MoE and, we argue, contradicts its original intention. To tackle this\nissue, we propose a straightforward yet highly effective solution: OMoE, an\northogonal expert optimizer. 
Additionally, we introduce an alternating training\nstrategy that encourages each expert to update in a direction orthogonal to the\nsubspace spanned by other experts. Our algorithm facilitates MoE training in\ntwo key ways: firstly, it explicitly enhances representation diversity, and\nsecondly, it implicitly fosters interaction between experts during orthogonal\nweights computation. Through extensive experiments, we demonstrate that our\nproposed optimization algorithm significantly improves the performance of\nfine-tuning the MoE model on the GLUE benchmark, SuperGLUE benchmark,\nquestion-answering task, and name entity recognition tasks.", + "authors": "Boan Liu, Liang Ding, Li Shen, Keqin Peng, Yu Cao, Dazhao Cheng, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-10-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08396v1", + "title": "StableMoE: Stable Routing Strategy for Mixture of Experts", + "abstract": "The Mixture-of-Experts (MoE) technique can scale up the model size of\nTransformers with an affordable computational overhead. We point out that\nexisting learning-to-route MoE methods suffer from the routing fluctuation\nissue, i.e., the target expert of the same input may change along with\ntraining, but only one expert will be activated for the input during inference.\nThe routing fluctuation tends to harm sample efficiency because the same input\nupdates different experts but only one is finally used. In this paper, we\npropose StableMoE with two training stages to address the routing fluctuation\nproblem. In the first training stage, we learn a balanced and cohesive routing\nstrategy and distill it into a lightweight router decoupled from the backbone\nmodel. In the second training stage, we utilize the distilled router to\ndetermine the token-to-expert assignment and freeze it for a stable routing\nstrategy. We validate our method on language modeling and multilingual machine\ntranslation. The results show that StableMoE outperforms existing MoE methods\nin terms of both convergence speed and performance.", + "authors": "Damai Dai, Li Dong, Shuming Ma, Bo Zheng, Zhifang Sui, Baobao Chang, Furu Wei", + "published": "2022-04-18", + "updated": "2022-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.11449v2", + "title": "Dynamic Mixture of Experts Models for Online Prediction", + "abstract": "A mixture of experts models the conditional density of a response variable\nusing a mixture of regression models with covariate-dependent mixture weights.\nWe extend the finite mixture of experts model by allowing the parameters in\nboth the mixture components and the weights to evolve in time by following\nrandom walk processes. Inference for time-varying parameters in richly\nparameterized mixture of experts models is challenging. We propose a sequential\nMonte Carlo algorithm for online inference and based on a tailored proposal\ndistribution built on ideas from linear Bayes methods and the EM algorithm. The\nmethod gives a unified treatment for mixtures with time-varying parameters,\nincluding the special case of static parameters. 
We assess the properties of\nthe method on simulated data and on industrial data where the aim is to predict\nsoftware faults in a continuously upgraded large-scale software project.", + "authors": "Parfait Munezero, Mattias Villani, Robert Kohn", + "published": "2021-09-23", + "updated": "2022-10-13", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO", + "stat.AP" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.09179v1", + "title": "SiRA: Sparse Mixture of Low Rank Adaptation", + "abstract": "Parameter Efficient Tuning has been an prominent approach to adapt the Large\nLanguage Model to downstream tasks. Most previous works considers adding the\ndense trainable parameters, where all parameters are used to adapt certain\ntask. We found this less effective empirically using the example of LoRA that\nintroducing more trainable parameters does not help. Motivated by this we\ninvestigate the importance of leveraging \"sparse\" computation and propose SiRA:\nsparse mixture of low rank adaption. SiRA leverages the Sparse Mixture of\nExpert(SMoE) to boost the performance of LoRA. Specifically it enforces the top\n$k$ experts routing with a capacity limit restricting the maximum number of\ntokens each expert can process. We propose a novel and simple expert dropout on\ntop of gating network to reduce the over-fitting issue. Through extensive\nexperiments, we verify SiRA performs better than LoRA and other mixture of\nexpert approaches across different single tasks and multitask settings.", + "authors": "Yun Zhu, Nevan Wichers, Chu-Cheng Lin, Xinyi Wang, Tianlong Chen, Lei Shu, Han Lu, Canoee Liu, Liangchen Luo, Jindong Chen, Lei Meng", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1806.08200v1", + "title": "Mixtures of Experts Models", + "abstract": "Mixtures of experts models provide a framework in which covariates may be\nincluded in mixture models. This is achieved by modelling the parameters of the\nmixture model as functions of the concomitant covariates. Given their mixture\nmodel foundation, mixtures of experts models possess a diverse range of\nanalytic uses, from clustering observations to capturing parameter\nheterogeneity in cross-sectional data. This chapter focuses on delineating the\nmixture of experts modelling framework and demonstrates the utility and\nflexibility of mixtures of experts models as an analytic tool.", + "authors": "Isobel Claire Gormley, Sylvia Fr\u00fchwirth-Schnatter", + "published": "2018-06-21", + "updated": "2018-06-21", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.05238v3", + "title": "Universal Simultaneous Machine Translation with Mixture-of-Experts Wait-k Policy", + "abstract": "Simultaneous machine translation (SiMT) generates translation before reading\nthe entire source sentence and hence it has to trade off between translation\nquality and latency. To fulfill the requirements of different translation\nquality and latency in practical applications, the previous methods usually\nneed to train multiple SiMT models for different latency levels, resulting in\nlarge computational costs. In this paper, we propose a universal SiMT model\nwith Mixture-of-Experts Wait-k Policy to achieve the best translation quality\nunder arbitrary latency with only one trained model. 
Specifically, our method\nemploys multi-head attention to accomplish the mixture of experts where each\nhead is treated as a wait-k expert with its own waiting words number, and given\na test latency and source inputs, the weights of the experts are accordingly\nadjusted to produce the best translation. Experiments on three datasets show\nthat our method outperforms all the strong baselines under different latency,\nincluding the state-of-the-art adaptive policy.", + "authors": "Shaolei Zhang, Yang Feng", + "published": "2021-09-11", + "updated": "2022-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2307.05956v2", + "title": "Language-Routing Mixture of Experts for Multilingual and Code-Switching Speech Recognition", + "abstract": "Multilingual speech recognition for both monolingual and code-switching\nspeech is a challenging task. Recently, based on the Mixture of Experts (MoE),\nmany works have made good progress in multilingual and code-switching ASR, but\npresent huge computational complexity with the increase of supported languages.\nIn this work, we propose a computation-efficient network named Language-Routing\nMixture of Experts (LR-MoE) for multilingual and code-switching ASR. LR-MoE\nextracts language-specific representations through the Mixture of Language\nExperts (MLE), which is guided to learn by a frame-wise language routing\nmechanism. The weight-shared frame-level language identification (LID) network\nis jointly trained as the shared pre-router of each MoE layer. Experiments show\nthat the proposed method significantly improves multilingual and code-switching\nspeech recognition performances over baseline with comparable computational\nefficiency.", + "authors": "Wenxuan Wang, Guodong Ma, Yuke Li, Binbin Du", + "published": "2023-07-12", + "updated": "2023-07-14", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1811.10740v2", + "title": "Mixture of Regression Experts in fMRI Encoding", + "abstract": "fMRI semantic category understanding using linguistic encoding models attempt\nto learn a forward mapping that relates stimuli to the corresponding brain\nactivation. Classical encoding models use linear multi-variate methods to\npredict the brain activation (all voxels) given the stimulus. However, these\nmethods essentially assume multiple regions as one large uniform region or\nseveral independent regions, ignoring connections among them. In this paper, we\npresent a mixture of experts-based model where a group of experts captures\nbrain activity patterns related to particular regions of interest (ROI) and\nalso show the discrimination across different experts. The model is trained\nword stimuli encoded as 25-dimensional feature vectors as input and the\ncorresponding brain responses as output. Given a new word (25-dimensional\nfeature vector), it predicts the entire brain activation as the linear\ncombination of multiple experts brain activations. We argue that each expert\nlearns a certain region of brain activations corresponding to its category of\nwords, which solves the problem of identifying the regions with a simple\nencoding model. We showcase that proposed mixture of experts-based model indeed\nlearns region-based experts to predict the brain activations with high spatial\naccuracy.", + "authors": "Subba Reddy Oota, Adithya Avvaru, Naresh Manwani, Raju S. 
Bapi", + "published": "2018-11-26", + "updated": "2018-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.HC", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1511.06072v1", + "title": "Mediated Experts for Deep Convolutional Networks", + "abstract": "We present a new supervised architecture termed Mediated Mixture-of-Experts\n(MMoE) that allows us to improve classification accuracy of Deep Convolutional\nNetworks (DCN). Our architecture achieves this with the help of expert\nnetworks: A network is trained on a disjoint subset of a given dataset and then\nrun in parallel to other experts during deployment. A mediator is employed if\nexperts contradict each other. This allows our framework to naturally support\nincremental learning, as adding new classes requires (re-)training of the new\nexpert only. We also propose two measures to control computational complexity:\nAn early-stopping mechanism halts experts that have low confidence in their\nprediction. The system allows to trade-off accuracy and complexity without\nfurther retraining. We also suggest to share low-level convolutional layers\nbetween experts in an effort to avoid computation of a near-duplicate feature\nset. We evaluate our system on a popular dataset and report improved accuracy\ncompared to a single model of same configuration.", + "authors": "Sebastian Agethen, Winston H. Hsu", + "published": "2015-11-19", + "updated": "2015-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.01750v1", + "title": "Modular Approach to Machine Reading Comprehension: Mixture of Task-Aware Experts", + "abstract": "In this work we present a Mixture of Task-Aware Experts Network for Machine\nReading Comprehension on a relatively small dataset. We particularly focus on\nthe issue of common-sense learning, enforcing the common ground knowledge by\nspecifically training different expert networks to capture different kinds of\nrelationships between each passage, question and choice triplet. Moreover, we\ntake inspi ration on the recent advancements of multitask and transfer learning\nby training each network a relevant focused task. By making the\nmixture-of-networks aware of a specific goal by enforcing a task and a\nrelationship, we achieve state-of-the-art results and reduce over-fitting.", + "authors": "Anirudha Rayasam, Anusha Kamath, Gabriel Bayomi Tinoco Kalejaiye", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2107.04694v1", + "title": "Lifelong Mixture of Variational Autoencoders", + "abstract": "In this paper, we propose an end-to-end lifelong learning mixture of experts.\nEach expert is implemented by a Variational Autoencoder (VAE). The experts in\nthe mixture system are jointly trained by maximizing a mixture of individual\ncomponent evidence lower bounds (MELBO) on the log-likelihood of the given\ntraining samples. The mixing coefficients in the mixture, control the\ncontributions of each expert in the goal representation. These are sampled from\na Dirichlet distribution whose parameters are determined through non-parametric\nestimation during lifelong learning. The model can learn new tasks fast when\nthese are similar to those previously learnt. 
The proposed Lifelong mixture of\nVAE (L-MVAE) expands its architecture with new components when learning a\ncompletely new task. After the training, our model can automatically determine\nthe relevant expert to be used when fed with new data samples. This mechanism\nbenefits both the memory efficiency and the required computational cost as only\none expert is used during the inference. The L-MVAE inference model is able to\nperform interpolation in the joint latent space across the data domains\nassociated with different tasks and is shown to be efficient for disentangled\nlearning representation.", + "authors": "Fei Ye, Adrian G. Bors", + "published": "2021-07-09", + "updated": "2021-07-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.10598v3", + "title": "Sparsely-gated Mixture-of-Expert Layers for CNN Interpretability", + "abstract": "Sparsely-gated Mixture of Expert (MoE) layers have been recently successfully\napplied for scaling large transformers, especially for language modeling tasks.\nAn intriguing side effect of sparse MoE layers is that they convey inherent\ninterpretability to a model via natural expert specialization. In this work, we\napply sparse MoE layers to CNNs for computer vision tasks and analyze the\nresulting effect on model interpretability. To stabilize MoE training, we\npresent both soft and hard constraint-based approaches. With hard constraints,\nthe weights of certain experts are allowed to become zero, while soft\nconstraints balance the contribution of experts with an additional auxiliary\nloss. As a result, soft constraints handle expert utilization better and\nsupport the expert specialization process, while hard constraints maintain more\ngeneralized experts and increase overall model performance. Our findings\ndemonstrate that experts can implicitly focus on individual sub-domains of the\ninput space. For example, experts trained for CIFAR-100 image classification\nspecialize in recognizing different domains such as flowers or animals without\nprevious data clustering. Experiments with RetinaNet and the COCO dataset\nfurther indicate that object detection experts can also specialize in detecting\nobjects of distinct sizes.", + "authors": "Svetlana Pavlitska, Christian Hubschneider, Lukas Struppek, J. Marius Z\u00f6llner", + "published": "2022-04-22", + "updated": "2023-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + } + ], + [ + { + "url": "http://arxiv.org/abs/2305.18295v5", + "title": "RAPHAEL: Text-to-Image Generation via Large Mixture of Diffusion Paths", + "abstract": "Text-to-image generation has recently witnessed remarkable achievements. We\nintroduce a text-conditional image diffusion model, termed RAPHAEL, to generate\nhighly artistic images, which accurately portray the text prompts, encompassing\nmultiple nouns, adjectives, and verbs. This is achieved by stacking tens of\nmixture-of-experts (MoEs) layers, i.e., space-MoE and time-MoE layers, enabling\nbillions of diffusion paths (routes) from the network input to the output. Each\npath intuitively functions as a \"painter\" for depicting a particular textual\nconcept onto a specified image region at a diffusion timestep. 
Comprehensive\nexperiments reveal that RAPHAEL outperforms recent cutting-edge models, such as\nStable Diffusion, ERNIE-ViLG 2.0, DeepFloyd, and DALL-E 2, in terms of both\nimage quality and aesthetic appeal. Firstly, RAPHAEL exhibits superior\nperformance in switching images across diverse styles, such as Japanese comics,\nrealism, cyberpunk, and ink illustration. Secondly, a single model with three\nbillion parameters, trained on 1,000 A100 GPUs for two months, achieves a\nstate-of-the-art zero-shot FID score of 6.61 on the COCO dataset. Furthermore,\nRAPHAEL significantly surpasses its counterparts in human evaluation on the\nViLG-300 benchmark. We believe that RAPHAEL holds the potential to propel the\nfrontiers of image generation research in both academia and industry, paving\nthe way for future breakthroughs in this rapidly evolving field. More details\ncan be found on a webpage: https://raphael-painter.github.io/.", + "authors": "Zeyue Xue, Guanglu Song, Qiushan Guo, Boxiao Liu, Zhuofan Zong, Yu Liu, Ping Luo", + "published": "2023-05-29", + "updated": "2024-03-10", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Original Paper", + "paper_cat": "Mixture AND of AND Experts", + "gt": "We review related works from two perspectives, mixture-of-experts and text-to-image generation. More related works can be found in Appendix 7.4. Firstly, the Mixture-of-Experts (MoE) method [7, 8] partitions model parameters into distinct subsets, each termed an \u201cexpert\u201d. The MoE paradigm finds applicability beyond language processing tasks, extending to visual models [34] and Mixtureof-Modality-Experts within multi-modal transformers [35]. Additionally, efforts are being made to accelerate the training or inference processes for MoE [36, 37]. Secondly, text-to-image generation is to synthesize images from natural language descriptions. Early approaches relied on generative adversarial networks (GANs) [38, 39, 40, 41] to generate images. More recently, with the transformative success of transformers in generative tasks, models such as DALL-E [42], Cogview [43], and Make-A-Scene [29] have treated text-to-image generation as a sequence-to-sequence problem, utilizing auto-regressive transformers as generators and employing text/image tokens as input/output sequences. Recently, another research direction has focused on diffusion models by integrating textual conditioning within denoising steps, like Stable Diffusion [2], DALL-E 2 [3], eDiff-I [4], ERNIE-ViLG 2.0 [5], and Imagen [1]. 9", + "pre_questions": [], + "main_content": "Introduction Recent advancements in text-to-image generators, such as Imagen [1], Stable Diffusion [2], DALLE 2 [3], eDiff-I [4], and ERNIE-ViLG 2.0 [5], have yielded remarkable success and found wide applications in computer graphics, culture and art, and the generation of medical and biological data. Despite the substantial progress made in text-to-image diffusion models [1, 2, 3, 4, 5], there remains a pressing need for research to further achieve more precise alignment between text and image. As 2 illustrated in Fig.1, existing models often fail to adequately preserve textual concepts within the generated images. This is primarily due to the reliance on a classic cross-attention mechanism for integrating text descriptions into visual representations, resulting in relatively coarse control of the diffusion process, and leading to compromised results. 
To address this issue, we introduce RAPHAEL, a text-to-image generator, which yields images with superior artistry and fidelity compared to prior work, as demonstrated in Fig.2. RAPHAEL, an acronym that stands for \u201cdistinct image regions align with different text phases in attention learning\u201d, offers an appealing benefit not found in existing approaches. Specifically, we observe that different text concepts influence distinct image regions during the generation process [6], and the conventional cross-attention layer often struggles to preserve these varying concepts adequately in an image. To mitigate this issue, we employ a diffusion model stacking tens of mixture-of-experts (MoE) layers [7, 8], including both space-MoE and time-MoE layers. Concretely, the space-MoE layers are responsible for depicting different concepts in specific image regions, while the time-MoE layers focus on painting these concepts at different diffusion timesteps. This configuration leads to billions of diffusion paths from the network input to the output. Naturally, each path can act as a \u201cpainter\u201d responsible for rendering a particular concept to an image region at a specific timestep. The result is a more precise alignment between text tokens and image regions, enabling the generated images that accurately represent the associated text prompt. This approach sets RAPHAEL apart from existing models and even sheds light on future studies of the explainability of the generation process. Additionally, we propose an edge-supervised learning module to further enhance the image quality and aesthetic appeal of the generated images. Extensive experiments demonstrate that RAPHAEL outperforms preceding approaches, such as Stable Diffusion, ERNIE-ViLG 2.0, DeepFloyd, and DALL-E 2. (1) RAPHAEL exhibits superior performance in switching images across diverse styles, such as Japanese comics, realism, cyberpunk, and ink illustration. (2) RAPHAEL establishes a new state-of-the-art with a zero-shot FID-30k score of 6.61 on the COCO dataset. (3) RAPHAEL, a single model with three billion parameters trained on 1, 000 A100 GPUs, significantly surpasses its counterparts in human evaluation on the ViLG-300 benchmark. (4) RAPHAEL is capable of generating images with resolutions up to 4096 \u00d7 6144 with rich image contents and details, when combined with a tailor-made SR-GAN model [9]. The contributions of this work are three-fold: (i) We propose a novel text-to-image generator, RAPHAEL, which, through the implementation of several carefully-designed techniques, generates images that more accurately reflect textual prompts than previous works. (ii) We thoroughly explore RAPHAEL\u2019s potential for switching images in diverse styles, such as Japanese comics, realism, cyberpunk, and ink illustration, and for extension using LoRA [10], ControlNet [11], and SR-GAN [9]. (iii) We have released the demo of the latest version of RAPHAEL to the public*, which has been fine-tuned on more high aesthetics datasets. We believe that RAPHAEL holds the potential to advance the frontiers of image generation in both academia and industry, paving the way for future breakthroughs in this rapidly evolving field. 2 Notation and Preliminary We present the necessary notations and the Denoising Diffusion Probabilistic Model (DDPM) [12] for text-to-image generation. Given a collection of N images, denoted as {xi}N i=1, the aim is to learn a generative model, p(x), that is capable of accurately representing the underlying distribution. 
In forward diffusion, Gaussian noise is progressively introduced into the source images. At an arbitrary timestep t, it is possible to directly sample from the Gaussian distribution following the T-step noise schedule $\{\alpha_t\}_{t=1}^{T}$, without iterative forward sampling. Consequently, the noisy image at timestep t, denoted as $x_t$, can be expressed as $x_t = \sqrt{\bar{\alpha}_t}\,x_0 + \sqrt{1-\bar{\alpha}_t}\,\epsilon_t$, where $\bar{\alpha}_t = \prod_{i=1}^{t}\alpha_i$. In this expression, $x_0$ represents the source image, while $\epsilon_t \sim \mathcal{N}(0, I)$ indicates the Gaussian noise at step t. In the reverse process, a denoising neural network, denoted as $D_\theta(\cdot)$, is employed to estimate the additive Gaussian noise. The optimization of this network is achieved by minimizing the loss function $L_{denoise} = \mathbb{E}_{t, x_0, \epsilon \sim \mathcal{N}(0, I)}\left[\,\|\epsilon - D_\theta(x_t, t)\|_2^2\,\right]$. (*https://miaohua.sensetime.com/zh-CN) Figure 2: These examples show that RAPHAEL can generate artistic images with varying text prompts across various styles. The synthesized images have rich details and semantics. The prompts were written by human artists without cherry-picking. The prompts are: \u201cA wizard by Q Hayashida in the style of Dorohedoro for Elden Ring, with biggest most intricate sword, on sunlit battlefield, breath of the wild, striking illustration.\u201d; \u201cA beautiful woman dressed in a dress made of autumn leaves in the forest, photography, natural lighting, high detail.\u201d; \u201cHarvest of vegetables in a wooden box near the beds, vegetables grow naturally, summer light background, backlight and sun rays, clean sharp focus.\u201d; \u201cChinese illustration, oriental landscape painting, above super wide angle, magical, romantic, detailed, colorful, multi-dimensional paper kirigami craft.\u201d; \u201cPhotography closeup portrait of an adorable rusty broken-down steampunk robot covered in budding vegetation, surrounded by tall grass, misty futuristic sci-fi forest environment.\u201d; \u201cThe Caped Crusader, Gotham skyline, rooftop, mysterious, powerful, nighttime, mixed media, expressionism, dark tones, high contrast, in the style of comic book artist Frank Miller, modern, gritty and textured, collage technique.\u201d; \u201cThe Goddess of high fashion, impressionistic line art, contrasting earth tones, vibrant, pen and ink illustration, ink splatter, abstract expressionism superimposed onto majestic space queen.\u201d; \u201cA cute little matte low poly isometric Zelda Breath of the wild forest island, waterfalls, soft shadows, trending on Artstation, 3d render, monument valley, fez video game.\u201d; \u201cMilkyway in a glass bottle, 4k, unreal engine, octane render.\u201d
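For concreteness, the following is a minimal PyTorch-style sketch of the forward sampling and the denoising objective described above; the linear beta schedule, the 4D tensor shapes, and the denoiser(x_t, t, text_tokens) interface are illustrative assumptions rather than the released RAPHAEL configuration.
```python
# Minimal sketch of DDPM forward sampling and the denoising objective L_denoise.
# The linear beta schedule and the denoiser(x_t, t, text_tokens) interface are
# illustrative assumptions, not the released RAPHAEL configuration.
import torch

T = 1000
betas = torch.linspace(1e-4, 0.02, T)        # assumed linear noise schedule
alphas = 1.0 - betas
alpha_bar = torch.cumprod(alphas, dim=0)     # bar(alpha)_t = prod_{i<=t} alpha_i

def forward_sample(x0, t):
    # x_t = sqrt(bar(alpha)_t) * x_0 + sqrt(1 - bar(alpha)_t) * eps, sampled directly at step t
    eps = torch.randn_like(x0)
    ab = alpha_bar.to(x0.device)[t].view(-1, 1, 1, 1)   # assumes image tensors shaped (B, C, H, W)
    return ab.sqrt() * x0 + (1.0 - ab).sqrt() * eps, eps

def denoise_loss(denoiser, x0, text_tokens):
    # L_denoise = E_{t, x_0, eps} || eps - D_theta(x_t, t) ||_2^2, with text conditioning passed through
    t = torch.randint(0, T, (x0.shape[0],), device=x0.device)
    xt, eps = forward_sample(x0, t)
    eps_pred = denoiser(xt, t, text_tokens)  # D_theta predicts the injected Gaussian noise
    return torch.mean((eps - eps_pred) ** 2)
```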
By employing Bayes\u2019 theorem, it is feasible to iteratively estimate the image at timestep $t-1$ through sampling from the posterior distribution $p_\theta(x_{t-1}|x_t)$. We have $x_{t-1} = \frac{1}{\sqrt{\alpha_t}}\left(x_t - \frac{1-\alpha_t}{\sqrt{1-\bar{\alpha}_t}}\,D_\theta(x_t, t)\right) + \sigma_t z$, where $\sigma_t$ signifies the standard deviation of the newly injected noise into the image at each step, and $z$ represents the Gaussian noise. In essence, the denoising neural network estimates the score function at varying time steps, thereby progressively recovering the structure of the image distribution. The fundamental insight provided by the DDPM lies in the fact that the perturbation of data points with noise serves to populate regions of low data density, ultimately enhancing the accuracy of estimated scores. This results in stable training and sampling. Figure 3: Framework of RAPHAEL. (a) Each block contains four primary components including a self-attention layer, a cross-attention layer, a space-MoE layer, and a time-MoE layer. The space-MoE is responsible for depicting different text concepts in specific image regions, while the time-MoE handles different diffusion timesteps. Each block uses edge-supervised cross-attention learning to further improve image quality. (b) shows details of space-MoE. For example, given a prompt \u201ca furry bear under sky\u201d, each text token and its corresponding image region (given by a binary mask) are directed through distinct space experts, i.e., each expert learns particular visual features at a region. By stacking several space-MoEs, we can easily learn to depict thousands of text concepts. U-Net with Text Prompts. The denoising network is commonly implemented using a U-Net [13] architecture, as depicted in Fig.8 in Appendix 7.3. To incorporate textual prompts (denoted by $y$) into the U-Net, a text encoder neural network, $E_\theta(y)$, is employed to extract the textual representation. The extracted text tokens are input into the U-Net through a cross-attention layer. The text tokens possess a size of $n_y \times d_y$, where $n_y$ represents the number of text tokens, and $d_y$ signifies the dimension of a text token (e.g., $d_y = 768$ in [14]). The cross-attention layer can be formulated as $\mathrm{attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d}}\right)V$, where $Q$, $K$, and $V$ correspond to the query, key, and value matrices, respectively. These matrices are computed as $Q = h(x_t)W^{qry}_{x}$, $K = E_\theta(y)W^{key}_{y}$, and $V = E_\theta(y)W^{val}_{y}$, where $W^{qry}_{x} \in \mathbb{R}^{d \times d}$ and $W^{key}_{y}, W^{val}_{y} \in \mathbb{R}^{d_y \times d}$ represent the parametric projection matrices for the image and text, respectively. Additionally, $d$ denotes the dimension of an image token, and $h(x_t) \in \mathbb{R}^{n_x \times d}$ indicates the flattened intermediate representation within the U-Net, with $n_x$ being the number of tokens in an image. A cross-attention map between the text and image, $M = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d}}\right) \in \mathbb{R}^{n_x \times n_y}$, is defined, which plays a crucial role in the proposed approach, as described in the following sections.
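To illustrate how the cross-attention map M that drives the later modules is formed, here is a small, self-contained sketch; the hidden sizes and the choice to return M alongside the attended features are assumptions for exposition, not the actual RAPHAEL layer.
```python
# Illustrative sketch of the text-image cross-attention above, exposing the map
# M = softmax(Q K^T / sqrt(d)) in R^{n_x x n_y}. Hidden sizes are assumptions.
import torch
import torch.nn as nn

class TextImageCrossAttention(nn.Module):
    def __init__(self, d=320, d_y=768):
        super().__init__()
        self.w_qry = nn.Linear(d, d, bias=False)    # W^qry_x for image tokens
        self.w_key = nn.Linear(d_y, d, bias=False)  # W^key_y for text tokens
        self.w_val = nn.Linear(d_y, d, bias=False)  # W^val_y for text tokens
        self.d = d

    def forward(self, h_x, e_y):
        # h_x: (B, n_x, d) flattened U-Net features; e_y: (B, n_y, d_y) encoded text tokens
        q, k, v = self.w_qry(h_x), self.w_key(e_y), self.w_val(e_y)
        m = torch.softmax(q @ k.transpose(1, 2) / self.d ** 0.5, dim=-1)  # (B, n_x, n_y)
        return m @ v, m   # attended features, plus M for the space-MoE and edge supervision
```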
3 Our Approach The overall framework of RAPHAEL is illustrated in Fig.3, with the network configuration details provided in the Appendix 7.1. Employing a U-Net architecture, the framework consists of 16 transformer blocks, each containing four components: a self-attention layer, a cross-attention layer, a space-MoE layer, and a time-MoE layer. The space-MoE is responsible for depicting different text concepts in specific image regions at a given scale, while the time-MoE handles different diffusion timesteps. 3.1 Space-MoE and Time-MoE Space-MoE. Regarding the space-MoE layer, distinct text tokens correspond to various regions within an image, as previously mentioned. For instance, when provided with the prompt \u201ca furry bear under the sky\u201d, each text token and its corresponding image region (represented by a binary mask) are fed into separate experts, as illustrated in Fig.3b. The space-MoE layer\u2019s output is the mean of all experts, calculated using the following formula: $\frac{1}{n_y}\sum_{i=1}^{n_y} e_{\mathrm{route}(y_i)}\!\left(h'(x_t) \circ \hat{M}_i\right)$. In this equation, $\hat{M}_i$ is a binary two-dimensional matrix, indicating the image region the i-th text token should correspond to, as shown in Fig.3b. Here, $\circ$ represents the Hadamard product, and $h'(x_t)$ is the features from the time-MoE. The gating (routing) function $\mathrm{route}(y_i)$ returns the index of an expert in the space-MoE, with $\{e_1, e_2, \ldots, e_k\}$ being a set of $k$ experts. Text Gate Network. The Text Gate Network is employed to distribute an image region to a specific expert, as shown in Fig.3b. The function $\mathrm{route}(y_i) = \mathrm{argmax}(\mathrm{softmax}(G(E_\theta(y_i)) + \epsilon))$ is used, where $G : \mathbb{R}^{d_y} \to \mathbb{R}^{k}$ is a feed-forward network, which uses a text token representation $E_\theta(y_i)$ as input and assigns a space expert. To prevent mode collapse, random noise $\epsilon$ is incorporated. The argmax function ensures that one expert exclusively handles the corresponding image region for each text token, without increasing computational complexity. Figure 4: Left: We visualize the diffusion paths (routes) from the network input to the output, utilizing 16 space-MoE layers, each containing 6 space experts. These paths are closely associated with 100 adjectives, such as \u201cscenic\u201d, \u201cpeaceful\u201d, and \u201cmajestic\u201d, which represent the most frequently occurring adjectives for describing artworks as suggested by GPT-3.5 [15, 16]. Given that GPT-3.5 has been trained on trillions of tokens, we believe that these adjectives reflect a diverse, real-world distribution. Our findings indicate that different paths distinctively represent various adjectives. Right: We depict the diffusion paths for ten categories (i.e., nouns) within the COCO dataset. Our observations reveal that different categories activate distinct paths in a heterogeneous manner. The display colors blend together where the routes overlap. (The plots show the selected expert index in each block against the block index; the ten COCO categories are donut, banana, cake, chair, horse, laptop, orange, pizza, train, and aeroplane.) From Text to Image Region. Recall that $M$ is the cross-attention map between text and image, where each element, $M_{j,i}$, represents a correspondence value between the j-th image token and the i-th text token. In the space-MoE, each entry in the binary mask $\hat{M}_i$ equals \u201c1\u201d if $M_{j,i} \ge \eta_i$, otherwise \u201c0\u201d if $M_{j,i} < \eta_i$, as illustrated in Fig.3b.
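To make the routing concrete, below is a minimal, loop-based sketch of a space-MoE layer with a text gate network; the expert width, the scale of the gating noise, and the alpha-scaled threshold (which the next paragraph defines) are illustrative assumptions rather than the released implementation.
```python
# Illustrative space-MoE sketch (not the official code): each text token selects one
# expert via argmax over softmax(G(E(y_i)) + noise), and that expert processes the
# image features masked by the token's binary region mask M_hat_i.
import torch
import torch.nn as nn

class SpaceMoE(nn.Module):
    def __init__(self, d=320, d_y=768, num_experts=6, alpha=0.2):
        super().__init__()
        self.gate = nn.Linear(d_y, num_experts)   # text gate network G: R^{d_y} -> R^{k}
        self.experts = nn.ModuleList(
            nn.Sequential(nn.Linear(d, d), nn.GELU(), nn.Linear(d, d)) for _ in range(num_experts)
        )
        self.alpha = alpha                        # threshold factor, eta_i = alpha * max_j M_{j,i}

    def forward(self, h_x, e_y, attn_map):
        # h_x: (B, n_x, d) features from the time-MoE; e_y: (B, n_y, d_y) text tokens;
        # attn_map: (B, n_x, n_y) cross-attention map M from the preceding layer.
        logits = self.gate(e_y)
        noise = torch.randn_like(logits) * 1e-2   # small noise against mode collapse (scale assumed)
        route = torch.softmax(logits + noise, dim=-1).argmax(dim=-1)   # (B, n_y) expert indices
        eta = self.alpha * attn_map.amax(dim=1, keepdim=True)          # per-token threshold eta_i
        masks = (attn_map >= eta).float()                              # binary M_hat, (B, n_x, n_y)

        out = torch.zeros_like(h_x)
        n_y = e_y.shape[1]
        for i in range(n_y):                                           # loops kept for clarity, not speed
            masked = h_x * masks[:, :, i].unsqueeze(-1)                # h'(x_t) o M_hat_i
            for b in range(h_x.shape[0]):
                out[b] = out[b] + self.experts[int(route[b, i])](masked[b])
        return out / n_y                                               # mean over all text tokens
```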
A thresholding mechanism is introduced to determine the values in the mask. The threshold value $\eta_i = \alpha \max(M_{*,i})$ is defined, where $\max(M_{*,i})$ represents the maximum correspondence between text token i and all image regions. The hyper-parameter $\alpha$ will be evaluated through an ablation study. Discussions. The insight behind the space-MoE is to effectively model the intricate relationships between text tokens and their corresponding regions in the image, accurately reflecting concepts in the generated images. As illustrated in Fig.4, the employment of 16 space-MoE layers, each containing 6 experts, results in billions of spatial diffusion paths (i.e., $6^{16}$ possible routes). It is evident that each diffusion path is closely associated with a specific textual concept. To investigate this further, we generate 100 prevalent adjectives that are the most frequently occurring adjectives for describing artworks as suggested by GPT-3.5 [15, 16]. Given that GPT-3.5 has been trained on trillions of tokens, we posit that these adjectives reflect a diverse, real-world distribution. We input each adjective into the RAPHAEL model with prompt templates given by GPT-3.5 to generate 100 distinct images and collect their corresponding diffusion paths. Consequently, we obtain ten thousand paths for the 100 words. By treating these pathways as features (i.e., each path is a vector of 16 entries), we train a straightforward classifier (e.g., XGBoost [17]) to categorize the words. The classifier after 5-fold cross-validation achieves over 93% accuracy for open-world adjectives, demonstrating that different diffusion paths distinctively represent various textual concepts. We observe analogous phenomena within the 80 object categories of the COCO dataset. Further details on verbs and visualization are provided in the Appendix 7.5. Time-MoE. We can further enhance the image quality by employing a time-mixture-of-experts (time-MoE) approach, which is inspired by previous works such as [4, 5]. Given that the diffusion process iteratively corrupts an image with Gaussian noise over a series of timesteps $t = 1, \ldots, T$, the image generator is trained to denoise the images in reverse order from $t = T$ to $t = 1$. All timesteps aim to denoise a noisy image, progressively transforming random noise into an artistic image. Intuitively, the difficulty of these denoising steps varies depending on the noise ratio present in the image. For example, when $t = T$, the denoising network\u2019s input image $x_t$ is highly noisy. When $t = 1$, the image $x_t$ is closer to the original image. To address this issue, we employ a time-MoE before each space-MoE in each transformer block. In contrast to [4, 5], which necessitate hand-crafted time expert assignments, we implement an additional gate network to automatically learn to assign different timesteps to various time experts. Further details can be found in the Appendix 7.3. 3.2 Edge-supervised Learning In order to further enhance the image quality, we propose incorporating an edge-supervised learning strategy to train the transformer block. By implementing an edge detection module, we aim to extract rich boundary information from an image. These intricate boundaries can serve as supervision to guide the model in preserving detailed image features across various styles. Consider a neural network module, P\u03b8(M), with parameters of N convolutional layers (e.g., N = 5).
This module is designed to predict an edge map given an attention map M (refer to Fig.7a in the Appendix 7.2). We utilize the edge map of the input image, denoted as Iedge, to supervise the network P\u03b8. Iedge can be obtained by the holistically-nested edge detection algorithm [18] (Fig.7b). Intuitively, the network P\u03b8 can be trained by minimizing the loss function, Ledge = Focal(P\u03b8(M), Iedge), where Focal(\u00b7, \u00b7) denotes the focal loss [19] employed to measure the discrepancy between the predicted and the \u201cground-truth\u201d edge maps. Moreover, as discussed in [5, 6], the attention map M is prone to becoming vague when the timestep t is large. Consequently, it is essential to adopt a timestep threshold value to inactivate (pause) edge-supervised learning when t is large. This timestep threshold value (Tc) is a hyper-parameter that will be evaluated through an ablation study. Overall, the RAPHAEL model is trained by combining two loss functions, L = Ldenoise + Ledge. As demonstrated in Fig.7d in the Appendix 7.2, edge-supervised learning substantially improves the image quality and aesthetic appeal of the generated images. 4 Experiments This section presents the experimental setups, the quantitative results compared to recent state-of-the-art models, and the ablation study to demonstrate the effectiveness of RAPHAEL. More artistic images generated by RAPHAEL and comparisons between RAPHAEL and other diffusion models can be found in Appendix 7.6 and 7.7. Dataset. The training dataset consists of LAION-5B [20] and some internal datasets. To collect training data from LAION-5B, we filter the images using the same aesthetic scorer as Stable Diffusion [2] and remove the image-text pairs that have scores smaller than 4.7. We also remove images with watermarks. Since the text descriptions in LAION-5B are noisy, we clean them by removing useless information such as URLs, HTML tags, and email addresses, inspired by [2, 4, 21]. Multi-scale Training. To improve text-image alignment, instead of cropping images to a fixed scale [2], we resize an image to its nearest size in a bucket, which has 9 different image scales\u2020. Additionally, the GPU resources will be automatically allocated to each bucket depending on the number of images it contains, enabling effective use of computational resources. Implementations. To reduce training and sampling complexity, we use a Variational Autoencoder (VAE) [22, 23] to compress images using the Latent Diffusion Model [2]. We first pre-train an image encoder to transform an image from pixel space to a latent space, and an image decoder to convert it back. Unlike previous works, the cross-attention layers in RAPHAEL are augmented with space-MoE and time-MoE layers. The entire model is implemented in PyTorch [24], and is trained by the AdamW [25] optimizer with a learning rate of 1e-4, a weight decay of 0, a batch size of 2,000, on 1,000 NVIDIA A100s for two months. More details on the hyper-parameter settings can be found in the Appendix 7.1. 4.1 Comparisons Results on COCO. Following previous works [1, 2, 4], we evaluate RAPHAEL on the COCO 256 \u00d7 256 dataset using zero-shot Frechet Inception Distance (FID), which measures the quality and diversity of images. Similar to [1, 2, 4, 5, 31], 30,000 images are randomly selected from the validation set for evaluation.
(\u2020The [height, width] for each bucket is [448, 832], [512, 768], [512, 704], [640, 640], [576, 640], [640, 576], [704, 512], [768, 512], and [832, 448].) Table 1 shows that RAPHAEL achieves a new state-of-the-art performance of text-to-image generation, with 6.61 zero-shot FID-30k on MS-COCO, surpassing prominent image generators such as Stable Diffusion, Imagen, ERNIE-ViLG 2.0, and DALL-E 2. Table 1: Comparisons of RAPHAEL with the recent representative text-to-image generation models on the MS-COCO 256 \u00d7 256 using zero-shot FID-30k. We see that RAPHAEL outperforms all previous works in image quality, even a commercial product released recently. Each entry lists the approach, its venue/date and model type in parentheses, and its FID-30K / zero-shot FID-30K value: DF-GAN [26] (CVPR\u201922, GAN): 21.42; DM-GAN + CL [27] (CVPR\u201919, GAN): 20.79; LAFITE [28] (CVPR\u201922, GAN): 8.12; Make-A-Scene [29] (ECCV\u201922, Autoregressive): 7.55; LDM [2] (CVPR\u201922, Diffusion): 12.63; GLIDE [30] (ICML\u201922, Diffusion): 12.24; DALL-E 2 [3] (arXiv, April 2022, Diffusion): 10.39; Stable Diffusion [2] (CVPR\u201922, Diffusion): 8.32; Muse-3B [31] (arXiv, Jan. 2023, Non-Autoregressive): 7.88; Imagen [1] (NeurIPS\u201922, Diffusion): 7.27; eDiff-I [4] (arXiv, Nov. 2022, Diffusion Experts): 6.95; ERNIE-ViLG 2.0 [5] (CVPR\u201923, Diffusion Experts): 6.75; DeepFloyd (Product, May 2023, Diffusion): 6.66; RAPHAEL (Diffusion Experts): 6.61. Figure 5: Comparisons of RAPHAEL with DALL-E 2, Stable Diffusion XL (SD XL), ERNIE-ViLG 2.0, and DeepFloyd in a user study using the ViLG-300 benchmark. We report the user\u2019s preference rates with 95% confidence intervals. We see that RAPHAEL can generate images with higher quality and better conform to the prompts. Human Evaluations. We employ the ViLG-300 benchmark [5], a bilingual prompt set, which enables us to systematically evaluate text-to-image models given various text prompts in Chinese and English. ViLG-300 allows us to convincingly compare RAPHAEL with recent advanced models including DALL-E 2, Stable Diffusion, ERNIE-ViLG 2.0, and DeepFloyd, in terms of both image quality and text-image alignment. For example, human artists are presented with two sets of images generated by RAPHAEL and a competitor, respectively. They are asked to compare these images from two aspects: image-text alignment, and image quality and aesthetics. Throughout the entire process, human artists are unaware of which model the image is generated from. Fig.5 shows that RAPHAEL surpasses all other models in both image-text alignment and image quality in the user study, indicating that RAPHAEL can generate high-artistry images that conform to the text. Extensions to LoRA, ControlNet, and SR-GAN. RAPHAEL can be further extended by incorporating LoRA, ControlNet, and SR-GAN. In Appendix 7.8, we present a comparison between RAPHAEL and Stable Diffusion utilizing LoRA. RAPHAEL demonstrates superior robustness against overfitting compared to Stable Diffusion. We also demonstrate RAPHAEL with a canny-based ControlNet. Furthermore, by employing a tailor-made SR-GAN model, we enhance the image resolution to 4096 \u00d7 6144. 4.2 Ablation Study Evaluate every module in RAPHAEL. We conduct a comprehensive assessment of each module within the RAPHAEL model, utilizing the CLIP [14] score to measure image-text alignment. Given the significance of the classifier-free guidance weight in controlling image quality and text alignment, we present ablation results as trade-off curves between CLIP and FID scores across a range of guidance weights [32], specifically 1.5, 3.0, 4.5, 6.0, 7.5, and 9.0.
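For reference, the guidance weight swept in this ablation enters the sampler through the standard classifier-free guidance combination of conditional and unconditional predictions; the sketch below uses that standard formulation and is not taken from RAPHAEL's code.
```python
# Standard classifier-free guidance combination (illustrative; not RAPHAEL's sampler code).
# w is the guidance weight swept in the ablation (e.g., 1.5, 3.0, 4.5, 6.0, 7.5, 9.0).
def guided_noise(denoiser, x_t, t, text_tokens, null_tokens, w=4.5):
    eps_cond = denoiser(x_t, t, text_tokens)    # prediction conditioned on the prompt
    eps_uncond = denoiser(x_t, t, null_tokens)  # prediction with an empty (null) prompt
    return eps_uncond + w * (eps_cond - eps_uncond)
```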
Figure 6: Ablation Study. (a) examines the selection of \u03b1 and Tc. (b) presents the trade-off between FID and CLIP scores for the complete RAPHAEL model and its variants without space-MoE, time-MoE, and edge-supervised learning. (c) visualizes the correlation between FID-5k and runtime complexity (measured in terms of the number of DDIM [33] steps for an image per second) as a function of the number of experts employed. Notably, the computational complexity is predominantly influenced by the number of space experts. Fig.6b compares these curves for the complete RAPHAEL model and its variants without space-MoE, edge-supervised learning, and time-MoE, respectively. Our findings indicate that all modules contribute effectively. For example, space-MoE substantially enhances the CLIP score and the optimal guidance weight for the sampler shifts from 3.0 to 4.5. Moreover, at the same guidance weight, space-MoE considerably reduces the FID, resulting in a significant improvement in image quality. Choice of \u03b1 and Tc. As depicted in Fig.6a, we observe that \u03b1 = 0.2 delivers the best performance, implying a balance between preserving adequate features and avoiding the use of the entire latent features. An appropriate threshold value for Tc terminates edge-supervised learning when the diffusion timestep is large. Our experiments reveal that a suitable choice for Tc is 500, ensuring the effective learning of texture information. Performance and Runtime Analysis on Number of Experts. We offer an examination of the number of experts, ranging from 0 to 8, in Fig.6c. For each setting, we employ 100 million training samples. Our results demonstrate that increasing the number of experts improves FID (lower values are preferable). However, adding space experts introduces additional computations, with the computational complexity bounded by the total number of experts. Once all available experts have been deployed, the computational complexity ceases to grow. On the right-hand side of Fig.6c, we provide a runtime analysis for 40 input tokens, ensuring the utilization of all space experts. For instance, when the number of experts is 6, the inference speed decreases by 24% but yields superior fidelity. This remains faster than previous diffusion models such as Imagen [1] and eDiff-I [4]. This paper introduces RAPHAEL, a novel text-conditional image diffusion model capable of generating highly-artistic images using a large-scale mixture of diffusion paths. We carefully design space-MoE and time-MoE within an edge-supervised learning framework, enabling RAPHAEL to accurately portray text prompts, enhance the alignment between textual concepts and image regions, and produce images with superior aesthetic appeal. Comprehensive experiments demonstrate that RAPHAEL surpasses previous approaches, such as Stable Diffusion, ERNIE-ViLG 2.0, DeepFloyd, and DALL-E 2, in both FID-30k and the human evaluation benchmark ViLG-300. Additionally, RAPHAEL can be extended using LoRA, ControlNet, and SR-GAN. We believe that RAPHAEL has the potential to advance image generation research in both academia and industry. Limitation and Potential Negative Societal Impact. The potential negative social impact is to use the RAPHAEL API to create images containing misleading or false information. This issue is potentially present in all powerful text-to-image generators.
We will solve this issue (e.g., by prompt filtering) before releasing the API to the public." + }, + { + "url": "http://arxiv.org/abs/2106.05974v1", + "title": "Scaling Vision with Sparse Mixture of Experts", + "abstract": "Sparsely-gated Mixture of Experts networks (MoEs) have demonstrated excellent\nscalability in Natural Language Processing. In Computer Vision, however, almost\nall performant networks are \"dense\", that is, every input is processed by every\nparameter. We present a Vision MoE (V-MoE), a sparse version of the Vision\nTransformer, that is scalable and competitive with the largest dense networks.\nWhen applied to image recognition, V-MoE matches the performance of\nstate-of-the-art networks, while requiring as little as half of the compute at\ninference time. Further, we propose an extension to the routing algorithm that\ncan prioritize subsets of each input across the entire batch, leading to\nadaptive per-image compute. This allows V-MoE to trade-off performance and\ncompute smoothly at test-time. Finally, we demonstrate the potential of V-MoE\nto scale vision models, and train a 15B parameter model that attains 90.35% on\nImageNet.", + "authors": "Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, Andr\u00e9 Susano Pinto, Daniel Keysers, Neil Houlsby", + "published": "2021-06-10", + "updated": "2021-06-10", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2303.07226v1", + "title": "Scaling Vision-Language Models with Sparse Mixture of Experts", + "abstract": "The field of natural language processing (NLP) has made significant strides\nin recent years, particularly in the development of large-scale vision-language\nmodels (VLMs). These models aim to bridge the gap between text and visual\ninformation, enabling a more comprehensive understanding of multimedia data.\nHowever, as these models become larger and more complex, they also become more\nchallenging to train and deploy. One approach to addressing this challenge is\nthe use of sparsely-gated mixture-of-experts (MoE) techniques, which divide the\nmodel into smaller, specialized sub-models that can jointly solve a task. In\nthis paper, we explore the effectiveness of MoE in scaling vision-language\nmodels, demonstrating its potential to achieve state-of-the-art performance on\na range of benchmarks over dense models of equivalent computational cost. Our\nresearch offers valuable insights into stabilizing the training of MoE models,\nunderstanding the impact of MoE on model interpretability, and balancing the\ntrade-offs between compute performance when scaling VLMs. We hope our work will\ninspire further research into the use of MoE for scaling large-scale\nvision-language models and other multimodal machine learning applications.", + "authors": "Sheng Shen, Zhewei Yao, Chunyuan Li, Trevor Darrell, Kurt Keutzer, Yuxiong He", + "published": "2023-03-13", + "updated": "2023-03-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2102.12092v2", + "title": "Zero-Shot Text-to-Image Generation", + "abstract": "Text-to-image generation has traditionally focused on finding better modeling\nassumptions for training on a fixed dataset. These assumptions might involve\ncomplex architectures, auxiliary losses, or side information such as object\npart labels or segmentation masks supplied during training. 
We describe a\nsimple approach for this task based on a transformer that autoregressively\nmodels the text and image tokens as a single stream of data. With sufficient\ndata and scale, our approach is competitive with previous domain-specific\nmodels when evaluated in a zero-shot fashion.", + "authors": "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, Ilya Sutskever", + "published": "2021-02-24", + "updated": "2021-02-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2105.13290v3", + "title": "CogView: Mastering Text-to-Image Generation via Transformers", + "abstract": "Text-to-Image generation in the general domain has long been an open problem,\nwhich requires both a powerful generative model and cross-modal understanding.\nWe propose CogView, a 4-billion-parameter Transformer with VQ-VAE tokenizer to\nadvance this problem. We also demonstrate the finetuning strategies for various\ndownstream tasks, e.g. style learning, super-resolution, text-image ranking and\nfashion design, and methods to stabilize pretraining, e.g. eliminating NaN\nlosses. CogView achieves the state-of-the-art FID on the blurred MS COCO\ndataset, outperforming previous GAN-based models and a recent similar work\nDALL-E.", + "authors": "Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, Jie Tang", + "published": "2021-05-26", + "updated": "2021-11-05", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.13262v1", + "title": "FastMoE: A Fast Mixture-of-Expert Training System", + "abstract": "Mixture-of-Expert (MoE) presents a strong potential in enlarging the size of\nlanguage model to trillions of parameters. However, training trillion-scale MoE\nrequires algorithm and system co-design for a well-tuned high performance\ndistributed training system. Unfortunately, the only existing platform that\nmeets the requirements strongly depends on Google's hardware (TPU) and software\n(Mesh Tensorflow) stack, and is not open and available to the public,\nespecially GPU and PyTorch communities.\n In this paper, we present FastMoE, a distributed MoE training system based on\nPyTorch with common accelerators. The system provides a hierarchical interface\nfor both flexible model design and easy adaption to different applications,\nsuch as Transformer-XL and Megatron-LM. Different from direct implementation of\nMoE models using PyTorch, the training speed is highly optimized in FastMoE by\nsophisticated high-performance acceleration skills. The system supports placing\ndifferent experts on multiple GPUs across multiple nodes, enabling enlarging\nthe number of experts linearly against the number of GPUs. The source of\nFastMoE is available at https://github.com/laekov/fastmoe under Apache-2\nlicense.", + "authors": "Jiaao He, Jiezhong Qiu, Aohan Zeng, Zhilin Yang, Jidong Zhai, Jie Tang", + "published": "2021-03-24", + "updated": "2021-03-24", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "cs.DC" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2211.01324v5", + "title": "eDiff-I: Text-to-Image Diffusion Models with an Ensemble of Expert Denoisers", + "abstract": "Large-scale diffusion-based generative models have led to breakthroughs in\ntext-conditioned high-resolution image synthesis. 
Starting from random noise,\nsuch text-to-image diffusion models gradually synthesize images in an iterative\nfashion while conditioning on text prompts. We find that their synthesis\nbehavior qualitatively changes throughout this process: Early in sampling,\ngeneration strongly relies on the text prompt to generate text-aligned content,\nwhile later, the text conditioning is almost entirely ignored. This suggests\nthat sharing model parameters throughout the entire generation process may not\nbe ideal. Therefore, in contrast to existing works, we propose to train an\nensemble of text-to-image diffusion models specialized for different synthesis\nstages. To maintain training efficiency, we initially train a single model,\nwhich is then split into specialized models that are trained for the specific\nstages of the iterative generation process. Our ensemble of diffusion models,\ncalled eDiff-I, results in improved text alignment while maintaining the same\ninference computation cost and preserving high visual quality, outperforming\nprevious large-scale text-to-image diffusion models on the standard benchmark.\nIn addition, we train our model to exploit a variety of embeddings for\nconditioning, including the T5 text, CLIP text, and CLIP image embeddings. We\nshow that these different embeddings lead to different behaviors. Notably, the\nCLIP image embedding allows an intuitive way of transferring the style of a\nreference image to the target text-to-image output. Lastly, we show a technique\nthat enables eDiff-I's \"paint-with-words\" capability. A user can select the\nword in the input text and paint it in a canvas to control the output, which is\nvery handy for crafting the desired image in mind. The project page is\navailable at https://deepimagination.cc/eDiff-I/", + "authors": "Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Jiaming Song, Qinsheng Zhang, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, Tero Karras, Ming-Yu Liu", + "published": "2022-11-02", + "updated": "2023-03-14", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1912.04958v2", + "title": "Analyzing and Improving the Image Quality of StyleGAN", + "abstract": "The style-based GAN architecture (StyleGAN) yields state-of-the-art results\nin data-driven unconditional generative image modeling. We expose and analyze\nseveral of its characteristic artifacts, and propose changes in both model\narchitecture and training methods to address them. In particular, we redesign\nthe generator normalization, revisit progressive growing, and regularize the\ngenerator to encourage good conditioning in the mapping from latent codes to\nimages. In addition to improving image quality, this path length regularizer\nyields the additional benefit that the generator becomes significantly easier\nto invert. This makes it possible to reliably attribute a generated image to a\nparticular network. We furthermore visualize how well the generator utilizes\nits output resolution, and identify a capacity problem, motivating us to train\nlarger models for additional quality improvements. 
Overall, our improved model\nredefines the state of the art in unconditional image modeling, both in terms\nof existing distribution quality metrics as well as perceived image quality.", + "authors": "Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, Timo Aila", + "published": "2019-12-03", + "updated": "2020-03-23", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG", + "cs.NE", + "eess.IV", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1710.07035v1", + "title": "Generative Adversarial Networks: An Overview", + "abstract": "Generative adversarial networks (GANs) provide a way to learn deep\nrepresentations without extensively annotated training data. They achieve this\nthrough deriving backpropagation signals through a competitive process\ninvolving a pair of networks. The representations that can be learned by GANs\nmay be used in a variety of applications, including image synthesis, semantic\nimage editing, style transfer, image super-resolution and classification. The\naim of this review paper is to provide an overview of GANs for the signal\nprocessing community, drawing on familiar analogies and concepts where\npossible. In addition to identifying different methods for training and\nconstructing GANs, we also point to remaining challenges in their theory and\napplication.", + "authors": "Antonia Creswell, Tom White, Vincent Dumoulin, Kai Arulkumaran, Biswa Sengupta, Anil A Bharath", + "published": "2017-10-19", + "updated": "2017-10-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1701.06538v1", + "title": "Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer", + "abstract": "The capacity of a neural network to absorb information is limited by its\nnumber of parameters. Conditional computation, where parts of the network are\nactive on a per-example basis, has been proposed in theory as a way of\ndramatically increasing model capacity without a proportional increase in\ncomputation. In practice, however, there are significant algorithmic and\nperformance challenges. In this work, we address these challenges and finally\nrealize the promise of conditional computation, achieving greater than 1000x\nimprovements in model capacity with only minor losses in computational\nefficiency on modern GPU clusters. We introduce a Sparsely-Gated\nMixture-of-Experts layer (MoE), consisting of up to thousands of feed-forward\nsub-networks. A trainable gating network determines a sparse combination of\nthese experts to use for each example. We apply the MoE to the tasks of\nlanguage modeling and machine translation, where model capacity is critical for\nabsorbing the vast quantities of knowledge available in the training corpora.\nWe present model architectures in which a MoE with up to 137 billion parameters\nis applied convolutionally between stacked LSTM layers. 
On large language\nmodeling and machine translation benchmarks, these models achieve significantly\nbetter results than state-of-the-art at lower computational cost.", + "authors": "Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean", + "published": "2017-01-23", + "updated": "2017-01-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "cs.NE", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2006.16668v1", + "title": "GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding", + "abstract": "Neural network scaling has been critical for improving the model quality in\nmany real-world machine learning applications with vast amounts of training\ndata and compute. Although this trend of scaling is affirmed to be a sure-fire\napproach for better model quality, there are challenges on the path such as the\ncomputation cost, ease of programming, and efficient implementation on parallel\ndevices. GShard is a module composed of a set of lightweight annotation APIs\nand an extension to the XLA compiler. It provides an elegant way to express a\nwide range of parallel computation patterns with minimal changes to the\nexisting model code. GShard enabled us to scale up multilingual neural machine\ntranslation Transformer model with Sparsely-Gated Mixture-of-Experts beyond 600\nbillion parameters using automatic sharding. We demonstrate that such a giant\nmodel can efficiently be trained on 2048 TPU v3 accelerators in 4 days to\nachieve far superior quality for translation from 100 languages to English\ncompared to the prior art.", + "authors": "Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, Zhifeng Chen", + "published": "2020-06-30", + "updated": "2020-06-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2204.06125v1", + "title": "Hierarchical Text-Conditional Image Generation with CLIP Latents", + "abstract": "Contrastive models like CLIP have been shown to learn robust representations\nof images that capture both semantics and style. To leverage these\nrepresentations for image generation, we propose a two-stage model: a prior\nthat generates a CLIP image embedding given a text caption, and a decoder that\ngenerates an image conditioned on the image embedding. We show that explicitly\ngenerating image representations improves image diversity with minimal loss in\nphotorealism and caption similarity. Our decoders conditioned on image\nrepresentations can also produce variations of an image that preserve both its\nsemantics and style, while varying the non-essential details absent from the\nimage representation. Moreover, the joint embedding space of CLIP enables\nlanguage-guided image manipulations in a zero-shot fashion. 
We use diffusion\nmodels for the decoder and experiment with both autoregressive and diffusion\nmodels for the prior, finding that the latter are computationally more\nefficient and produce higher-quality samples.", + "authors": "Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, Mark Chen", + "published": "2022-04-13", + "updated": "2022-04-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.15257v2", + "title": "ERNIE-ViLG 2.0: Improving Text-to-Image Diffusion Model with Knowledge-Enhanced Mixture-of-Denoising-Experts", + "abstract": "Recent progress in diffusion models has revolutionized the popular technology\nof text-to-image generation. While existing approaches could produce\nphotorealistic high-resolution images with text conditions, there are still\nseveral open problems to be solved, which limits the further improvement of\nimage fidelity and text relevancy. In this paper, we propose ERNIE-ViLG 2.0, a\nlarge-scale Chinese text-to-image diffusion model, to progressively upgrade the\nquality of generated images by: (1) incorporating fine-grained textual and\nvisual knowledge of key elements in the scene, and (2) utilizing different\ndenoising experts at different denoising stages. With the proposed mechanisms,\nERNIE-ViLG 2.0 not only achieves a new state-of-the-art on MS-COCO with\nzero-shot FID score of 6.75, but also significantly outperforms recent models\nin terms of image fidelity and image-text alignment, with side-by-side human\nevaluation on the bilingual prompt set ViLG-300.", + "authors": "Zhida Feng, Zhenyu Zhang, Xintong Yu, Yewei Fang, Lanxin Li, Xuyi Chen, Yuxiang Lu, Jiaxiang Liu, Weichong Yin, Shikun Feng, Yu Sun, Li Chen, Hao Tian, Hua Wu, Haifeng Wang", + "published": "2022-10-27", + "updated": "2023-03-28", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.11487v1", + "title": "Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding", + "abstract": "We present Imagen, a text-to-image diffusion model with an unprecedented\ndegree of photorealism and a deep level of language understanding. Imagen\nbuilds on the power of large transformer language models in understanding text\nand hinges on the strength of diffusion models in high-fidelity image\ngeneration. Our key discovery is that generic large language models (e.g. T5),\npretrained on text-only corpora, are surprisingly effective at encoding text\nfor image synthesis: increasing the size of the language model in Imagen boosts\nboth sample fidelity and image-text alignment much more than increasing the\nsize of the image diffusion model. Imagen achieves a new state-of-the-art FID\nscore of 7.27 on the COCO dataset, without ever training on COCO, and human\nraters find Imagen samples to be on par with the COCO data itself in image-text\nalignment. To assess text-to-image models in greater depth, we introduce\nDrawBench, a comprehensive and challenging benchmark for text-to-image models.\nWith DrawBench, we compare Imagen with recent methods including VQ-GAN+CLIP,\nLatent Diffusion Models, and DALL-E 2, and find that human raters prefer Imagen\nover other models in side-by-side comparisons, both in terms of sample quality\nand image-text alignment. 
See https://imagen.research.google/ for an overview\nof the results.", + "authors": "Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho, David J Fleet, Mohammad Norouzi", + "published": "2022-05-23", + "updated": "2022-05-23", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2101.03961v3", + "title": "Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity", + "abstract": "In deep learning, models typically reuse the same parameters for all inputs.\nMixture of Experts (MoE) defies this and instead selects different parameters\nfor each incoming example. The result is a sparsely-activated model -- with\noutrageous numbers of parameters -- but a constant computational cost. However,\ndespite several notable successes of MoE, widespread adoption has been hindered\nby complexity, communication costs and training instability -- we address these\nwith the Switch Transformer. We simplify the MoE routing algorithm and design\nintuitive improved models with reduced communication and computational costs.\nOur proposed training techniques help wrangle the instabilities and we show\nlarge sparse models may be trained, for the first time, with lower precision\n(bfloat16) formats. We design models based off T5-Base and T5-Large to obtain\nup to 7x increases in pre-training speed with the same computational resources.\nThese improvements extend into multilingual settings where we measure gains\nover the mT5-Base version across all 101 languages. Finally, we advance the\ncurrent scale of language models by pre-training up to trillion parameter\nmodels on the \"Colossal Clean Crawled Corpus\" and achieve a 4x speedup over the\nT5-XXL model.", + "authors": "William Fedus, Barret Zoph, Noam Shazeer", + "published": "2021-01-11", + "updated": "2022-06-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2112.10752v2", + "title": "High-Resolution Image Synthesis with Latent Diffusion Models", + "abstract": "By decomposing the image formation process into a sequential application of\ndenoising autoencoders, diffusion models (DMs) achieve state-of-the-art\nsynthesis results on image data and beyond. Additionally, their formulation\nallows for a guiding mechanism to control the image generation process without\nretraining. However, since these models typically operate directly in pixel\nspace, optimization of powerful DMs often consumes hundreds of GPU days and\ninference is expensive due to sequential evaluations. To enable DM training on\nlimited computational resources while retaining their quality and flexibility,\nwe apply them in the latent space of powerful pretrained autoencoders. In\ncontrast to previous work, training diffusion models on such a representation\nallows for the first time to reach a near-optimal point between complexity\nreduction and detail preservation, greatly boosting visual fidelity. By\nintroducing cross-attention layers into the model architecture, we turn\ndiffusion models into powerful and flexible generators for general conditioning\ninputs such as text or bounding boxes and high-resolution synthesis becomes\npossible in a convolutional manner. 
Our latent diffusion models (LDMs) achieve\na new state of the art for image inpainting and highly competitive performance\non various tasks, including unconditional image generation, semantic scene\nsynthesis, and super-resolution, while significantly reducing computational\nrequirements compared to pixel-based DMs. Code is available at\nhttps://github.com/CompVis/latent-diffusion .", + "authors": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Bj\u00f6rn Ommer", + "published": "2021-12-20", + "updated": "2022-04-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1711.11585v2", + "title": "High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs", + "abstract": "We present a new method for synthesizing high-resolution photo-realistic\nimages from semantic label maps using conditional generative adversarial\nnetworks (conditional GANs). Conditional GANs have enabled a variety of\napplications, but the results are often limited to low-resolution and still far\nfrom realistic. In this work, we generate 2048x1024 visually appealing results\nwith a novel adversarial loss, as well as new multi-scale generator and\ndiscriminator architectures. Furthermore, we extend our framework to\ninteractive visual manipulation with two additional features. First, we\nincorporate object instance segmentation information, which enables object\nmanipulations such as removing/adding objects and changing the object category.\nSecond, we propose a method to generate diverse results given the same input,\nallowing users to edit the object appearance interactively. Human opinion\nstudies demonstrate that our method significantly outperforms existing methods,\nadvancing both the quality and the resolution of deep image synthesis and\nediting.", + "authors": "Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, Bryan Catanzaro", + "published": "2017-11-30", + "updated": "2018-08-20", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.GR", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.13131v1", + "title": "Make-A-Scene: Scene-Based Text-to-Image Generation with Human Priors", + "abstract": "Recent text-to-image generation methods provide a simple yet exciting\nconversion capability between text and image domains. While these methods have\nincrementally improved the generated image fidelity and text relevancy, several\npivotal gaps remain unanswered, limiting applicability and quality. We propose\na novel text-to-image method that addresses these gaps by (i) enabling a simple\ncontrol mechanism complementary to text in the form of a scene, (ii)\nintroducing elements that substantially improve the tokenization process by\nemploying domain-specific knowledge over key image regions (faces and salient\nobjects), and (iii) adapting classifier-free guidance for the transformer use\ncase. Our model achieves state-of-the-art FID and human evaluation results,\nunlocking the ability to generate high fidelity images in a resolution of\n512x512 pixels, significantly improving visual quality. 
Through scene\ncontrollability, we introduce several new capabilities: (i) Scene editing, (ii)\ntext editing with anchor scenes, (iii) overcoming out-of-distribution text\nprompts, and (iv) story illustration generation, as demonstrated in the story\nwe wrote.", + "authors": "Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, Yaniv Taigman", + "published": "2022-03-24", + "updated": "2022-03-24", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.CL", + "cs.GR", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1312.4314v3", + "title": "Learning Factored Representations in a Deep Mixture of Experts", + "abstract": "Mixtures of Experts combine the outputs of several \"expert\" networks, each of\nwhich specializes in a different part of the input space. This is achieved by\ntraining a \"gating\" network that maps each input to a distribution over the\nexperts. Such models show promise for building larger networks that are still\ncheap to compute at test time, and more parallelizable at training time. In\nthis this work, we extend the Mixture of Experts to a stacked model, the Deep\nMixture of Experts, with multiple sets of gating and experts. This\nexponentially increases the number of effective experts by associating each\ninput with a combination of experts at each layer, yet maintains a modest model\nsize. On a randomly translated version of the MNIST dataset, we find that the\nDeep Mixture of Experts automatically learns to develop location-dependent\n(\"where\") experts at the first layer, and class-specific (\"what\") experts at\nthe second layer. In addition, we see that the different combinations are in\nuse when the model is applied to a dataset of speech monophones. These\ndemonstrate effective use of all expert combinations.", + "authors": "David Eigen, Marc'Aurelio Ranzato, Ilya Sutskever", + "published": "2013-12-16", + "updated": "2014-03-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.13750v1", + "title": "MoLE : Mixture of Language Experts for Multi-Lingual Automatic Speech Recognition", + "abstract": "Multi-lingual speech recognition aims to distinguish linguistic expressions\nin different languages and integrate acoustic processing simultaneously. In\ncontrast, current multi-lingual speech recognition research follows a\nlanguage-aware paradigm, mainly targeted to improve recognition performance\nrather than discriminate language characteristics. In this paper, we present a\nmulti-lingual speech recognition network named\nMixture-of-Language-Expert(MoLE), which digests speech in a variety of\nlanguages. Specifically, MoLE analyzes linguistic expression from input speech\nin arbitrary languages, activating a language-specific expert with a\nlightweight language tokenizer. The tokenizer not only activates experts, but\nalso estimates the reliability of the activation. Based on the reliability, the\nactivated expert and the language-agnostic expert are aggregated to represent\nlanguage-conditioned embedding for efficient speech recognition. 
Our proposed\nmodel is evaluated in 5 languages scenario, and the experimental results show\nthat our structure is advantageous on multi-lingual recognition, especially for\nspeech in low-resource language.", + "authors": "Yoohwan Kwon, Soo-Whan Chung", + "published": "2023-02-27", + "updated": "2023-02-27", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.CL", + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.02806v2", + "title": "Graph Mixture of Experts: Learning on Large-Scale Graphs with Explicit Diversity Modeling", + "abstract": "Graph neural networks (GNNs) have found extensive applications in learning\nfrom graph data. However, real-world graphs often possess diverse structures\nand comprise nodes and edges of varying types. To bolster the generalization\ncapacity of GNNs, it has become customary to augment training graph structures\nthrough techniques like graph augmentations and large-scale pre-training on a\nwider array of graphs. Balancing this diversity while avoiding increased\ncomputational costs and the notorious trainability issues of GNNs is crucial.\nThis study introduces the concept of Mixture-of-Experts (MoE) to GNNs, with the\naim of augmenting their capacity to adapt to a diverse range of training graph\nstructures, without incurring explosive computational overhead. The proposed\nGraph Mixture of Experts (GMoE) model empowers individual nodes in the graph to\ndynamically and adaptively select more general information aggregation experts.\nThese experts are trained to capture distinct subgroups of graph structures and\nto incorporate information with varying hop sizes, where those with larger hop\nsizes specialize in gathering information over longer distances. The\neffectiveness of GMoE is validated through a series of experiments on a diverse\nset of tasks, including graph, node, and link prediction, using the OGB\nbenchmark. Notably, it enhances ROC-AUC by $1.81\\%$ in ogbg-molhiv and by\n$1.40\\%$ in ogbg-molbbbp, when compared to the non-MoE baselines. Our code is\npublicly available at https://github.com/VITA-Group/Graph-Mixture-of-Experts.", + "authors": "Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Rao Kompella, Zhangyang Wang", + "published": "2023-04-06", + "updated": "2023-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.04377v2", + "title": "Convergence Rates for Gaussian Mixtures of Experts", + "abstract": "We provide a theoretical treatment of over-specified Gaussian mixtures of\nexperts with covariate-free gating networks. We establish the convergence rates\nof the maximum likelihood estimation (MLE) for these models. Our proof\ntechnique is based on a novel notion of \\emph{algebraic independence} of the\nexpert functions. Drawing on optimal transport theory, we establish a\nconnection between the algebraic independence and a certain class of partial\ndifferential equations (PDEs). Exploiting this connection allows us to derive\nconvergence rates and minimax lower bounds for parameter estimation.", + "authors": "Nhat Ho, Chiao-Yu Yang, Michael I. 
Jordan", + "published": "2019-07-09", + "updated": "2022-03-08", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "cs.LG", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.02952v1", + "title": "On Least Squares Estimation in Softmax Gating Mixture of Experts", + "abstract": "Mixture of experts (MoE) model is a statistical machine learning design that\naggregates multiple expert networks using a softmax gating function in order to\nform a more intricate and expressive model. Despite being commonly used in\nseveral applications owing to their scalability, the mathematical and\nstatistical properties of MoE models are complex and difficult to analyze. As a\nresult, previous theoretical works have primarily focused on probabilistic MoE\nmodels by imposing the impractical assumption that the data are generated from\na Gaussian MoE model. In this work, we investigate the performance of the least\nsquares estimators (LSE) under a deterministic MoE model where the data are\nsampled according to a regression model, a setting that has remained largely\nunexplored. We establish a condition called strong identifiability to\ncharacterize the convergence behavior of various types of expert functions. We\ndemonstrate that the rates for estimating strongly identifiable experts, namely\nthe widely used feed forward networks with activation functions\n$\\mathrm{sigmoid}(\\cdot)$ and $\\tanh(\\cdot)$, are substantially faster than\nthose of polynomial experts, which we show to exhibit a surprising slow\nestimation rate. Our findings have important practical implications for expert\nselection.", + "authors": "Huy Nguyen, Nhat Ho, Alessandro Rinaldo", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2305.03288v2", + "title": "Demystifying Softmax Gating Function in Gaussian Mixture of Experts", + "abstract": "Understanding the parameter estimation of softmax gating Gaussian mixture of\nexperts has remained a long-standing open problem in the literature. It is\nmainly due to three fundamental theoretical challenges associated with the\nsoftmax gating function: (i) the identifiability only up to the translation of\nparameters; (ii) the intrinsic interaction via partial differential equations\nbetween the softmax gating and the expert functions in the Gaussian density;\n(iii) the complex dependence between the numerator and denominator of the\nconditional density of softmax gating Gaussian mixture of experts. We resolve\nthese challenges by proposing novel Voronoi loss functions among parameters and\nestablishing the convergence rates of maximum likelihood estimator (MLE) for\nsolving parameter estimation in these models. 
When the true number of experts\nis unknown and over-specified, our findings show a connection between the\nconvergence rate of the MLE and a solvability problem of a system of polynomial\nequations.", + "authors": "Huy Nguyen, TrungTin Nguyen, Nhat Ho", + "published": "2023-05-05", + "updated": "2023-10-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "math.ST", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.01750v1", + "title": "Modular Approach to Machine Reading Comprehension: Mixture of Task-Aware Experts", + "abstract": "In this work we present a Mixture of Task-Aware Experts Network for Machine\nReading Comprehension on a relatively small dataset. We particularly focus on\nthe issue of common-sense learning, enforcing the common ground knowledge by\nspecifically training different expert networks to capture different kinds of\nrelationships between each passage, question and choice triplet. Moreover, we\ntake inspi ration on the recent advancements of multitask and transfer learning\nby training each network a relevant focused task. By making the\nmixture-of-networks aware of a specific goal by enforcing a task and a\nrelationship, we achieve state-of-the-art results and reduce over-fitting.", + "authors": "Anirudha Rayasam, Anusha Kamath, Gabriel Bayomi Tinoco Kalejaiye", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.02813v1", + "title": "Towards Understanding Mixture of Experts in Deep Learning", + "abstract": "The Mixture-of-Experts (MoE) layer, a sparsely-activated model controlled by\na router, has achieved great success in deep learning. However, the\nunderstanding of such architecture remains elusive. In this paper, we formally\nstudy how the MoE layer improves the performance of neural network learning and\nwhy the mixture model will not collapse into a single model. Our empirical\nresults suggest that the cluster structure of the underlying problem and the\nnon-linearity of the expert are pivotal to the success of MoE. To further\nunderstand this, we consider a challenging classification problem with\nintrinsic cluster structures, which is hard to learn using a single expert. Yet\nwith the MoE layer, by choosing the experts as two-layer nonlinear\nconvolutional neural networks (CNNs), we show that the problem can be learned\nsuccessfully. Furthermore, our theory shows that the router can learn the\ncluster-center features, which helps divide the input complex problem into\nsimpler linear classification sub-problems that individual experts can conquer.\nTo our knowledge, this is the first result towards formally understanding the\nmechanism of the MoE layer for deep learning.", + "authors": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, Yuanzhi Li", + "published": "2022-08-04", + "updated": "2022-08-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.14976v4", + "title": "MoCaE: Mixture of Calibrated Experts Significantly Improves Object Detection", + "abstract": "Combining the strengths of many existing predictors to obtain a Mixture of\nExperts which is superior to its individual components is an effective way to\nimprove the performance without having to develop new architectures or train a\nmodel from scratch. 
However, surprisingly, we find that na\\\"ively combining\nexpert object detectors in a similar way to Deep Ensembles, can often lead to\ndegraded performance. We identify that the primary cause of this issue is that\nthe predictions of the experts do not match their performance, a term referred\nto as miscalibration. Consequently, the most confident detector dominates the\nfinal predictions, preventing the mixture from leveraging all the predictions\nfrom the experts appropriately. To address this, when constructing the Mixture\nof Experts, we propose to combine their predictions in a manner which reflects\nthe individual performance of the experts; an objective we achieve by first\ncalibrating the predictions before filtering and refining them. We term this\napproach the Mixture of Calibrated Experts and demonstrate its effectiveness\nthrough extensive experiments on 5 different detection tasks using a variety of\ndetectors, showing that it: (i) improves object detectors on COCO and instance\nsegmentation methods on LVIS by up to $\\sim 2.5$ AP; (ii) reaches\nstate-of-the-art on COCO test-dev with $65.1$ AP and on DOTA with $82.62$\n$\\mathrm{AP_{50}}$; (iii) outperforms single models consistently on recent\ndetection tasks such as Open Vocabulary Object Detection.", + "authors": "Kemal Oksuz, Selim Kuzucu, Tom Joy, Puneet K. Dokania", + "published": "2023-09-26", + "updated": "2024-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2112.14397v2", + "title": "EvoMoE: An Evolutional Mixture-of-Experts Training Framework via Dense-To-Sparse Gate", + "abstract": "Mixture-of-experts (MoE) is becoming popular due to its success in improving\nthe model quality, especially in Transformers. By routing tokens with a sparse\ngate to a few experts (i.e., a small pieces of the full model), MoE can easily\nincrease the model parameters to a very large scale while keeping the\ncomputation cost in a constant level. Most existing works just initialize some\nrandom experts, set a fixed gating strategy (e.g., Top-k), and train the model\nfrom scratch in an ad-hoc way. We identify that these MoE models are suffering\nfrom the immature experts and unstable sparse gate, which are harmful to the\nconvergence performance. In this paper, we propose an efficient end-to-end MoE\ntraining framework called EvoMoE. EvoMoE starts from training one single expert\nand gradually evolves into a large and sparse MoE structure. EvoMoE mainly\ncontains two phases: the expert-diversify phase to train the base expert for a\nwhile and spawn multiple diverse experts from it, and the gate-sparsify phase\nto learn an adaptive sparse gate and activate a dynamic number of experts.\nEvoMoE naturally decouples the joint learning of both the experts and the\nsparse gate and focuses on learning the basic knowledge with a single expert at\nthe early training stage. Then it diversifies the experts and continues to\ntrain the MoE with a novel Dense-to-Sparse gate (DTS-Gate). Specifically,\ninstead of using a permanent sparse gate, DTS-Gate begins as a dense gate that\nroutes tokens to all experts, then gradually and adaptively becomes sparser\nwhile routes to fewer experts. Evaluations are conducted on three popular\nmodels and tasks, including RoBERTa for masked language modeling task, GPT for\nlanguage modeling task and Transformer for machine translation task. 
The\nresults show that EvoMoE outperforms existing baselines, including Switch, BASE\nLayer, Hash Layer and StableMoE.", + "authors": "Xiaonan Nie, Xupeng Miao, Shijie Cao, Lingxiao Ma, Qibin Liu, Jilong Xue, Youshan Miao, Yi Liu, Zhi Yang, Bin Cui", + "published": "2021-12-29", + "updated": "2022-10-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2404.15045v1", + "title": "Multi-Head Mixture-of-Experts", + "abstract": "Sparse Mixtures of Experts (SMoE) scales model capacity without significant\nincreases in training and inference costs, but exhibits the following two\nissues: (1) Low expert activation, where only a small subset of experts are\nactivated for optimization. (2) Lacking fine-grained analytical capabilities\nfor multiple semantic concepts within individual tokens. We propose Multi-Head\nMixture-of-Experts (MH-MoE), which employs a multi-head mechanism to split each\ntoken into multiple sub-tokens. These sub-tokens are then assigned to and\nprocessed by a diverse set of experts in parallel, and seamlessly reintegrated\ninto the original token form. The multi-head mechanism enables the model to\ncollectively attend to information from various representation spaces within\ndifferent experts, while significantly enhances expert activation, thus deepens\ncontext understanding and alleviate overfitting. Moreover, our MH-MoE is\nstraightforward to implement and decouples from other SMoE optimization\nmethods, making it easy to integrate with other SMoE models for enhanced\nperformance. Extensive experimental results across three tasks: English-focused\nlanguage modeling, Multi-lingual language modeling and Masked multi-modality\nmodeling tasks, demonstrate the effectiveness of MH-MoE.", + "authors": "Xun Wu, Shaohan Huang, Wenhui Wang, Furu Wei", + "published": "2024-04-23", + "updated": "2024-04-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12550v1", + "title": "Multilinear Mixture of Experts: Scalable Expert Specialization through Factorization", + "abstract": "The Mixture of Experts (MoE) paradigm provides a powerful way to decompose\ninscrutable dense layers into smaller, modular computations often more amenable\nto human interpretation, debugging, and editability. A major problem however\nlies in the computational cost of scaling the number of experts to achieve\nsufficiently fine-grained specialization. In this paper, we propose the\nMultilinear Mixutre of Experts (MMoE) layer to address this, focusing on vision\nmodels. MMoE layers perform an implicit computation on prohibitively large\nweight tensors entirely in factorized form. Consequently, MMoEs both (1) avoid\nthe issues incurred through the discrete expert routing in the popular 'sparse'\nMoE models, yet (2) do not incur the restrictively high inference-time costs of\n'soft' MoE alternatives. We present both qualitative and quantitative evidence\n(through visualization and counterfactual interventions respectively) that\nscaling MMoE layers when fine-tuning foundation models for vision tasks leads\nto more specialized experts at the class-level whilst remaining competitive\nwith the performance of parameter-matched linear layer counterparts. Finally,\nwe show that learned expert specialism further facilitates manual correction of\ndemographic bias in CelebA attribute classification. 
Our MMoE model code is\navailable at https://github.com/james-oldfield/MMoE.", + "authors": "James Oldfield, Markos Georgopoulos, Grigorios G. Chrysos, Christos Tzelepis, Yannis Panagakis, Mihalis A. Nicolaou, Jiankang Deng, Ioannis Patras", + "published": "2024-02-19", + "updated": "2024-02-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2303.06318v2", + "title": "A Hybrid Tensor-Expert-Data Parallelism Approach to Optimize Mixture-of-Experts Training", + "abstract": "Mixture-of-Experts (MoE) is a neural network architecture that adds sparsely\nactivated expert blocks to a base model, increasing the number of parameters\nwithout impacting computational costs. However, current distributed deep\nlearning frameworks are limited in their ability to train high-quality MoE\nmodels with large base models. In this work, we present DeepSpeed-TED, a novel,\nthree-dimensional, hybrid parallel algorithm that combines data, tensor, and\nexpert parallelism to enable the training of MoE models with 4 to 8x larger\nbase models than the current state-of-the-art. We also describe memory\noptimizations in the optimizer step, and communication optimizations that\neliminate unnecessary data movement. We implement our approach in DeepSpeed and\nachieve speedups of 26% over a baseline (i.e. without our communication\noptimizations) when training a 40 billion parameter MoE model (6.7 billion base\nmodel with 16 experts) on 128 V100 GPUs.", + "authors": "Siddharth Singh, Olatunji Ruwase, Ammar Ahmad Awan, Samyam Rajbhandari, Yuxiong He, Abhinav Bhatele", + "published": "2023-03-11", + "updated": "2023-05-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.DC", + "cs.PF" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1511.06072v1", + "title": "Mediated Experts for Deep Convolutional Networks", + "abstract": "We present a new supervised architecture termed Mediated Mixture-of-Experts\n(MMoE) that allows us to improve classification accuracy of Deep Convolutional\nNetworks (DCN). Our architecture achieves this with the help of expert\nnetworks: A network is trained on a disjoint subset of a given dataset and then\nrun in parallel to other experts during deployment. A mediator is employed if\nexperts contradict each other. This allows our framework to naturally support\nincremental learning, as adding new classes requires (re-)training of the new\nexpert only. We also propose two measures to control computational complexity:\nAn early-stopping mechanism halts experts that have low confidence in their\nprediction. The system allows to trade-off accuracy and complexity without\nfurther retraining. We also suggest to share low-level convolutional layers\nbetween experts in an effort to avoid computation of a near-duplicate feature\nset. We evaluate our system on a popular dataset and report improved accuracy\ncompared to a single model of same configuration.", + "authors": "Sebastian Agethen, Winston H. Hsu", + "published": "2015-11-19", + "updated": "2015-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.15969v2", + "title": "Routers in Vision Mixture of Experts: An Empirical Study", + "abstract": "Mixture-of-Experts (MoE) models are a promising way to scale up model\ncapacity without significantly increasing computational cost. 
A key component\nof MoEs is the router, which decides which subset of parameters (experts)\nprocess which feature embeddings (tokens). In this paper, we present a\ncomprehensive study of routers in MoEs for computer vision tasks. We introduce\na unified MoE formulation that subsumes different MoEs with two parametric\nrouting tensors. This formulation covers both sparse MoE, which uses a binary\nor hard assignment between experts and tokens, and soft MoE, which uses a soft\nassignment between experts and weighted combinations of tokens. Routers for\nsparse MoEs can be further grouped into two variants: Token Choice, which\nmatches experts to each token, and Expert Choice, which matches tokens to each\nexpert. We conduct head-to-head experiments with 6 different routers, including\nexisting routers from prior work and new ones we introduce. We show that (i)\nmany routers originally developed for language modeling can be adapted to\nperform strongly in vision tasks, (ii) in sparse MoE, Expert Choice routers\ngenerally outperform Token Choice routers, and (iii) soft MoEs generally\noutperform sparse MoEs with a fixed compute budget. These results provide new\ninsights regarding the crucial role of routers in vision MoE models.", + "authors": "Tianlin Liu, Mathieu Blondel, Carlos Riquelme, Joan Puigcerver", + "published": "2024-01-29", + "updated": "2024-04-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.04832v1", + "title": "Dynamic Partition Models", + "abstract": "We present a new approach for learning compact and intuitive distributed\nrepresentations with binary encoding. Rather than summing up expert votes as in\nproducts of experts, we employ for each variable the opinion of the most\nreliable expert. Data points are hence explained through a partitioning of the\nvariables into expert supports. The partitions are dynamically adapted based on\nwhich experts are active. During the learning phase we adopt a smoothed version\nof this model that uses separate mixtures for each data dimension. In our\nexperiments we achieve accurate reconstructions of high-dimensional data points\nwith at most a dozen experts.", + "authors": "Marc Goessling, Yali Amit", + "published": "2017-02-16", + "updated": "2017-02-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2307.05956v2", + "title": "Language-Routing Mixture of Experts for Multilingual and Code-Switching Speech Recognition", + "abstract": "Multilingual speech recognition for both monolingual and code-switching\nspeech is a challenging task. Recently, based on the Mixture of Experts (MoE),\nmany works have made good progress in multilingual and code-switching ASR, but\npresent huge computational complexity with the increase of supported languages.\nIn this work, we propose a computation-efficient network named Language-Routing\nMixture of Experts (LR-MoE) for multilingual and code-switching ASR. LR-MoE\nextracts language-specific representations through the Mixture of Language\nExperts (MLE), which is guided to learn by a frame-wise language routing\nmechanism. The weight-shared frame-level language identification (LID) network\nis jointly trained as the shared pre-router of each MoE layer. 
Experiments show\nthat the proposed method significantly improves multilingual and code-switching\nspeech recognition performances over baseline with comparable computational\nefficiency.", + "authors": "Wenxuan Wang, Guodong Ma, Yuke Li, Binbin Du", + "published": "2023-07-12", + "updated": "2023-07-14", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2308.00951v1", + "title": "From Sparse to Soft Mixtures of Experts", + "abstract": "Sparse mixture of expert architectures (MoEs) scale model capacity without\nlarge increases in training or inference costs. Despite their success, MoEs\nsuffer from a number of issues: training instability, token dropping, inability\nto scale the number of experts, or ineffective finetuning. In this work, we\nproposeSoft MoE, a fully-differentiable sparse Transformer that addresses these\nchallenges, while maintaining the benefits of MoEs. Soft MoE performs an\nimplicit soft assignment by passing different weighted combinations of all\ninput tokens to each expert. As in other MoE works, experts in Soft MoE only\nprocess a subset of the (combined) tokens, enabling larger model capacity at\nlower inference cost. In the context of visual recognition, Soft MoE greatly\noutperforms standard Transformers (ViTs) and popular MoE variants (Tokens\nChoice and Experts Choice). For example, Soft MoE-Base/16 requires 10.5x lower\ninference cost (5.7x lower wall-clock time) than ViT-Huge/14 while matching its\nperformance after similar training. Soft MoE also scales well: Soft MoE Huge/14\nwith 128 experts in 16 MoE layers has over 40x more parameters than ViT\nHuge/14, while inference time cost grows by only 2%, and it performs\nsubstantially better.", + "authors": "Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Neil Houlsby", + "published": "2023-08-02", + "updated": "2023-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2107.04694v1", + "title": "Lifelong Mixture of Variational Autoencoders", + "abstract": "In this paper, we propose an end-to-end lifelong learning mixture of experts.\nEach expert is implemented by a Variational Autoencoder (VAE). The experts in\nthe mixture system are jointly trained by maximizing a mixture of individual\ncomponent evidence lower bounds (MELBO) on the log-likelihood of the given\ntraining samples. The mixing coefficients in the mixture, control the\ncontributions of each expert in the goal representation. These are sampled from\na Dirichlet distribution whose parameters are determined through non-parametric\nestimation during lifelong learning. The model can learn new tasks fast when\nthese are similar to those previously learnt. The proposed Lifelong mixture of\nVAE (L-MVAE) expands its architecture with new components when learning a\ncompletely new task. After the training, our model can automatically determine\nthe relevant expert to be used when fed with new data samples. This mechanism\nbenefits both the memory efficiency and the required computational cost as only\none expert is used during the inference. The L-MVAE inference model is able to\nperform interpolation in the joint latent space across the data domains\nassociated with different tasks and is shown to be efficient for disentangled\nlearning representation.", + "authors": "Fei Ye, Adrian G. 
Bors", + "published": "2021-07-09", + "updated": "2021-07-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2008.09662v1", + "title": "Biased Mixtures Of Experts: Enabling Computer Vision Inference Under Data Transfer Limitations", + "abstract": "We propose a novel mixture-of-experts class to optimize computer vision\nmodels in accordance with data transfer limitations at test time. Our approach\npostulates that the minimum acceptable amount of data allowing for\nhighly-accurate results can vary for different input space partitions.\nTherefore, we consider mixtures where experts require different amounts of\ndata, and train a sparse gating function to divide the input space for each\nexpert. By appropriate hyperparameter selection, our approach is able to bias\nmixtures of experts towards selecting specific experts over others. In this\nway, we show that the data transfer optimization between visual sensing and\nprocessing can be solved as a convex optimization problem.To demonstrate the\nrelation between data availability and performance, we evaluate biased mixtures\non a range of mainstream computer vision problems, namely: (i) single shot\ndetection, (ii) image super resolution, and (iii) realtime video action\nclassification. For all cases, and when experts constitute modified baselines\nto meet different limits on allowed data utility, biased mixtures significantly\noutperform previous work optimized to meet the same constraints on available\ndata.", + "authors": "Alhabib Abbas, Yiannis Andreopoulos", + "published": "2020-08-21", + "updated": "2020-08-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.IV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.10598v3", + "title": "Sparsely-gated Mixture-of-Expert Layers for CNN Interpretability", + "abstract": "Sparsely-gated Mixture of Expert (MoE) layers have been recently successfully\napplied for scaling large transformers, especially for language modeling tasks.\nAn intriguing side effect of sparse MoE layers is that they convey inherent\ninterpretability to a model via natural expert specialization. In this work, we\napply sparse MoE layers to CNNs for computer vision tasks and analyze the\nresulting effect on model interpretability. To stabilize MoE training, we\npresent both soft and hard constraint-based approaches. With hard constraints,\nthe weights of certain experts are allowed to become zero, while soft\nconstraints balance the contribution of experts with an additional auxiliary\nloss. As a result, soft constraints handle expert utilization better and\nsupport the expert specialization process, while hard constraints maintain more\ngeneralized experts and increase overall model performance. Our findings\ndemonstrate that experts can implicitly focus on individual sub-domains of the\ninput space. For example, experts trained for CIFAR-100 image classification\nspecialize in recognizing different domains such as flowers or animals without\nprevious data clustering. Experiments with RetinaNet and the COCO dataset\nfurther indicate that object detection experts can also specialize in detecting\nobjects of distinct sizes.", + "authors": "Svetlana Pavlitska, Christian Hubschneider, Lukas Struppek, J. 
Marius Z\u00f6llner", + "published": "2022-04-22", + "updated": "2023-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09832v3", + "title": "Merging Experts into One: Improving Computational Efficiency of Mixture of Experts", + "abstract": "Scaling the size of language models usually leads to remarkable advancements\nin NLP tasks. But it often comes with a price of growing computational cost.\nAlthough a sparse Mixture of Experts (MoE) can reduce the cost by activating a\nsmall subset of parameters (e.g., one expert) for each input, its computation\nescalates significantly if increasing the number of activated experts, limiting\nits practical utility. Can we retain the advantages of adding more experts\nwithout substantially increasing the computational costs? In this paper, we\nfirst demonstrate the superiority of selecting multiple experts and then\npropose a computation-efficient approach called \\textbf{\\texttt{Merging Experts\ninto One}} (MEO), which reduces the computation cost to that of a single\nexpert. Extensive experiments show that MEO significantly improves\ncomputational efficiency, e.g., FLOPS drops from 72.0G of vanilla MoE to 28.6G\n(MEO). Moreover, we propose a token-level attention block that further enhances\nthe efficiency and performance of token-level MEO, e.g., 83.3\\% (MEO) vs.\n82.6\\% (vanilla MoE) average score on the GLUE benchmark. Our code will be\nreleased upon acceptance. Code will be released at:\n\\url{https://github.com/Shwai-He/MEO}.", + "authors": "Shwai He, Run-Ze Fan, Liang Ding, Li Shen, Tianyi Zhou, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-11-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.04894v1", + "title": "DAMEX: Dataset-aware Mixture-of-Experts for visual understanding of mixture-of-datasets", + "abstract": "Construction of a universal detector poses a crucial question: How can we\nmost effectively train a model on a large mixture of datasets? The answer lies\nin learning dataset-specific features and ensembling their knowledge but do all\nthis in a single model. Previous methods achieve this by having separate\ndetection heads on a common backbone but that results in a significant increase\nin parameters. In this work, we present Mixture-of-Experts as a solution,\nhighlighting that MoEs are much more than a scalability tool. We propose\nDataset-Aware Mixture-of-Experts, DAMEX where we train the experts to become an\n`expert' of a dataset by learning to route each dataset tokens to its mapped\nexpert. Experiments on Universal Object-Detection Benchmark show that we\noutperform the existing state-of-the-art by average +10.2 AP score and improve\nover our non-MoE baseline by average +2.0 AP score. We also observe consistent\ngains while mixing datasets with (1) limited availability, (2) disparate\ndomains and (3) divergent label sets. 
Further, we qualitatively show that DAMEX\nis robust against expert representation collapse.", + "authors": "Yash Jain, Harkirat Behl, Zsolt Kira, Vibhav Vineet", + "published": "2023-11-08", + "updated": "2023-11-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12656v2", + "title": "HyperMoE: Paying Attention to Unselected Experts in Mixture of Experts via Dynamic Transfer", + "abstract": "The Mixture of Experts (MoE) for language models has been proven effective in\naugmenting the capacity of models by dynamically routing each input token to a\nspecific subset of experts for processing. Despite the success, most existing\nmethods face a challenge for balance between sparsity and the availability of\nexpert knowledge: enhancing performance through increased use of expert\nknowledge often results in diminishing sparsity during expert selection. To\nmitigate this contradiction, we propose HyperMoE, a novel MoE framework built\nupon Hypernetworks. This framework integrates the computational processes of\nMoE with the concept of knowledge transferring in multi-task learning. Specific\nmodules generated based on the information of unselected experts serve as\nsupplementary information, which allows the knowledge of experts not selected\nto be used while maintaining selection sparsity. Our comprehensive empirical\nevaluations across multiple datasets and backbones establish that HyperMoE\nsignificantly outperforms existing MoE methods under identical conditions\nconcerning the number of experts.", + "authors": "Hao Zhao, Zihan Qiu, Huijia Wu, Zili Wang, Zhaofeng He, Jie Fu", + "published": "2024-02-20", + "updated": "2024-02-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05220v1", + "title": "On Parameter Estimation in Deviated Gaussian Mixture of Experts", + "abstract": "We consider the parameter estimation problem in the deviated Gaussian mixture\nof experts in which the data are generated from $(1 - \\lambda^{\\ast}) g_0(Y|\nX)+ \\lambda^{\\ast} \\sum_{i = 1}^{k_{\\ast}} p_{i}^{\\ast}\nf(Y|(a_{i}^{\\ast})^{\\top}X+b_i^{\\ast},\\sigma_{i}^{\\ast})$, where $X, Y$ are\nrespectively a covariate vector and a response variable, $g_{0}(Y|X)$ is a\nknown function, $\\lambda^{\\ast} \\in [0, 1]$ is true but unknown mixing\nproportion, and $(p_{i}^{\\ast}, a_{i}^{\\ast}, b_{i}^{\\ast}, \\sigma_{i}^{\\ast})$\nfor $1 \\leq i \\leq k^{\\ast}$ are unknown parameters of the Gaussian mixture of\nexperts. This problem arises from the goodness-of-fit test when we would like\nto test whether the data are generated from $g_{0}(Y|X)$ (null hypothesis) or\nthey are generated from the whole mixture (alternative hypothesis). Based on\nthe algebraic structure of the expert functions and the distinguishability\nbetween $g_0$ and the mixture part, we construct novel Voronoi-based loss\nfunctions to capture the convergence rates of maximum likelihood estimation\n(MLE) for our models. 
We further demonstrate that our proposed loss functions\ncharacterize the local convergence rates of parameter estimation more\naccurately than the generalized Wasserstein, a loss function being commonly\nused for estimating parameters in the Gaussian mixture of experts.", + "authors": "Huy Nguyen, Khai Nguyen, Nhat Ho", + "published": "2024-02-07", + "updated": "2024-02-07", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.11706v1", + "title": "Mixture of ELM based experts with trainable gating network", + "abstract": "Mixture of experts method is a neural network based ensemble learning that\nhas great ability to improve the overall classification accuracy. This method\nis based on the divide and conquer principle, in which the problem space is\ndivided between several experts by supervisition of gating network. In this\npaper, we propose an ensemble learning method based on mixture of experts which\nis named mixture of ELM based experts with trainable gating network (MEETG) to\nimprove the computing cost and to speed up the learning process of ME. The\nstructure of ME consists of multi layer perceptrons (MLPs) as base experts and\ngating network, in which gradient-based learning algorithm is applied for\ntraining the MLPs which is an iterative and time consuming process. In order to\novercome on these problems, we use the advantages of extreme learning machine\n(ELM) for designing the structure of ME. ELM as a learning algorithm for single\nhidden-layer feed forward neural networks provides much faster learning process\nand better generalization ability in comparision with some other traditional\nlearning algorithms. Also, in the proposed method a trainable gating network is\napplied to aggregate the outputs of the experts dynamically according to the\ninput sample. Our experimental results and statistical analysis on 11 benchmark\ndatasets confirm that MEETG has an acceptable performance in classification\nproblems. Furthermore, our experimental results show that the proposed approach\noutperforms the original ELM on prediction stability and classification\naccuracy.", + "authors": "Laleh Armi, Elham Abbasi, Jamal Zarepour-Ahmadabadi", + "published": "2021-05-25", + "updated": "2021-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2102.06034v1", + "title": "Speech enhancement with mixture-of-deep-experts with clean clustering pre-training", + "abstract": "In this study we present a mixture of deep experts (MoDE) neural-network\narchitecture for single microphone speech enhancement. Our architecture\ncomprises a set of deep neural networks (DNNs), each of which is an 'expert' in\na different speech spectral pattern such as phoneme. A gating DNN is\nresponsible for the latent variables which are the weights assigned to each\nexpert's output given a speech segment. The experts estimate a mask from the\nnoisy input and the final mask is then obtained as a weighted average of the\nexperts' estimates, with the weights determined by the gating DNN. A soft\nspectral attenuation, based on the estimated mask, is then applied to enhance\nthe noisy speech signal. As a byproduct, we gain reduction at the complexity in\ntest time. We show that the experts specialization allows better robustness to\nunfamiliar noise types.", + "authors": "Shlomo E. 
Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2021-02-11", + "updated": "2021-02-11", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "cs.LG", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1806.08200v1", + "title": "Mixtures of Experts Models", + "abstract": "Mixtures of experts models provide a framework in which covariates may be\nincluded in mixture models. This is achieved by modelling the parameters of the\nmixture model as functions of the concomitant covariates. Given their mixture\nmodel foundation, mixtures of experts models possess a diverse range of\nanalytic uses, from clustering observations to capturing parameter\nheterogeneity in cross-sectional data. This chapter focuses on delineating the\nmixture of experts modelling framework and demonstrates the utility and\nflexibility of mixtures of experts models as an analytic tool.", + "authors": "Isobel Claire Gormley, Sylvia Fr\u00fchwirth-Schnatter", + "published": "2018-06-21", + "updated": "2018-06-21", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08396v1", + "title": "StableMoE: Stable Routing Strategy for Mixture of Experts", + "abstract": "The Mixture-of-Experts (MoE) technique can scale up the model size of\nTransformers with an affordable computational overhead. We point out that\nexisting learning-to-route MoE methods suffer from the routing fluctuation\nissue, i.e., the target expert of the same input may change along with\ntraining, but only one expert will be activated for the input during inference.\nThe routing fluctuation tends to harm sample efficiency because the same input\nupdates different experts but only one is finally used. In this paper, we\npropose StableMoE with two training stages to address the routing fluctuation\nproblem. In the first training stage, we learn a balanced and cohesive routing\nstrategy and distill it into a lightweight router decoupled from the backbone\nmodel. In the second training stage, we utilize the distilled router to\ndetermine the token-to-expert assignment and freeze it for a stable routing\nstrategy. We validate our method on language modeling and multilingual machine\ntranslation. The results show that StableMoE outperforms existing MoE methods\nin terms of both convergence speed and performance.", + "authors": "Damai Dai, Li Dong, Shuming Ma, Bo Zheng, Zhifang Sui, Baobao Chang, Furu Wei", + "published": "2022-04-18", + "updated": "2022-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.14703v1", + "title": "Improving Expert Specialization in Mixture of Experts", + "abstract": "Mixture of experts (MoE), introduced over 20 years ago, is the simplest gated\nmodular neural network architecture. There is renewed interest in MoE because\nthe conditional computation allows only parts of the network to be used during\neach inference, as was recently demonstrated in large scale natural language\nprocessing models. MoE is also of potential interest for continual learning, as\nexperts may be reused for new tasks, and new experts introduced. The gate in\nthe MoE architecture learns task decompositions and individual experts learn\nsimpler functions appropriate to the gate's decomposition. 
In this paper: (1)\nwe show that the original MoE architecture and its training method do not\nguarantee intuitive task decompositions and good expert utilization, indeed\nthey can fail spectacularly even for simple data such as MNIST and\nFashionMNIST; (2) we introduce a novel gating architecture, similar to\nattention, that improves performance and results in a lower entropy task\ndecomposition; and (3) we introduce a novel data-driven regularization that\nimproves expert specialization. We empirically validate our methods on MNIST,\nFashionMNIST and CIFAR-100 datasets.", + "authors": "Yamuna Krishnamurthy, Chris Watkins, Thomas Gaertner", + "published": "2023-02-28", + "updated": "2023-02-28", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.05346v1", + "title": "A Modular Task-oriented Dialogue System Using a Neural Mixture-of-Experts", + "abstract": "End-to-end Task-oriented Dialogue Systems (TDSs) have attracted a lot of\nattention for their superiority (e.g., in terms of global optimization) over\npipeline modularized TDSs. Previous studies on end-to-end TDSs use a\nsingle-module model to generate responses for complex dialogue contexts.\nHowever, no model consistently outperforms the others in all cases. We propose\na neural Modular Task-oriented Dialogue System(MTDS) framework, in which a few\nexpert bots are combined to generate the response for a given dialogue context.\nMTDS consists of a chair bot and several expert bots. Each expert bot is\nspecialized for a particular situation, e.g., one domain, one type of action of\na system, etc. The chair bot coordinates multiple expert bots and adaptively\nselects an expert bot to generate the appropriate response. We further propose\na Token-level Mixture-of-Expert (TokenMoE) model to implement MTDS, where the\nexpert bots predict multiple tokens at each timestamp and the chair bot\ndetermines the final generated token by fully taking into consideration the\noutputs of all expert bots. Both the chair bot and the expert bots are jointly\ntrained in an end-to-end fashion. To verify the effectiveness of TokenMoE, we\ncarry out extensive experiments on a benchmark dataset. Compared with the\nbaseline using a single-module model, our TokenMoE improves the performance by\n8.1% of inform rate and 0.8% of success rate.", + "authors": "Jiahuan Pei, Pengjie Ren, Maarten de Rijke", + "published": "2019-07-10", + "updated": "2019-07-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.09179v3", + "title": "On the Representation Collapse of Sparse Mixture of Experts", + "abstract": "Sparse mixture of experts provides larger model capacity while requiring a\nconstant computational overhead. It employs the routing mechanism to distribute\ninput tokens to the best-matched experts according to their hidden\nrepresentations. However, learning such a routing mechanism encourages token\nclustering around expert centroids, implying a trend toward representation\ncollapse. In this work, we propose to estimate the routing scores between\ntokens and experts on a low-dimensional hypersphere. We conduct extensive\nexperiments on cross-lingual language model pre-training and fine-tuning on\ndownstream tasks. Experimental results across seven multilingual benchmarks\nshow that our method achieves consistent gains. 
We also present a comprehensive\nanalysis on the representation and routing behaviors of our models. Our method\nalleviates the representation collapse issue and achieves more consistent\nrouting than the baseline mixture-of-experts methods.", + "authors": "Zewen Chi, Li Dong, Shaohan Huang, Damai Dai, Shuming Ma, Barun Patra, Saksham Singhal, Payal Bajaj, Xia Song, Xian-Ling Mao, Heyan Huang, Furu Wei", + "published": "2022-04-20", + "updated": "2022-10-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.06066v1", + "title": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", + "abstract": "In the era of large language models, Mixture-of-Experts (MoE) is a promising\narchitecture for managing computational costs when scaling up model parameters.\nHowever, conventional MoE architectures like GShard, which activate the top-$K$\nout of $N$ experts, face challenges in ensuring expert specialization, i.e.\neach expert acquires non-overlapping and focused knowledge. In response, we\npropose the DeepSeekMoE architecture towards ultimate expert specialization. It\ninvolves two principal strategies: (1) finely segmenting the experts into $mN$\nones and activating $mK$ from them, allowing for a more flexible combination of\nactivated experts; (2) isolating $K_s$ experts as shared ones, aiming at\ncapturing common knowledge and mitigating redundancy in routed experts.\nStarting from a modest scale with 2B parameters, we demonstrate that\nDeepSeekMoE 2B achieves comparable performance with GShard 2.9B, which has 1.5\ntimes the expert parameters and computation. In addition, DeepSeekMoE 2B nearly\napproaches the performance of its dense counterpart with the same number of\ntotal parameters, which set the upper bound of MoE models. Subsequently, we\nscale up DeepSeekMoE to 16B parameters and show that it achieves comparable\nperformance with LLaMA2 7B, with only about 40% of computations. Further, our\npreliminary efforts to scale up DeepSeekMoE to 145B parameters consistently\nvalidate its substantial advantages over the GShard architecture, and show its\nperformance comparable with DeepSeek 67B, using only 28.5% (maybe even 18.2%)\nof computations.", + "authors": "Damai Dai, Chengqi Deng, Chenggang Zhao, R. X. Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y. Wu, Zhenda Xie, Y. K. Li, Panpan Huang, Fuli Luo, Chong Ruan, Zhifang Sui, Wenfeng Liang", + "published": "2024-01-11", + "updated": "2024-01-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1612.06879v1", + "title": "Robust mixture of experts modeling using the skew $t$ distribution", + "abstract": "Mixture of Experts (MoE) is a popular framework in the fields of statistics\nand machine learning for modeling heterogeneity in data for regression,\nclassification and clustering. MoE for continuous data are usually based on the\nnormal distribution. However, it is known that for data with asymmetric\nbehavior, heavy tails and atypical observations, the use of the normal\ndistribution is unsuitable. We introduce a new robust non-normal mixture of\nexperts modeling using the skew $t$ distribution. The proposed skew $t$ mixture\nof experts, named STMoE, handles these issues of the normal mixtures experts\nregarding possibly skewed, heavy-tailed and noisy data. 
We develop a dedicated\nexpectation conditional maximization (ECM) algorithm to estimate the model\nparameters by monotonically maximizing the observed data log-likelihood. We\ndescribe how the presented model can be used in prediction and in model-based\nclustering of regression data. Numerical experiments carried out on simulated\ndata show the effectiveness and the robustness of the proposed model in fitting\nnon-linear regression functions as well as in model-based clustering. Then, the\nproposed model is applied to the real-world data of tone perception for musical\ndata analysis, and the one of temperature anomalies for the analysis of climate\nchange data. The obtained results confirm the usefulness of the model for\npractical data analysis applications.", + "authors": "Faicel Chamroukhi", + "published": "2016-12-09", + "updated": "2016-12-09", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG", + "stat.ML", + "62, 62F, 62H30, 62h", + "G.3; I.2.6; I.5.1" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08753v1", + "title": "Table-based Fact Verification with Self-adaptive Mixture of Experts", + "abstract": "The table-based fact verification task has recently gained widespread\nattention and yet remains to be a very challenging problem. It inherently\nrequires informative reasoning over natural language together with different\nnumerical and logical reasoning on tables (e.g., count, superlative,\ncomparative). Considering that, we exploit mixture-of-experts and present in\nthis paper a new method: Self-adaptive Mixture-of-Experts Network (SaMoE).\nSpecifically, we have developed a mixture-of-experts neural network to\nrecognize and execute different types of reasoning -- the network is composed\nof multiple experts, each handling a specific part of the semantics for\nreasoning, whereas a management module is applied to decide the contribution of\neach expert network to the verification result. A self-adaptive method is\ndeveloped to teach the management module combining results of different experts\nmore efficiently without external knowledge. The experimental results\nillustrate that our framework achieves 85.1% accuracy on the benchmark dataset\nTabFact, comparable with the previous state-of-the-art models. We hope our\nframework can serve as a new baseline for table-based verification. Our code is\navailable at https://github.com/THUMLP/SaMoE.", + "authors": "Yuxuan Zhou, Xien Liu, Kaiyin Zhou, Ji Wu", + "published": "2022-04-19", + "updated": "2022-04-19", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.01334v2", + "title": "Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy", + "abstract": "Sparsely activated Mixture-of-Experts (SMoE) has shown promise to scale up\nthe learning capacity of neural networks, however, they have issues like (a)\nHigh Memory Usage, due to duplication of the network layers into multiple\ncopies as experts; and (b) Redundancy in Experts, as common learning-based\nrouting policies suffer from representational collapse. Therefore, vanilla SMoE\nmodels are memory inefficient and non-scalable, especially for\nresource-constrained downstream scenarios. In this paper, we ask: Can we craft\na compact SMoE model by consolidating expert information? What is the best\nrecipe to merge multiple experts into fewer but more knowledgeable experts? 
Our\npilot investigation reveals that conventional model merging methods fail to be\neffective in such expert merging for SMoE. The potential reasons are: (1)\nredundant information overshadows critical experts; (2) appropriate neuron\npermutation for each expert is missing to bring all of them in alignment. To\naddress this, we propose M-SMoE, which leverages routing statistics to guide\nexpert merging. Specifically, it starts with neuron permutation alignment for\nexperts; then, dominant experts and their \"group members\" are formed; lastly,\nevery expert group is merged into a single expert by utilizing each expert's\nactivation frequency as their weight for merging, thus diminishing the impact\nof insignificant experts. Moreover, we observed that our proposed merging\npromotes a low dimensionality in the merged expert's weight space, naturally\npaving the way for additional compression. Hence, our final method, MC-SMoE\n(i.e., Merge, then Compress SMoE), further decomposes the merged experts into\nlow-rank and structural sparse alternatives. Extensive experiments across 8\nbenchmarks validate the effectiveness of MC-SMoE. For instance, our MC-SMoE\nachieves up to 80% memory and a 20% FLOPs reduction, with virtually no loss in\nperformance.", + "authors": "Pingzhi Li, Zhenyu Zhang, Prateek Yadav, Yi-Lin Sung, Yu Cheng, Mohit Bansal, Tianlong Chen", + "published": "2023-10-02", + "updated": "2024-03-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.14800v1", + "title": "Not All Experts are Equal: Efficient Expert Pruning and Skipping for Mixture-of-Experts Large Language Models", + "abstract": "A pivotal advancement in the progress of large language models (LLMs) is the\nemergence of the Mixture-of-Experts (MoE) LLMs. Compared to traditional LLMs,\nMoE LLMs can achieve higher performance with fewer parameters, but it is still\nhard to deploy them due to their immense parameter sizes. Different from\nprevious weight pruning methods that rely on specifically designed hardware,\nthis paper mainly aims to enhance the deployment efficiency of MoE LLMs by\nintroducing plug-and-play expert-level sparsification techniques. Specifically,\nwe propose, for the first time to our best knowledge, post-training approaches\nfor task-agnostic and task-specific expert pruning and skipping of MoE LLMs,\ntailored to improve deployment efficiency while maintaining model performance\nacross a wide range of tasks. Extensive experiments show that our proposed\nmethods can simultaneously reduce model sizes and increase the inference speed,\nwhile maintaining satisfactory performance. Data and code will be available at\nhttps://github.com/Lucky-Lance/Expert_Sparsity.", + "authors": "Xudong Lu, Qi Liu, Yuhui Xu, Aojun Zhou, Siyuan Huang, Bo Zhang, Junchi Yan, Hongsheng Li", + "published": "2024-02-22", + "updated": "2024-02-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.00968v2", + "title": "Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts", + "abstract": "Large multi-modal models (LMMs) exhibit remarkable performance across\nnumerous tasks. However, generalist LMMs often suffer from performance\ndegradation when tuned over a large collection of tasks. 
Recent research\nsuggests that Mixture of Experts (MoE) architectures are useful for instruction\ntuning, but for LMMs of parameter size around O(50-100B), the prohibitive cost\nof replicating and storing the expert models severely limits the number of\nexperts we can use. We propose Omni-SMoLA, an architecture that uses the Soft\nMoE approach to (softly) mix many multimodal low rank experts, and avoids\nintroducing a significant number of new parameters compared to conventional MoE\nmodels. The core intuition here is that the large model provides a foundational\nbackbone, while different lightweight experts residually learn specialized\nknowledge, either per-modality or multimodally. Extensive experiments\ndemonstrate that the SMoLA approach helps improve the generalist performance\nacross a broad range of generative vision-and-language tasks, achieving new\nSoTA generalist performance that often matches or outperforms single\nspecialized LMM baselines, as well as new SoTA specialist performance.", + "authors": "Jialin Wu, Xia Hu, Yaqing Wang, Bo Pang, Radu Soricut", + "published": "2023-12-01", + "updated": "2024-04-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.13850v2", + "title": "Statistical Perspective of Top-K Sparse Softmax Gating Mixture of Experts", + "abstract": "Top-K sparse softmax gating mixture of experts has been widely used for\nscaling up massive deep-learning architectures without increasing the\ncomputational cost. Despite its popularity in real-world applications, the\ntheoretical understanding of that gating function has remained an open problem.\nThe main challenge comes from the structure of the top-K sparse softmax gating\nfunction, which partitions the input space into multiple regions with distinct\nbehaviors. By focusing on a Gaussian mixture of experts, we establish\ntheoretical results on the effects of the top-K sparse softmax gating function\non both density and parameter estimations. Our results hinge upon defining\nnovel loss functions among parameters to capture different behaviors of the\ninput regions. When the true number of experts $k_{\\ast}$ is known, we\ndemonstrate that the convergence rates of density and parameter estimations are\nboth parametric on the sample size. However, when $k_{\\ast}$ becomes unknown\nand the true model is over-specified by a Gaussian mixture of $k$ experts where\n$k > k_{\\ast}$, our findings suggest that the number of experts selected from\nthe top-K sparse softmax gating function must exceed the total cardinality of a\ncertain number of Voronoi cells associated with the true parameters to\nguarantee the convergence of the density estimation. 
Moreover, while the\ndensity estimation rate remains parametric under this setting, the parameter\nestimation rates become substantially slow due to an intrinsic interaction\nbetween the softmax gating and expert functions.", + "authors": "Huy Nguyen, Pedram Akbarian, Fanqi Yan, Nhat Ho", + "published": "2023-09-25", + "updated": "2024-02-23", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02410v1", + "title": "Mixture of Quantized Experts (MoQE): Complementary Effect of Low-bit Quantization and Robustness", + "abstract": "Large Mixture of Experts (MoE) models could achieve state-of-the-art quality\non various language tasks, including machine translation task, thanks to the\nefficient model scaling capability with expert parallelism. However, it has\nbrought a fundamental issue of larger memory consumption and increased memory\nbandwidth bottleneck at deployment time. In this paper, we propose Mixture of\nQuantized Experts (MoQE) which is a simple weight-only quantization method\napplying ultra low-bit down to 2-bit quantizations only to expert weights for\nmitigating the increased memory and latency issues of MoE models. We show that\nlow-bit quantization together with the MoE architecture delivers a reliable\nmodel performance while reducing the memory size significantly even without any\nadditional training in most cases. In particular, expert layers in MoE models\nare much more robust to the quantization than conventional feedforward networks\n(FFN) layers. In our comprehensive analysis, we show that MoE models with 2-bit\nexpert weights can deliver better model performance than the dense model\ntrained on the same dataset. As a result of low-bit quantization, we show the\nmodel size can be reduced by 79.6% of the original half precision floating\npoint (fp16) MoE model. Combined with an optimized GPU runtime implementation,\nit also achieves 1.24X speed-up on A100 GPUs.", + "authors": "Young Jin Kim, Raffy Fahim, Hany Hassan Awadalla", + "published": "2023-10-03", + "updated": "2023-10-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.13934v1", + "title": "Functional mixture-of-experts for classification", + "abstract": "We develop a mixtures-of-experts (ME) approach to the multiclass\nclassification where the predictors are univariate functions. It consists of a\nME model in which both the gating network and the experts network are\nconstructed upon multinomial logistic activation functions with functional\ninputs. We perform a regularized maximum likelihood estimation in which the\ncoefficient functions enjoy interpretable sparsity constraints on targeted\nderivatives. 
We develop an EM-Lasso like algorithm to compute the regularized\nMLE and evaluate the proposed approach on simulated and real data.", + "authors": "Nhat Thien Pham, Faicel Chamroukhi", + "published": "2022-02-28", + "updated": "2022-02-28", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.00372v1", + "title": "Visual Saliency Prediction Using a Mixture of Deep Neural Networks", + "abstract": "Visual saliency models have recently begun to incorporate deep learning to\nachieve predictive capacity much greater than previous unsupervised methods.\nHowever, most existing models predict saliency using local mechanisms limited\nto the receptive field of the network. We propose a model that incorporates\nglobal scene semantic information in addition to local information gathered by\na convolutional neural network. Our model is formulated as a mixture of\nexperts. Each expert network is trained to predict saliency for a set of\nclosely related images. The final saliency map is computed as a weighted\nmixture of the expert networks' output, with weights determined by a separate\ngating network. This gating network is guided by global scene information to\npredict weights. The expert networks and the gating network are trained\nsimultaneously in an end-to-end manner. We show that our mixture formulation\nleads to improvement in performance over an otherwise identical non-mixture\nmodel that does not incorporate global scene information.", + "authors": "Samuel Dodge, Lina Karam", + "published": "2017-02-01", + "updated": "2017-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17404v1", + "title": "Generalization Error Analysis for Sparse Mixture-of-Experts: A Preliminary Study", + "abstract": "Mixture-of-Experts (MoE) represents an ensemble methodology that amalgamates\npredictions from several specialized sub-models (referred to as experts). This\nfusion is accomplished through a router mechanism, dynamically assigning\nweights to each expert's contribution based on the input data. Conventional MoE\nmechanisms select all available experts, incurring substantial computational\ncosts. In contrast, Sparse Mixture-of-Experts (Sparse MoE) selectively engages\nonly a limited number, or even just one expert, significantly reducing\ncomputation overhead while empirically preserving, and sometimes even\nenhancing, performance. Despite its wide-ranging applications and these\nadvantageous characteristics, MoE's theoretical underpinnings have remained\nelusive. In this paper, we embark on an exploration of Sparse MoE's\ngeneralization error concerning various critical factors. Specifically, we\ninvestigate the impact of the number of data samples, the total number of\nexperts, the sparsity in expert selection, the complexity of the routing\nmechanism, and the complexity of individual experts. 
Our analysis sheds light\non \\textit{how \\textbf{sparsity} contributes to the MoE's generalization},\noffering insights from the perspective of classical learning theory.", + "authors": "Jinze Zhao, Peihao Wang, Zhangyang Wang", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2012.02130v4", + "title": "A similarity-based Bayesian mixture-of-experts model", + "abstract": "We present a new nonparametric mixture-of-experts model for multivariate\nregression problems, inspired by the probabilistic k-nearest neighbors\nalgorithm. Using a conditionally specified model, predictions for out-of-sample\ninputs are based on similarities to each observed data point, yielding\npredictive distributions represented by Gaussian mixtures. Posterior inference\nis performed on the parameters of the mixture components as well as the\ndistance metric using a mean-field variational Bayes algorithm accompanied with\na stochastic gradient-based optimization procedure. The proposed method is\nespecially advantageous in settings where inputs are of relatively high\ndimension in comparison to the data size, where input-output relationships are\ncomplex, and where predictive distributions may be skewed or multimodal.\nComputational studies on five datasets, of which two are synthetically\ngenerated, illustrate clear advantages of our mixture-of-experts method for\nhigh-dimensional inputs, outperforming competitor models both in terms of\nvalidation metrics and visual inspection.", + "authors": "Tianfang Zhang, Rasmus Bokrantz, Jimmy Olsson", + "published": "2020-12-03", + "updated": "2022-08-03", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2010.14260v2", + "title": "Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability", + "abstract": "In this paper, we consider mixtures of two Mallows models for top-$k$\nrankings, both with the same location parameter but with different scale\nparameters, i.e., a mixture of concentric Mallows models. This situation arises\nwhen we have a heterogeneous population of voters formed by two homogeneous\npopulations, one of which is a subpopulation of expert voters while the other\nincludes the non-expert voters. We propose efficient sampling algorithms for\nMallows top-$k$ rankings. We show the identifiability of both components, and\nthe learnability of their respective parameters in this setting by, first,\nbounding the sample complexity for the Borda algorithm with top-$k$ rankings\nand second, proposing polynomial time algorithm for the separation of the\nrankings in each component. 
Finally, since the rank aggregation will suffer\nfrom a large amount of noise introduced by the non-expert voters, we adapt the\nBorda algorithm to be able to recover the ground truth consensus ranking which\nis especially consistent with the expert rankings.", + "authors": "Collas Fabien, Irurozki Ekhine", + "published": "2020-10-27", + "updated": "2020-11-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.13833v2", + "title": "Mixtures of Gaussian process experts based on kernel stick-breaking processes", + "abstract": "Mixtures of Gaussian process experts is a class of models that can\nsimultaneously address two of the key limitations inherent in standard Gaussian\nprocesses: scalability and predictive performance. In particular, models that\nuse Dirichlet processes as gating functions permit straightforward\ninterpretation and automatic selection of the number of experts in a mixture.\nWhile the existing models are intuitive and capable of capturing\nnon-stationarity, multi-modality and heteroskedasticity, the simplicity of\ntheir gating functions may limit the predictive performance when applied to\ncomplex data-generating processes. Capitalising on the recent advancement in\nthe dependent Dirichlet processes literature, we propose a new mixture model of\nGaussian process experts based on kernel stick-breaking processes. Our model\nmaintains the intuitive appeal yet improve the performance of the existing\nmodels. To make it practical, we design a sampler for posterior computation\nbased on the slice sampling. The model behaviour and improved predictive\nperformance are demonstrated in experiments using six datasets.", + "authors": "Yuji Saikai, Khue-Dung Dang", + "published": "2023-04-26", + "updated": "2023-05-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1905.12969v1", + "title": "Enriched Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts probabilistically divide the input space into regions,\nwhere the assumptions of each expert, or conditional model, need only hold\nlocally. Combined with Gaussian process (GP) experts, this results in a\npowerful and highly flexible model. We focus on alternative mixtures of GP\nexperts, which model the joint distribution of the inputs and targets\nexplicitly. We highlight issues of this approach in multi-dimensional input\nspaces, namely, poor scalability and the need for an unnecessarily large number\nof experts, degrading the predictive performance and increasing uncertainty. We\nconstruct a novel model to address these issues through a nested partitioning\nscheme that automatically infers the number of components at both levels.\nMultiple response types are accommodated through a generalised GP framework,\nwhile multiple input types are included through a factorised exponential family\nstructure. We show the effectiveness of our approach in estimating a\nparsimonious probabilistic description of both synthetic data of increasing\ndimension and an Alzheimer's challenge dataset.",
+ "authors": "Charles W. L. Gadd, Sara Wade, Alexis Boukouvalas", + "published": "2019-05-30", + "updated": "2019-05-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.02043v1", + "title": "mixdistreg: An R Package for Fitting Mixture of Experts Distributional Regression with Adaptive First-order Methods", + "abstract": "This paper presents a high-level description of the R software package\nmixdistreg to fit mixture of experts distributional regression models. The\nproposed framework is implemented in R using the deepregression software\ntemplate, which is based on TensorFlow and follows the neural structured\nadditive learning principle. The software comprises various approaches as\nspecial cases, including mixture density networks and mixture regression\napproaches. Various code examples are given to demonstrate the package's\nfunctionality.", + "authors": "David R\u00fcgamer", + "published": "2023-02-04", + "updated": "2023-02-04", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2110.04260v3", + "title": "Taming Sparsely Activated Transformer with Stochastic Experts", + "abstract": "Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can\neasily scale to have outrageously large amounts of parameters without\nsignificant increase in computational cost. However, SAMs are reported to be\nparameter inefficient such that larger models do not always lead to better\nperformance. While most on-going research focuses on improving SAMs models by\nexploring methods of routing inputs to experts, our analysis reveals that such\nresearch might not lead to the solution we expect, i.e., the commonly-used\nrouting methods based on gating mechanisms do not work better than randomly\nrouting inputs to experts. In this paper, we propose a new expert-based model,\nTHOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models,\nsuch as the Switch Transformer, experts in THOR are randomly activated for each\ninput during training and inference. THOR models are trained using a\nconsistency regularized loss, where experts learn not only from training data\nbut also from other experts as teachers, such that all the experts make\nconsistent predictions. We validate the effectiveness of THOR on machine\ntranslation tasks. Results show that THOR models are more parameter efficient\nin that they significantly outperform the Transformer and MoE models across\nvarious settings. For example, in multilingual translation, THOR outperforms\nthe Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as\nthat of a state-of-the-art MoE model that is 18 times larger. Our code is\npublicly available at:\nhttps://github.com/microsoft/Stochastic-Mixture-of-Experts.", + "authors": "Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Tuo Zhao, Jianfeng Gao", + "published": "2021-10-08", + "updated": "2022-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.07806v1", + "title": "Transformer Based Multi-Source Domain Adaptation", + "abstract": "In practical machine learning settings, the data on which a model must make\npredictions often come from a different distribution than the data it was\ntrained on.
Here, we investigate the problem of unsupervised multi-source\ndomain adaptation, where a model is trained on labelled data from multiple\nsource domains and must make predictions on a domain for which no labelled data\nhas been seen. Prior work with CNNs and RNNs has demonstrated the benefit of\nmixture of experts, where the predictions of multiple domain expert classifiers\nare combined; as well as domain adversarial training, to induce a domain\nagnostic representation space. Inspired by this, we investigate how such\nmethods can be effectively applied to large pretrained transformer models. We\nfind that domain adversarial training has an effect on the learned\nrepresentations of these models while having little effect on their\nperformance, suggesting that large transformer-based models are already\nrelatively robust across domains. Additionally, we show that mixture of experts\nleads to significant performance improvements by comparing several variants of\nmixing functions, including one novel mixture based on attention. Finally, we\ndemonstrate that the predictions of large pretrained transformer based domain\nexperts are highly homogenous, making it challenging to learn effective\nfunctions for mixing their predictions.", + "authors": "Dustin Wright, Isabelle Augenstein", + "published": "2020-09-16", + "updated": "2020-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.03994v1", + "title": "Video Relationship Detection Using Mixture of Experts", + "abstract": "Machine comprehension of visual information from images and videos by neural\nnetworks faces two primary challenges. Firstly, there exists a computational\nand inference gap in connecting vision and language, making it difficult to\naccurately determine which object a given agent acts on and represent it\nthrough language. Secondly, classifiers trained by a single, monolithic neural\nnetwork often lack stability and generalization. To overcome these challenges,\nwe introduce MoE-VRD, a novel approach to visual relationship detection\nutilizing a mixture of experts. MoE-VRD identifies language triplets in the\nform of < subject, predicate, object> tuples to extract relationships from\nvisual processing. Leveraging recent advancements in visual relationship\ndetection, MoE-VRD addresses the requirement for action recognition in\nestablishing relationships between subjects (acting) and objects (being acted\nupon). In contrast to single monolithic networks, MoE-VRD employs multiple\nsmall models as experts, whose outputs are aggregated. Each expert in MoE-VRD\nspecializes in visual relationship learning and object tagging. By utilizing a\nsparsely-gated mixture of experts, MoE-VRD enables conditional computation and\nsignificantly enhances neural network capacity without increasing computational\ncomplexity. 
Our experimental results demonstrate that the conditional\ncomputation capabilities and scalability of the mixture-of-experts approach\nlead to superior performance in visual relationship detection compared to\nstate-of-the-art methods.", + "authors": "Ala Shaabana, Zahra Gharaee, Paul Fieguth", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1811.10740v2", + "title": "Mixture of Regression Experts in fMRI Encoding", + "abstract": "fMRI semantic category understanding using linguistic encoding models attempt\nto learn a forward mapping that relates stimuli to the corresponding brain\nactivation. Classical encoding models use linear multi-variate methods to\npredict the brain activation (all voxels) given the stimulus. However, these\nmethods essentially assume multiple regions as one large uniform region or\nseveral independent regions, ignoring connections among them. In this paper, we\npresent a mixture of experts-based model where a group of experts captures\nbrain activity patterns related to particular regions of interest (ROI) and\nalso show the discrimination across different experts. The model is trained\nword stimuli encoded as 25-dimensional feature vectors as input and the\ncorresponding brain responses as output. Given a new word (25-dimensional\nfeature vector), it predicts the entire brain activation as the linear\ncombination of multiple experts brain activations. We argue that each expert\nlearns a certain region of brain activations corresponding to its category of\nwords, which solves the problem of identifying the regions with a simple\nencoding model. We showcase that proposed mixture of experts-based model indeed\nlearns region-based experts to predict the brain activations with high spatial\naccuracy.", + "authors": "Subba Reddy Oota, Adithya Avvaru, Naresh Manwani, Raju S. Bapi", + "published": "2018-11-26", + "updated": "2018-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.HC", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1703.09302v1", + "title": "Speech Enhancement using a Deep Mixture of Experts", + "abstract": "In this study we present a Deep Mixture of Experts (DMoE) neural-network\narchitecture for single microphone speech enhancement. By contrast to most\nspeech enhancement algorithms that overlook the speech variability mainly\ncaused by phoneme structure, our framework comprises a set of deep neural\nnetworks (DNNs), each one of which is an 'expert' in enhancing a given speech\ntype corresponding to a phoneme. A gating DNN determines which expert is\nassigned to a given speech segment. A speech presence probability (SPP) is then\nobtained as a weighted average of the expert SPP decisions, with the weights\ndetermined by the gating DNN. A soft spectral attenuation, based on the SPP, is\nthen applied to enhance the noisy speech signal. The experts and the gating\ncomponents of the DMoE network are trained jointly. As part of the training,\nspeech clustering into different subsets is performed in an unsupervised\nmanner. Therefore, unlike previous methods, a phoneme-labeled database is not\nrequired for the training procedure. A series of experiments with different\nnoise types verified the applicability of the new algorithm to the task of\nspeech enhancement. 
The proposed scheme outperforms other schemes that either\ndo not consider phoneme structure or use a simpler training methodology.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2017-03-27", + "updated": "2017-03-27", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.05238v3", + "title": "Universal Simultaneous Machine Translation with Mixture-of-Experts Wait-k Policy", + "abstract": "Simultaneous machine translation (SiMT) generates translation before reading\nthe entire source sentence and hence it has to trade off between translation\nquality and latency. To fulfill the requirements of different translation\nquality and latency in practical applications, the previous methods usually\nneed to train multiple SiMT models for different latency levels, resulting in\nlarge computational costs. In this paper, we propose a universal SiMT model\nwith Mixture-of-Experts Wait-k Policy to achieve the best translation quality\nunder arbitrary latency with only one trained model. Specifically, our method\nemploys multi-head attention to accomplish the mixture of experts where each\nhead is treated as a wait-k expert with its own waiting words number, and given\na test latency and source inputs, the weights of the experts are accordingly\nadjusted to produce the best translation. Experiments on three datasets show\nthat our method outperforms all the strong baselines under different latency,\nincluding the state-of-the-art adaptive policy.", + "authors": "Shaolei Zhang, Yang Feng", + "published": "2021-09-11", + "updated": "2022-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2212.00471v1", + "title": "Implicit Mixture of Interpretable Experts for Global and Local Interpretability", + "abstract": "We investigate the feasibility of using mixtures of interpretable experts\n(MoIE) to build interpretable image classifiers on MNIST10. MoIE uses a\nblack-box router to assign each input to one of many inherently interpretable\nexperts, thereby providing insight into why a particular classification\ndecision was made. We find that a naively trained MoIE will learn to 'cheat',\nwhereby the black-box router will solve the classification problem by itself,\nwith each expert simply learning a constant function for one particular class.\nWe propose to solve this problem by introducing interpretable routers and\ntraining the black-box router's decisions to match the interpretable router. In\naddition, we propose a novel implicit parameterization scheme that allows us to\nbuild mixtures of arbitrary numbers of experts, allowing us to study how\nclassification performance, local and global interpretability vary as the\nnumber of experts is increased. 
Our new model, dubbed Implicit Mixture of\nInterpretable Experts (IMoIE) can match state-of-the-art classification\naccuracy on MNIST10 while providing local interpretability, and can provide\nglobal interpretability albeit at the cost of reduced classification accuracy.", + "authors": "Nathan Elazar, Kerry Taylor", + "published": "2022-12-01", + "updated": "2022-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.16710v1", + "title": "Prediction Sets for High-Dimensional Mixture of Experts Models", + "abstract": "Large datasets make it possible to build predictive models that can capture\nheterogenous relationships between the response variable and features. The\nmixture of high-dimensional linear experts model posits that observations come\nfrom a mixture of high-dimensional linear regression models, where the mixture\nweights are themselves feature-dependent. In this paper, we show how to\nconstruct valid prediction sets for an $\\ell_1$-penalized mixture of experts\nmodel in the high-dimensional setting. We make use of a debiasing procedure to\naccount for the bias induced by the penalization and propose a novel strategy\nfor combining intervals to form a prediction set with coverage guarantees in\nthe mixture setting. Synthetic examples and an application to the prediction of\ncritical temperatures of superconducting materials show our method to have\nreliable practical performance.", + "authors": "Adel Javanmard, Simeng Shao, Jacob Bien", + "published": "2022-10-30", + "updated": "2022-10-30", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.01899v1", + "title": "MiCE: Mixture of Contrastive Experts for Unsupervised Image Clustering", + "abstract": "We present Mixture of Contrastive Experts (MiCE), a unified probabilistic\nclustering framework that simultaneously exploits the discriminative\nrepresentations learned by contrastive learning and the semantic structures\ncaptured by a latent mixture model. Motivated by the mixture of experts, MiCE\nemploys a gating function to partition an unlabeled dataset into subsets\naccording to the latent semantics and multiple experts to discriminate distinct\nsubsets of instances assigned to them in a contrastive learning manner. To\nsolve the nontrivial inference and learning problems caused by the latent\nvariables, we further develop a scalable variant of the\nExpectation-Maximization (EM) algorithm for MiCE and provide proof of the\nconvergence. Empirically, we evaluate the clustering performance of MiCE on\nfour widely adopted natural image datasets. MiCE achieves significantly better\nresults than various previous methods and a strong contrastive learning\nbaseline.", + "authors": "Tsung Wei Tsai, Chongxuan Li, Jun Zhu", + "published": "2021-05-05", + "updated": "2021-05-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.07816v1", + "title": "Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", + "abstract": "We investigate efficient methods for training Large Language Models (LLMs) to\npossess capabilities in multiple specialized domains, such as coding, math\nreasoning and world knowledge. 
Our method, named Branch-Train-MiX (BTX), starts\nfrom a seed model, which is branched to train experts in embarrassingly\nparallel fashion with high throughput and reduced communication cost. After\nindividual experts are asynchronously trained, BTX brings together their\nfeedforward parameters as experts in Mixture-of-Expert (MoE) layers and\naverages the remaining parameters, followed by an MoE-finetuning stage to learn\ntoken-level routing. BTX generalizes two special cases, the Branch-Train-Merge\nmethod, which does not have the MoE finetuning stage to learn routing, and\nsparse upcycling, which omits the stage of training experts asynchronously.\nCompared to alternative approaches, BTX achieves the best accuracy-efficiency\ntradeoff.", + "authors": "Sainbayar Sukhbaatar, Olga Golovneva, Vasu Sharma, Hu Xu, Xi Victoria Lin, Baptiste Rozi\u00e8re, Jacob Kahn, Daniel Li, Wen-tau Yih, Jason Weston, Xian Li", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.00893v1", + "title": "MoDE: A Mixture-of-Experts Model with Mutual Distillation among the Experts", + "abstract": "The application of mixture-of-experts (MoE) is gaining popularity due to its\nability to improve model's performance. In an MoE structure, the gate layer\nplays a significant role in distinguishing and routing input features to\ndifferent experts. This enables each expert to specialize in processing their\ncorresponding sub-tasks. However, the gate's routing mechanism also gives rise\nto narrow vision: the individual MoE's expert fails to use more samples in\nlearning the allocated sub-task, which in turn limits the MoE to further\nimprove its generalization ability. To effectively address this, we propose a\nmethod called Mixture-of-Distilled-Expert (MoDE), which applies moderate mutual\ndistillation among experts to enable each expert to pick up more features\nlearned by other experts and gain more accurate perceptions on their original\nallocated sub-tasks. We conduct plenty experiments including tabular, NLP and\nCV datasets, which shows MoDE's effectiveness, universality and robustness.\nFurthermore, we develop a parallel study through innovatively constructing\n\"expert probing\", to experimentally prove why MoDE works: moderate distilling\nknowledge can improve each individual expert's test performances on their\nassigned tasks, leading to MoE's overall performance improvement.", + "authors": "Zhitian Xie, Yinger Zhang, Chenyi Zhuang, Qitao Shi, Zhining Liu, Jinjie Gu, Guannan Zhang", + "published": "2024-01-31", + "updated": "2024-01-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.10768v1", + "title": "Memory Augmented Language Models through Mixture of Word Experts", + "abstract": "Scaling up the number of parameters of language models has proven to be an\neffective approach to improve performance. For dense models, increasing model\nsize proportionally increases the model's computation footprint. In this work,\nwe seek to aggressively decouple learning capacity and FLOPs through\nMixture-of-Experts (MoE) style models with large knowledge-rich vocabulary\nbased routing functions and experts. 
Our proposed approach, dubbed Mixture of\nWord Experts (MoWE), can be seen as a memory augmented model, where a large set\nof word-specific experts play the role of a sparse memory. We demonstrate that\nMoWE performs significantly better than the T5 family of models with similar\nnumber of FLOPs in a variety of NLP tasks. Additionally, MoWE outperforms\nregular MoE models on knowledge intensive tasks and has similar performance to\nmore complex memory augmented approaches that often require to invoke custom\nmechanisms to search the sparse memory.", + "authors": "Cicero Nogueira dos Santos, James Lee-Thorp, Isaac Noble, Chung-Ching Chang, David Uthus", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1911.08151v2", + "title": "Retrospective and Prospective Mixture-of-Generators for Task-oriented Dialogue Response Generation", + "abstract": "Dialogue response generation (DRG) is a critical component of task-oriented\ndialogue systems (TDSs). Its purpose is to generate proper natural language\nresponses given some context, e.g., historical utterances, system states, etc.\nState-of-the-art work focuses on how to better tackle DRG in an end-to-end way.\nTypically, such studies assume that each token is drawn from a single\ndistribution over the output vocabulary, which may not always be optimal.\nResponses vary greatly with different intents, e.g., domains, system actions.\n We propose a novel mixture-of-generators network (MoGNet) for DRG, where we\nassume that each token of a response is drawn from a mixture of distributions.\nMoGNet consists of a chair generator and several expert generators. Each expert\nis specialized for DRG w.r.t. a particular intent. The chair coordinates\nmultiple experts and combines the output they have generated to produce more\nappropriate responses. We propose two strategies to help the chair make better\ndecisions, namely, a retrospective mixture-of-generators (RMoG) and prospective\nmixture-of-generators (PMoG). The former only considers the historical\nexpert-generated responses until the current time step while the latter also\nconsiders possible expert-generated responses in the future by encouraging\nexploration. In order to differentiate experts, we also devise a\nglobal-and-local (GL) learning scheme that forces each expert to be specialized\ntowards a particular intent using a local loss and trains the chair and all\nexperts to coordinate using a global loss.\n We carry out extensive experiments on the MultiWOZ benchmark dataset. MoGNet\nsignificantly outperforms state-of-the-art methods in terms of both automatic\nand human evaluations, demonstrating its effectiveness for DRG.", + "authors": "Jiahuan Pei, Pengjie Ren, Christof Monz, Maarten de Rijke", + "published": "2019-11-19", + "updated": "2020-02-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2004.03751v4", + "title": "Robust Fitting of Mixture Models using Weighted Complete Estimating Equations", + "abstract": "Mixture modeling, which considers the potential heterogeneity in data, is\nwidely adopted for classification and clustering problems. 
Mixture models can\nbe estimated using the Expectation-Maximization algorithm, which works with the\ncomplete estimating equations conditioned by the latent membership variables of\nthe cluster assignment based on the hierarchical expression of mixture models.\nHowever, when the mixture components have light tails such as a normal\ndistribution, the mixture model can be sensitive to outliers. This study\nproposes a method of weighted complete estimating equations (WCE) for the\nrobust fitting of mixture models. Our WCE introduces weights to complete\nestimating equations such that the weights can automatically downweight the\noutliers. The weights are constructed similarly to the density power divergence\nfor mixture models, but in our WCE, they depend only on the component\ndistributions and not on the whole mixture. A novel\nexpectation-estimating-equation (EEE) algorithm is also developed to solve the\nWCE. For illustrative purposes, a multivariate Gaussian mixture, a mixture of\nexperts, and a multivariate skew normal mixture are considered, and how our EEE\nalgorithm can be implemented for these specific models is described. The\nnumerical performance of the proposed robust estimation method was examined\nusing simulated and real datasets.", + "authors": "Shonosuke Sugasawa, Genya Kobayashi", + "published": "2020-04-08", + "updated": "2022-03-17", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.15961v1", + "title": "Mixture of Tokens: Efficient LLMs through Cross-Example Aggregation", + "abstract": "Despite the promise of Mixture of Experts (MoE) models in increasing\nparameter counts of Transformer models while maintaining training and inference\ncosts, their application carries notable drawbacks. The key strategy of these\nmodels is to, for each processed token, activate at most a few experts -\nsubsets of an extensive feed-forward layer. But this approach is not without\nits challenges. The operation of matching experts and tokens is discrete, which\nmakes MoE models prone to issues like training instability and uneven expert\nutilization. Existing techniques designed to address these concerns, such as\nauxiliary losses or balance-aware matching, result either in lower model\nperformance or are more difficult to train. In response to these issues, we\npropose Mixture of Tokens, a fully-differentiable model that retains the\nbenefits of MoE architectures while avoiding the aforementioned difficulties.\nRather than routing tokens to experts, this approach mixes tokens from\ndifferent examples prior to feeding them to experts, enabling the model to\nlearn from all token-expert combinations. Importantly, this mixing can be\ndisabled to avoid mixing of different sequences during inference. Crucially,\nthis method is fully compatible with both masked and causal Large Language\nModel training and inference.", + "authors": "Szymon Antoniak, Sebastian Jaszczur, Micha\u0142 Krutul, Maciej Pi\u00f3ro, Jakub Krajewski, Jan Ludziejewski, Tomasz Odrzyg\u00f3\u017ad\u017a, Marek Cygan", + "published": "2023-10-24", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1904.09948v1", + "title": "PLUME: Polyhedral Learning Using Mixture of Experts", + "abstract": "In this paper, we propose a novel mixture of expert architecture for learning\npolyhedral classifiers. 
We learn the parameters of the classifierusing an\nexpectation maximization algorithm. Wederive the generalization bounds of the\nproposedapproach. Through an extensive simulation study, we show that the\nproposed method performs comparably to other state-of-the-art approaches.", + "authors": "Kulin Shah, P. S. Sastry, Naresh Manwani", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.06327v1", + "title": "Double-Wing Mixture of Experts for Streaming Recommendations", + "abstract": "Streaming Recommender Systems (SRSs) commonly train recommendation models on\nnewly received data only to address user preference drift, i.e., the changing\nuser preferences towards items. However, this practice overlooks the long-term\nuser preferences embedded in historical data. More importantly, the common\nheterogeneity in data stream greatly reduces the accuracy of streaming\nrecommendations. The reason is that different preferences (or characteristics)\nof different types of users (or items) cannot be well learned by a unified\nmodel. To address these two issues, we propose a Variational and\nReservoir-enhanced Sampling based Double-Wing Mixture of Experts framework,\ncalled VRS-DWMoE, to improve the accuracy of streaming recommendations. In\nVRS-DWMoE, we first devise variational and reservoir-enhanced sampling to\nwisely complement new data with historical data, and thus address the user\npreference drift issue while capturing long-term user preferences. After that,\nwe propose a Double-Wing Mixture of Experts (DWMoE) model to first effectively\nlearn heterogeneous user preferences and item characteristics, and then make\nrecommendations based on them. Specifically, DWMoE contains two Mixture of\nExperts (MoE, an effective ensemble learning model) to learn user preferences\nand item characteristics, respectively. Moreover, the multiple experts in each\nMoE learn the preferences (or characteristics) of different types of users (or\nitems) where each expert specializes in one underlying type. Extensive\nexperiments demonstrate that VRS-DWMoE consistently outperforms the\nstate-of-the-art SRSs.", + "authors": "Yan Zhao, Shoujin Wang, Yan Wang, Hongwei Liu, Weizhe Zhang", + "published": "2020-09-14", + "updated": "2020-09-14", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.01778v1", + "title": "Hierarchical mixture of discriminative Generalized Dirichlet classifiers", + "abstract": "This paper presents a discriminative classifier for compositional data. This\nclassifier is based on the posterior distribution of the Generalized Dirichlet\nwhich is the discriminative counterpart of Generalized Dirichlet mixture model.\nMoreover, following the mixture of experts paradigm, we proposed a hierarchical\nmixture of this classifier. In order to learn the models parameters, we use a\nvariational approximation by deriving an upper-bound for the Generalized\nDirichlet mixture. To the best of our knownledge, this is the first time this\nbound is proposed in the literature. 
Experimental results are presented for\nspam detection and color space identification.", + "authors": "Elvis Togban, Djemel Ziou", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.11412v1", + "title": "Expert Composer Policy: Scalable Skill Repertoire for Quadruped Robots", + "abstract": "We propose the expert composer policy, a framework to reliably expand the\nskill repertoire of quadruped agents. The composer policy links pair of experts\nvia transitions to a sampled target state, allowing experts to be composed\nsequentially. Each expert specializes in a single skill, such as a locomotion\ngait or a jumping motion. Instead of a hierarchical or mixture-of-experts\narchitecture, we train a single composer policy in an independent process that\nis not conditioned on the other expert policies. By reusing the same composer\npolicy, our approach enables adding new experts without affecting existing\nones, enabling incremental repertoire expansion and preserving original motion\nquality. We measured the transition success rate of 72 transition pairs and\nachieved an average success rate of 99.99\\%, which is over 10\\% higher than the\nbaseline random approach, and outperforms other state-of-the-art methods. Using\ndomain randomization during training we ensure a successful transfer to the\nreal world, where we achieve an average transition success rate of 97.22\\%\n(N=360) in our experiments.", + "authors": "Guilherme Christmann, Ying-Sheng Luo, Wei-Chao Chen", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.RO", + "cats": [ + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2205.01848v2", + "title": "Optimizing Mixture of Experts using Dynamic Recompilations", + "abstract": "The Mixture of Experts architecture allows for outrageously large neural\nnetworks by scaling model parameter size independently from computational\ndemand (FLOPs). However, current DNN frameworks cannot effectively support the\ndynamic data flow in Mixture of Experts, and implementations on top of these\nframeworks need to use workarounds that introduce significant overheads. To\naddress the limitation of these frameworks, we present DynaMoE, a DNN library\nthat uses dynamic recompilations to optimize and adapt the use of computational\nresources to the dynamic needs of Mixture of Experts models. Our evaluation\nshows that DynaMoE achieves a 1.8x speedup and supports 2.3x larger model sizes\nwhen compared to existing MoE systems, even when not using recompilations. We\nthen present further optimizations enabled by dynamic recompilations that yield\nan additional 1.7x speedup while simultaneously reducing memory pressure and\nimproving model quality.", + "authors": "Ferdinand Kossmann, Zhihao Jia, Alex Aiken", + "published": "2022-05-04", + "updated": "2022-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.04693v2", + "title": "GraphMETRO: Mitigating Complex Graph Distribution Shifts via Mixture of Aligned Experts", + "abstract": "Graph data are inherently complex and heterogeneous, leading to a high\nnatural diversity of distributional shifts. 
However, it remains unclear how to\nbuild machine learning architectures that generalize to complex non-synthetic\ndistributional shifts naturally occurring in the real world. Here we develop\nGraphMETRO, a Graph Neural Network architecture, that reliably models natural\ndiversity and captures complex distributional shifts. GraphMETRO employs a\nMixture-of-Experts (MoE) architecture with a gating model and multiple expert\nmodels, where each expert model targets a specific distributional shift to\nproduce a shift-invariant representation, and the gating model identifies shift\ncomponents. Additionally, we design a novel objective that aligns the\nrepresentations from different expert models to ensure smooth optimization.\nGraphMETRO achieves state-of-the-art results on four datasets from GOOD\nbenchmark comprised of complex and natural real-world distribution shifts,\nimproving by 67% and 4.2% on WebKB and Twitch datasets.", + "authors": "Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, Jure Leskovec", + "published": "2023-12-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.16610v1", + "title": "Efficient Deweather Mixture-of-Experts with Uncertainty-aware Feature-wise Linear Modulation", + "abstract": "The Mixture-of-Experts (MoE) approach has demonstrated outstanding\nscalability in multi-task learning including low-level upstream tasks such as\nconcurrent removal of multiple adverse weather effects. However, the\nconventional MoE architecture with parallel Feed Forward Network (FFN) experts\nleads to significant parameter and computational overheads that hinder its\nefficient deployment. In addition, the naive MoE linear router is suboptimal in\nassigning task-specific features to multiple experts which limits its further\nscalability. In this work, we propose an efficient MoE architecture with weight\nsharing across the experts. Inspired by the idea of linear feature modulation\n(FM), our architecture implicitly instantiates multiple experts via learnable\nactivation modulations on a single shared expert block. The proposed Feature\nModulated Expert (FME) serves as a building block for the novel\nMixture-of-Feature-Modulation-Experts (MoFME) architecture, which can scale up\nthe number of experts with low overhead. We further propose an\nUncertainty-aware Router (UaR) to assign task-specific features to different FM\nmodules with well-calibrated weights. This enables MoFME to effectively learn\ndiverse expert functions for multiple tasks. The conducted experiments on the\nmulti-deweather task show that our MoFME outperforms the baselines in the image\nrestoration quality by 0.1-0.2 dB and achieves SOTA-compatible performance\nwhile saving more than 72% of parameters and 39% inference time over the\nconventional MoE counterpart. 
Experiments on the downstream segmentation and\nclassification tasks further demonstrate the generalizability of MoFME to real\nopen-world applications.", + "authors": "Rongyu Zhang, Yulin Luo, Jiaming Liu, Huanrui Yang, Zhen Dong, Denis Gudovskiy, Tomoyuki Okuno, Yohei Nakata, Kurt Keutzer, Yuan Du, Shanghang Zhang", + "published": "2023-12-27", + "updated": "2023-12-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2209.13071v1", + "title": "Diversified Dynamic Routing for Vision Tasks", + "abstract": "Deep learning models for vision tasks are trained on large datasets under the\nassumption that there exists a universal representation that can be used to\nmake predictions for all samples. Whereas high complexity models are proven to\nbe capable of learning such representations, a mixture of experts trained on\nspecific subsets of the data can infer the labels more efficiently. However\nusing mixture of experts poses two new problems, namely (i) assigning the\ncorrect expert at inference time when a new unseen sample is presented. (ii)\nFinding the optimal partitioning of the training data, such that the experts\nrely the least on common features. In Dynamic Routing (DR) a novel architecture\nis proposed where each layer is composed of a set of experts, however without\naddressing the two challenges we demonstrate that the model reverts to using\nthe same subset of experts.\n In our method, Diversified Dynamic Routing (DivDR) the model is explicitly\ntrained to solve the challenge of finding relevant partitioning of the data and\nassigning the correct experts in an unsupervised approach. We conduct several\nexperiments on semantic segmentation on Cityscapes and object detection and\ninstance segmentation on MS-COCO showing improved performance over several\nbaselines.", + "authors": "Botos Csaba, Adel Bibi, Yanwei Li, Philip Torr, Ser-Nam Lim", + "published": "2022-09-26", + "updated": "2022-09-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.06966v1", + "title": "Acquiring Diverse Skills using Curriculum Reinforcement Learning with Mixture of Experts", + "abstract": "Reinforcement learning (RL) is a powerful approach for acquiring a\ngood-performing policy. However, learning diverse skills is challenging in RL\ndue to the commonly used Gaussian policy parameterization. We propose\n\\textbf{Di}verse \\textbf{Skil}l \\textbf{L}earning (Di-SkilL), an RL method for\nlearning diverse skills using Mixture of Experts, where each expert formalizes\na skill as a contextual motion primitive. Di-SkilL optimizes each expert and\nits associate context distribution to a maximum entropy objective that\nincentivizes learning diverse skills in similar contexts. The per-expert\ncontext distribution enables automatic curricula learning, allowing each expert\nto focus on its best-performing sub-region of the context space. To overcome\nhard discontinuities and multi-modalities without any prior knowledge of the\nenvironment's unknown context probability space, we leverage energy-based\nmodels to represent the per-expert context distributions and demonstrate how we\ncan efficiently train them using the standard policy gradient objective. 
We\nshow on challenging robot simulation tasks that Di-SkilL can learn diverse and\nperformant skills.", + "authors": "Onur Celik, Aleksandar Taranovic, Gerhard Neumann", + "published": "2024-03-11", + "updated": "2024-03-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.09179v1", + "title": "SiRA: Sparse Mixture of Low Rank Adaptation", + "abstract": "Parameter Efficient Tuning has been an prominent approach to adapt the Large\nLanguage Model to downstream tasks. Most previous works considers adding the\ndense trainable parameters, where all parameters are used to adapt certain\ntask. We found this less effective empirically using the example of LoRA that\nintroducing more trainable parameters does not help. Motivated by this we\ninvestigate the importance of leveraging \"sparse\" computation and propose SiRA:\nsparse mixture of low rank adaption. SiRA leverages the Sparse Mixture of\nExpert(SMoE) to boost the performance of LoRA. Specifically it enforces the top\n$k$ experts routing with a capacity limit restricting the maximum number of\ntokens each expert can process. We propose a novel and simple expert dropout on\ntop of gating network to reduce the over-fitting issue. Through extensive\nexperiments, we verify SiRA performs better than LoRA and other mixture of\nexpert approaches across different single tasks and multitask settings.", + "authors": "Yun Zhu, Nevan Wichers, Chu-Cheng Lin, Xinyi Wang, Tianlong Chen, Lei Shu, Han Lu, Canoee Liu, Liangchen Luo, Jindong Chen, Lei Meng", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2207.09094v1", + "title": "MoEC: Mixture of Expert Clusters", + "abstract": "Sparsely Mixture of Experts (MoE) has received great interest due to its\npromising scaling capability with affordable computational overhead. MoE\nconverts dense layers into sparse experts, and utilizes a gated routing network\nto make experts conditionally activated. However, as the number of experts\ngrows, MoE with outrageous parameters suffers from overfitting and sparse data\nallocation. Such problems are especially severe on tasks with limited data,\nthus hindering the progress for MoE models to improve performance by scaling\nup. In this work, we propose Mixture of Expert Clusters - a general approach to\nenable expert layers to learn more diverse and appropriate knowledge by\nimposing variance-based constraints on the routing stage. We further propose a\ncluster-level expert dropout strategy specifically designed for the expert\ncluster structure. Our experiments reveal that MoEC could improve performance\non machine translation and natural language understanding tasks, and raise the\nperformance upper bound for scaling up experts under limited data. 
We also\nverify that MoEC plays a positive role in mitigating overfitting and sparse\ndata allocation.", + "authors": "Yuan Xie, Shaohan Huang, Tianyu Chen, Furu Wei", + "published": "2022-07-19", + "updated": "2022-07-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.00361v1", + "title": "AdaMoLE: Fine-Tuning Large Language Models with Adaptive Mixture of Low-Rank Adaptation Experts", + "abstract": "We introduce AdaMoLE, a novel method for fine-tuning large language models\n(LLMs) through an Adaptive Mixture of Low-Rank Adaptation (LoRA) Experts.\nMoving beyond conventional methods that employ a static top-k strategy for\nactivating experts, AdaMoLE dynamically adjusts the activation threshold using\na dedicated threshold network, adaptively responding to the varying\ncomplexities of different tasks. By replacing a single LoRA in a layer with\nmultiple LoRA experts and integrating a gating function with the threshold\nmechanism, AdaMoLE effectively selects and activates the most appropriate\nexperts based on the input context. Our extensive evaluations across a variety\nof commonsense reasoning and natural language processing tasks show that\nAdaMoLE exceeds baseline performance. This enhancement highlights the\nadvantages of AdaMoLE's adaptive selection of LoRA experts, improving model\neffectiveness without a corresponding increase in the expert count. The\nexperimental validation not only confirms AdaMoLE as a robust approach for\nenhancing LLMs but also suggests valuable directions for future research in\nadaptive expert selection mechanisms, potentially broadening the scope for\noptimizing model performance across diverse language processing tasks.", + "authors": "Zefang Liu, Jiahua Luo", + "published": "2024-05-01", + "updated": "2024-05-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17749v1", + "title": "Multi-Task Dense Prediction via Mixture of Low-Rank Experts", + "abstract": "Previous multi-task dense prediction methods based on the Mixture of Experts\n(MoE) have received great performance but they neglect the importance of\nexplicitly modeling the global relations among all tasks. In this paper, we\npresent a novel decoder-focused method for multi-task dense prediction, called\nMixture-of-Low-Rank-Experts (MLoRE). To model the global task relationships,\nMLoRE adds a generic convolution path to the original MoE structure, where each\ntask feature can go through this path for explicit parameter sharing.\nFurthermore, to control the parameters and computational cost brought by the\nincrease in the number of experts, we take inspiration from LoRA and propose to\nleverage the low-rank format of a vanilla convolution in the expert network.\nSince the low-rank experts have fewer parameters and can be dynamically\nparameterized into the generic convolution, the parameters and computational\ncost do not change much with the increase of experts. Benefiting from this\ndesign, we increase the number of experts and its reception field to enlarge\nthe representation capacity, facilitating multiple dense tasks learning in a\nunified network. Extensive experiments on the PASCAL-Context and NYUD-v2\nbenchmarks show that our MLoRE achieves superior performance compared to\nprevious state-of-the-art methods on all metrics. 
Our code is available at\nhttps://github.com/YuqiYang213/MLoRE.", + "authors": "Yuqi Yang, Peng-Tao Jiang, Qibin Hou, Hao Zhang, Jinwei Chen, Bo Li", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1409.4698v1", + "title": "A Mixtures-of-Experts Framework for Multi-Label Classification", + "abstract": "We develop a novel probabilistic approach for multi-label classification that\nis based on the mixtures-of-experts architecture combined with recently\nintroduced conditional tree-structured Bayesian networks. Our approach captures\ndifferent input-output relations from multi-label data using the efficient\ntree-structured classifiers, while the mixtures-of-experts architecture aims to\ncompensate for the tree-structured restrictions and build a more accurate\nmodel. We develop and present algorithms for learning the model from data and\nfor performing multi-label predictions on future data instances. Experiments on\nmultiple benchmark datasets demonstrate that our approach achieves highly\ncompetitive results and outperforms the existing state-of-the-art multi-label\nclassification methods.", + "authors": "Charmgil Hong, Iyad Batal, Milos Hauskrecht", + "published": "2014-09-16", + "updated": "2014-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "I.2.6" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2206.00277v2", + "title": "Task-Specific Expert Pruning for Sparse Mixture-of-Experts", + "abstract": "The sparse Mixture-of-Experts (MoE) model is powerful for large-scale\npre-training and has achieved promising results due to its model capacity.\nHowever, with trillions of parameters, MoE is hard to be deployed on cloud or\nmobile environment. The inference of MoE requires expert parallelism, which is\nnot hardware-friendly and communication expensive. Especially for\nresource-limited downstream tasks, such sparse structure has to sacrifice a lot\nof computing efficiency for limited performance gains. In this work, we observe\nmost experts contribute scarcely little to the MoE fine-tuning and inference.\nWe further propose a general method to progressively drop the non-professional\nexperts for the target downstream task, which preserves the benefits of MoE\nwhile reducing the MoE model into one single-expert dense model. Our\nexperiments reveal that the fine-tuned single-expert model could preserve 99.3%\nbenefits from MoE across six different types of tasks while enjoying 2x\ninference speed with free communication cost.", + "authors": "Tianyu Chen, Shaohan Huang, Yuan Xie, Binxing Jiao, Daxin Jiang, Haoyi Zhou, Jianxin Li, Furu Wei", + "published": "2022-06-01", + "updated": "2022-06-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02629v2", + "title": "BA-MoE: Boundary-Aware Mixture-of-Experts Adapter for Code-Switching Speech Recognition", + "abstract": "Mixture-of-experts based models, which use language experts to extract\nlanguage-specific representations effectively, have been well applied in\ncode-switching automatic speech recognition. However, there is still\nsubstantial space to improve as similar pronunciation across languages may\nresult in ineffective multi-language modeling and inaccurate language boundary\nestimation. 
To eliminate these drawbacks, we propose a cross-layer language\nadapter and a boundary-aware training method, namely Boundary-Aware\nMixture-of-Experts (BA-MoE). Specifically, we introduce language-specific\nadapters to separate language-specific representations and a unified gating\nlayer to fuse representations within each encoder layer. Second, we compute\nlanguage adaptation loss of the mean output of each language-specific adapter\nto improve the adapter module's language-specific representation learning.\nBesides, we utilize a boundary-aware predictor to learn boundary\nrepresentations for dealing with language boundary confusion. Our approach\nachieves significant performance improvement, reducing the mixture error rate\nby 16.55\\% compared to the baseline on the ASRU 2019 Mandarin-English\ncode-switching challenge dataset.", + "authors": "Peikun Chen, Fan Yu, Yuhao Lian, Hongfei Xue, Xucheng Wan, Naijun Zheng, Huan Zhou, Lei Xie", + "published": "2023-10-04", + "updated": "2023-10-08", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.11449v2", + "title": "Dynamic Mixture of Experts Models for Online Prediction", + "abstract": "A mixture of experts models the conditional density of a response variable\nusing a mixture of regression models with covariate-dependent mixture weights.\nWe extend the finite mixture of experts model by allowing the parameters in\nboth the mixture components and the weights to evolve in time by following\nrandom walk processes. Inference for time-varying parameters in richly\nparameterized mixture of experts models is challenging. We propose a sequential\nMonte Carlo algorithm for online inference and based on a tailored proposal\ndistribution built on ideas from linear Bayes methods and the EM algorithm. The\nmethod gives a unified treatment for mixtures with time-varying parameters,\nincluding the special case of static parameters. We assess the properties of\nthe method on simulated data and on industrial data where the aim is to predict\nsoftware faults in a continuously upgraded large-scale software project.", + "authors": "Parfait Munezero, Mattias Villani, Robert Kohn", + "published": "2021-09-23", + "updated": "2022-10-13", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO", + "stat.AP" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.08245v1", + "title": "Scattered Mixture-of-Experts Implementation", + "abstract": "We present ScatterMoE, an implementation of Sparse Mixture-of-Experts (SMoE)\non GPUs. ScatterMoE builds upon existing implementations, and overcoming some\nof the limitations to improve inference and training speed, and memory\nfootprint. This implementation achieves this by avoiding padding and making\nexcessive copies of the input. We introduce ParallelLinear, the main component\nwe use to build our implementation and the various kernels used to speed up the\noperation. We benchmark our implementation against Megablocks, and show that it\nenables a higher throughput and lower memory footprint. 
We also show how\nParallelLinear enables extension of the Mixture-of-Experts concept by\ndemonstrating with an implementation of Mixture of Attention.", + "authors": "Shawn Tan, Yikang Shen, Rameswar Panda, Aaron Courville", + "published": "2024-03-13", + "updated": "2024-03-13", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1110.2058v2", + "title": "Convergence Rates for Mixture-of-Experts", + "abstract": "In mixtures-of-experts (ME) model, where a number of submodels (experts) are\ncombined, there have been two longstanding problems: (i) how many experts\nshould be chosen, given the size of the training data? (ii) given the total\nnumber of parameters, is it better to use a few very complex experts, or is it\nbetter to combine many simple experts? In this paper, we try to provide some\ninsights to these problems through a theoretic study on a ME structure where\n$m$ experts are mixed, with each expert being related to a polynomial\nregression model of order $k$. We study the convergence rate of the maximum\nlikelihood estimator (MLE), in terms of how fast the Kullback-Leibler\ndivergence of the estimated density converges to the true density, when the\nsample size $n$ increases. The convergence rate is found to be dependent on\nboth $m$ and $k$, and certain choices of $m$ and $k$ are found to produce\noptimal convergence rates. Therefore, these results shed light on the two\naforementioned important problems: on how to choose $m$, and on how $m$ and $k$\nshould be compromised, for achieving good convergence rates.", + "authors": "Eduardo F. Mendes, Wenxin Jiang", + "published": "2011-10-10", + "updated": "2011-11-01", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05444v1", + "title": "Pushing Mixture of Experts to the Limit: Extremely Parameter Efficient MoE for Instruction Tuning", + "abstract": "The Mixture of Experts (MoE) is a widely known neural architecture where an\nensemble of specialized sub-models optimizes overall performance with a\nconstant computational cost. However, conventional MoEs pose challenges at\nscale due to the need to store all experts in memory. In this paper, we push\nMoE to the limit. We propose extremely parameter-efficient MoE by uniquely\ncombining MoE architecture with lightweight experts.Our MoE architecture\noutperforms standard parameter-efficient fine-tuning (PEFT) methods and is on\npar with full fine-tuning by only updating the lightweight experts -- less than\n1% of an 11B parameters model. Furthermore, our method generalizes to unseen\ntasks as it does not depend on any prior task knowledge. Our research\nunderscores the versatility of the mixture of experts architecture, showcasing\nits ability to deliver robust performance even when subjected to rigorous\nparameter constraints. 
Our code used in all the experiments is publicly\navailable here: https://github.com/for-ai/parameter-efficient-moe.", + "authors": "Ted Zadouri, Ahmet \u00dcst\u00fcn, Arash Ahmadian, Beyza Ermi\u015f, Acyr Locatelli, Sara Hooker", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + } + ], + [ + { + "url": "http://arxiv.org/abs/2306.11027v1", + "title": "JiuZhang 2.0: A Unified Chinese Pre-trained Language Model for Multi-task Mathematical Problem Solving", + "abstract": "Although pre-trained language models~(PLMs) have recently advanced the\nresearch progress in mathematical reasoning, they are not specially designed as\na capable multi-task solver, suffering from high cost for multi-task deployment\n(\\eg a model copy for a task) and inferior performance on complex mathematical\nproblems in practical applications. To address these issues, in this paper, we\npropose \\textbf{JiuZhang~2.0}, a unified Chinese PLM specially for multi-task\nmathematical problem solving. Our idea is to maintain a moderate-sized model\nand employ the \\emph{cross-task knowledge sharing} to improve the model\ncapacity in a multi-task setting. Specially, we construct a\nMixture-of-Experts~(MoE) architecture for modeling mathematical text, so as to\ncapture the common mathematical knowledge across tasks. For optimizing the MoE\narchitecture, we design \\emph{multi-task continual pre-training} and\n\\emph{multi-task fine-tuning} strategies for multi-task adaptation. These\ntraining strategies can effectively decompose the knowledge from the task data\nand establish the cross-task sharing via expert networks. In order to further\nimprove the general capacity of solving different complex tasks, we leverage\nlarge language models~(LLMs) as complementary models to iteratively refine the\ngenerated solution by our PLM, via in-context learning. Extensive experiments\nhave demonstrated the effectiveness of our model.", + "authors": "Wayne Xin Zhao, Kun Zhou, Beichen Zhang, Zheng Gong, Zhipeng Chen, Yuanhang Zhou, Ji-Rong Wen, Jing Sha, Shijin Wang, Cong Liu, Guoping Hu", + "published": "2023-06-19", + "updated": "2023-06-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Original Paper", + "paper_cat": "Mixture AND of AND Experts", + "gt": "This work focuses on solving mathematical problems, which has been extensively discussed in the literature [37, 38, 48]. Various resources or toolkits are released [8, 22, 28], and also empower a variety of math-related applications [3, 53, 58]. In the following, we will review the related study in three major technical approaches. Traditional NLP Approaches. Since mathematical problems are described in natural language, it is straightforward to cast the understanding of mathematical problems as a natural language processing (NLP) task. A major difficulty lies in the understanding of the formulas and logic that mathematical text contains. Thus, early NLP approaches typically extract the features for understanding the text and formulas, e.g., semantic parser [47] and operator tree [53]. In recent years, a surge of methods introduce the deep neural network into mathematical problem understanding. They generally leverage advanced NLP models, e.g., RNN [7] and Transformer [31], to encode the mathematical text into meaningful representations. PLM Based Approaches. 
Inspired by the success of PLMs in NLP tasks, researchers employ PLMs to deal with mathematical problems [41, 42], showing the superiority in understanding and modeling of mathematical texts. Basically, these methods continually pretrain PLMs (e.g., BERT [10]) with a specific math corpus, and design proper pre-training strategies to capture the semantics of the formulas and logics conveyed in the mathematical texts, e.g., text-formula representation alignment [18, 41], basic-to-advanced curriculum pre-training [56] and unified multi-task learning [39]. However, existing PLM approaches cannot well solve complex mathematical problems and also have a high cost in multi-task deployment. LLM Based Approaches. In contrast to PLMs with moderate sizes, large language models (LLMs) [4, 5, 57] are introduced to solve mathematical problems [8, 21, 30, 39]. Further, external modules or tools are used to assist LLMs in complex math problem solving, e.g., program interpreter [6, 13, 16]. Since it is very costly to tune LLMs, in-context learning [4] has been widely used to solve different tasks, e.g., chain-of-thought (CoT) method that uses multi-step reasoning [51]. Based on CoT, several improvements have been proposed for mathematical reasoning, including selecting more appropriate samples [15, 54], designing better instructions [25], generating multiple results for ranking [32, 50, 60] and decomposing problem into sub-problems [59]. However, it is hard for LLMs to adapt to the domains or tasks with large differences from the pretraining setting [21], e.g., Chinese mathematical problem solving. Besides, our model is built on MoE architecture [23], which aims to scale up the model capacity with controllable computational cost. For MoE architectures, it is important to design suitable expert network [43], routing mechanism [19, 26, 52] and training strategies [46, 52, 61]. While, our work has presented a novel application of MoE for dealing with mathematical tasks, with specific improvements. Our work is also related to multi-task learning based on language models [1, 35], while our focus is to share mathematical knowledge across. We design specific architecture and corresponding training strategies for mathematical problem solving, which distinguishes it from prior work on multi-task learning.", + "pre_questions": [], + "main_content": "INTRODUCTION Recently, the mathematical reasoning capacity of machines has been largely empowered by the progress of pre-trained language models (PLMs) [30, 39, 41, 56]. By pre-training on large-scale mathematical corpus with specially designed tasks, PLMs can understand the mathematical formulas and logic to a certain extent [56], achieving better performance on a variety of math-related tasks. Despite the progress, existing PLM based approaches still have two major limitations in real-world math-related applications. (1) Limited task performance: due to the limit of model capacity and pre-training data, PLMs are less capable of understanding complex mathematical problems, thus suffering from performance degradation on difficult tasks. (2) Large maintenance cost: an online application often supports multiple math-related tasks (e.g., similar problem retrieval and knowledge point classification), while PLMs need to be fine-tuned task by task when dealing with different downstream tasks, taking a significant cost of maintaining multitask solvers (e.g., a model copy for a task). 
By exploring the scaling laws, large language models (LLMs) [4, 5] can overcome the above issues to some extent with stronger mathematical reasoning ability. (In this paper, PLMs and LLMs refer to mathematical language models with moderate sizes (e.g., BERT [10]) and huge sizes (e.g., GPT-3 [4]), respectively.) However, they are very costly to tune for task or domain adaptation. Although in-context learning [4] can be applied to solve different tasks in an efficient way (with no need for fine-tuning), it is still difficult to adapt LLMs to specific tasks that require rich domain knowledge, e.g., English-focused LLMs such as GPT-3 [4] and CodeX [5] cannot perform very well on Chinese mathematical problems (as shown in Table 2). Considering the above issues, we aim to develop a more effective Chinese PLM that can well adapt to multiple complex mathematical tasks, so as to better support math-related applications. To motivate our solution, we observe that mathematical tasks usually rely on common or related background knowledge, e.g., a multiple-choice problem and a blank-filling problem might target the same knowledge point though with different problem settings. Thus, it is intuitive to transfer and share mathematical knowledge across tasks by learning a unified model, so that the performance of each individual task can be potentially improved. In a multi-task manner, it also naturally reduces the cost of task-specific fine-tuning, since a joint model is trained with the data of all tasks. However, to become a multi-task learner, the model requires a higher generalization ability for solving different tasks [4, 44]. For this purpose, we further leverage existing LLMs that implicitly encode large amounts of knowledge to enhance the capacity of complex problem solving for PLMs. To this end, in this paper, we propose JiuZhang 2.0, a unified Chinese PLM specially designed for multi-task mathematical problem solving. In order to enhance the multi-task capacity, we make three major technical contributions. Firstly, we design a Mixture-of-Experts (MoE) based architecture to transfer and share mathematical knowledge across tasks. We adopt the MoE architecture to encode mathematical text with an elaborately designed routing mechanism. Secondly, we design multi-task continual pre-training and multi-task fine-tuning strategies to optimize the MoE-based architecture for multi-task adaptation. For multi-task continual pre-training, we construct a group of self-supervised pre-training tasks to warm up the MoE architecture for knowledge sharing; for multi-task fine-tuning, we unify the math-related tasks into two general formats of language understanding and generation, and directly enhance the knowledge sharing across these tasks. Thirdly, in order to further improve the general capacity of solving different complex tasks, we leverage LLMs as complementary models to improve the solutions generated by our PLM. The PLM (with a smaller tuning cost) is used for task adaptation and generates a preliminary solution, while the LLM (with a stronger model capacity) mainly refines the generated results without directly solving the problem. Concretely, we retrieve similar examples and iteratively concatenate instructions with them to compose the prompt, gradually guiding the LLM to improve the generation results in a coarse-to-fine manner (overall logic, deduction process and language expressions).
To verify the effectiveness of our proposed JiuZhang 2.0, we conduct extensive experiments on eight tasks, covering both the evaluation settings of seen tasks and unseen tasks. Experimental results have shown that our approach can consistently outperform a number of competitive baseline methods (even LLM based methods). Besides, we deploy our model in a Chinese education app, and online A/B testing further verifies the effectiveness of our approach. In this section, we present our JiuZhang 2.0, which is developed based on the former version of JiuZhang by introducing specific improvements for multi-task mathematical problem solving. [Figure 1: The overview of our model JiuZhang 2.0, consisting of two major parts: MoE extension with multi-task training based on the PLM (the primary role) and iterative refinement via LLM (the complementary role). The figure shows the MoE-enhanced architecture (shared encoder with MoE FFN layers, routing network, U-decoder and G-decoder), the pre-training tasks (MLM, DAE, SSR, SFR, GSC, USC) with the multi-task pre-training and fine-tuning losses, the fine-tuning tasks (analysis generation, question answering, classification), and the IRL (Iterative Refinement via LLM) pipeline, where JiuZhang acts as a retriever to build a prompt (question, generated analysis, exemplars, instruction) for the LLM (e.g., CodeX) over a worked multiple-choice example. The red bold tokens are errors generated by JiuZhang, which are corrected by the LLM in the later iterative refinement process.] 3.1 Backbone Model: JiuZhang We first introduce the backbone model JiuZhang [56] for mathematical problem understanding.
Unlike general-purpose PLMs (e.g., BERT [10]), JiuZhang considers a pre-training corpus of mathematical text, in which each text consists of a sequence of $n$ tokens (either a text word or a math symbol) corresponding to a mathematical problem (including both the problem statement and a possible solution), denoted as $d = \{t_1, t_2, \cdots, t_n\}$. Next, we introduce the original architecture and pre-training tasks of JiuZhang [56]. Architecture. Since both understanding and generation capacities are needed for mathematical problem solving, JiuZhang adopts an architecture consisting of one shared encoder and two task-specific decoders: one decoder for understanding tasks ($U$-decoder) and the other decoder for generation tasks ($G$-decoder). It employs bidirectional Transformers to implement the shared encoder and the $U$-decoder, and an auto-regressive Transformer to implement the $G$-decoder. In order to enhance the representation ability, the shared encoder is built with more layers than the two decoders (i.e., 10 layers vs. 2 layers). Given a mathematical text $d = \{t_1, \cdots, t_n\}$, the shared encoder produces contextualized token representations $\{\mathbf{h}_1^{(L)}, \mathbf{h}_2^{(L)}, \cdots, \mathbf{h}_n^{(L)}\}$ (for an $L$-layer encoder) by capturing mathematical semantics from the input text. Then, the $U$-decoder and $G$-decoder solve the understanding and generation tasks based on the contextualized representations, respectively. Pre-training Tasks. In the former version, JiuZhang sets up three types of pre-training tasks and schedules them in a curriculum learning approach. The basic course is constructed based on masked token prediction following general-purpose PLMs, with two pre-training tasks of masked language modeling ($L_{MLM}$) and denoised auto-encoder ($L_{DAE}$). The advanced course is constructed based on specific considerations of mathematical text, including mathematical logic recovering and solution checking. For mathematical logic recovering, we introduce the pre-training tasks of shuffled sentences recovering ($L_{SSR}$) and shuffled formulas recovering ($L_{SFR}$), in order to enhance the understanding of mathematical logic; for solution checking, we introduce the pre-training tasks of dual-decoder solution checking ($L_{GSC}$ and $L_{USC}$), which improve the model's ability to detect and correct errors in its own generated outputs. These pre-training tasks can gradually adapt JiuZhang to mathematical problem solving. Due to the space limit, please refer to the original paper [56] for more details. Although JiuZhang can better model mathematical text compared with general-purpose PLMs, it is not specially designed for multi-task mathematical problem solving. In order to enhance the multi-task capacity, we next introduce two important improvements, namely MoE extension with multi-task training (Section 3.2) and iterative refinement via LLM (Section 3.3). In the following, we introduce the two parts in detail.
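Before moving on, the shared-encoder / dual-decoder backbone described in this subsection can be sketched roughly as follows; it is a minimal PyTorch-style illustration under the stated 10-vs-2 layer split, and the class name, hidden sizes, and all other hyper-parameters are our own assumptions rather than the released implementation.

```python
import torch
import torch.nn as nn

class JiuZhangBackbone(nn.Module):
    """Rough sketch: a deep shared encoder with two shallow task-specific decoders."""

    def __init__(self, vocab_size=30000, hidden=768, heads=12,
                 enc_layers=10, dec_layers=2):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden)
        # Deep bidirectional shared encoder over the mathematical text.
        self.shared_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(hidden, heads, batch_first=True), enc_layers)
        # U-decoder: shallow bidirectional stack for understanding tasks.
        self.u_decoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(hidden, heads, batch_first=True), dec_layers)
        # G-decoder: shallow auto-regressive stack for generation tasks.
        self.g_decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(hidden, heads, batch_first=True), dec_layers)
        self.lm_head = nn.Linear(hidden, vocab_size)

    def encode(self, input_ids):
        return self.shared_encoder(self.embed(input_ids))

    def understand(self, input_ids):
        # Contextualized representations consumed by classification-style heads.
        return self.u_decoder(self.encode(input_ids))

    def generate_step(self, input_ids, decoder_ids):
        memory = self.encode(input_ids)
        seq_len = decoder_ids.size(1)
        # Additive causal mask so the G-decoder stays auto-regressive.
        causal_mask = torch.triu(
            torch.full((seq_len, seq_len), float("-inf")), diagonal=1)
        hidden = self.g_decoder(self.embed(decoder_ids), memory, tgt_mask=causal_mask)
        return self.lm_head(hidden)
```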
3.2 MoE Extension with Multi-task Training By leveraging a corpus of mathematical text, JiuZhang implicitly captures mathematical knowledge with specially designed pre-training tasks. However, such information is encoded in a whole model (i.e., the shared encoder), and it is difficult to transfer mathematical knowledge across different tasks. To better decompose and share the mathematical knowledge, we propose to enhance the backbone model with a Mixture-of-Experts (MoE) [46] extension, and introduce multi-task continual pre-training and multi-task fine-tuning strategies based on the MoE-enhanced architecture. 3.2.1 MoE Extension for Knowledge Sharing. MoE [23] is a widely used technique to increase model capacity by incorporating multiple expert networks (the same architecture yet different parameters). In contrast, we employ MoE to decouple and share mathematical knowledge across tasks: common knowledge for related tasks can be captured in one specific expert, and less relevant knowledge across different tasks is distributed among multiple experts. MoE Layer for Mathematical Text. In our approach, we only extend the deep shared encoder (capturing the essential mathematical knowledge) with MoE, but not the shallow decoders (supporting different types of tasks). As the encoder is composed of multiple bidirectional Transformer layers, we incorporate the MoE layer to substitute for the original feed-forward layer. Each MoE layer consists of a routing network $R(\cdot)$ and multiple expert networks $\{E_i(\cdot)\}_{i=1}^{K}$, where $K$ denotes the number of expert candidates. To reuse the encoded knowledge from JiuZhang, we utilize the parameters of its feed-forward layer to initialize the parameters of the expert networks, which can also improve the training stability. Since a mathematical problem is usually related to diverse knowledge points, we adopt a token-wise routing mechanism [46] to decouple its associated mathematical knowledge, by assigning experts individually for each token. Given an input mathematical text $d = \{t_1, \cdots, t_n\}$, in each Transformer layer, the multi-head self-attention layer first produces the aggregated representations of all these tokens $\{\mathbf{h}_1, \cdots, \mathbf{h}_n\}$. Then, for each token, the routing network estimates the probability distribution over the $K$ experts: $R(\mathbf{h}) = \mathrm{softmax}(\mathbf{W} \cdot \mathbf{h})$, (1) where $\mathbf{W}$ is the trainable matrix for deriving the routing distribution. Further, we employ a weighted combination to integrate the outputs from the $K$ experts: $\mathrm{MoE}(\mathbf{h}) = \sum_{i=1}^{K} R(\mathbf{h})_i \times E_i(\mathbf{h})$. (2) Sparsely Routing with Jitter Noise. To save the computational cost in MoE layers, we introduce the sparse activation mechanism [46] to selectively utilize expert networks for each token. Specifically, according to the estimated probability distribution $R(\mathbf{h})$, we first rank all the expert networks and then select the top-$k$ ones ($k \leq K$) in Eq. (2) to derive the token representation. Here, we set $k = 1$, i.e., only the most related expert will be routed for each token.
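To make the token-wise routing of Eqs. (1)–(2) and the top-$k$ selection concrete, here is a minimal PyTorch-style sketch; the class and argument names (e.g., `TokenWiseMoELayer`, `num_experts`) are placeholders of ours and not the authors' code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TokenWiseMoELayer(nn.Module):
    """Sketch of an MoE feed-forward layer with per-token routing and top-k selection."""

    def __init__(self, hidden=768, ffn=3072, num_experts=4, top_k=1):
        super().__init__()
        self.router = nn.Linear(hidden, num_experts, bias=False)   # W in Eq. (1)
        self.experts = nn.ModuleList([
            nn.Sequential(nn.Linear(hidden, ffn), nn.GELU(), nn.Linear(ffn, hidden))
            for _ in range(num_experts)
        ])
        self.top_k = top_k

    def forward(self, h):                                   # h: [batch, seq_len, hidden]
        scores = F.softmax(self.router(h), dim=-1)          # R(h) as in Eq. (1)
        top_scores, top_idx = scores.topk(self.top_k, dim=-1)
        out = torch.zeros_like(h)
        for i, expert in enumerate(self.experts):
            routed = (top_idx == i).any(dim=-1)             # tokens whose top-k set contains expert i
            if routed.any():
                # Routing weight of expert i for the routed tokens (Eq. 2, restricted to top-k).
                weight = (top_scores * (top_idx == i)).sum(-1, keepdim=True)[routed]
                out[routed] = out[routed] + weight * expert(h[routed])
        return out
```

With `top_k = 1` only a single expert runs per token, which keeps the per-token compute close to that of a single feed-forward layer.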
In this way, for each token, the computational cost of the expert network is roughly the same as that of the original feed-forward layer of JiuZhang. A more detailed analysis of the inference latency can be found in Appendix B. However, prior studies [14] have found that such a sparse expert assignment approach would deterministically choose the best-ranking expert, making the expert network easy to overfit. Therefore, we introduce randomness into the expert selection process by using the jitter noise [14] in the routing network. We multiply the estimated probability distribution in Eq. (1) by a jitter noise $\epsilon$ (a randomly scaling vector) as: $R(\mathbf{h}) = \mathrm{softmax}\big((\mathbf{W} \cdot \mathbf{h}) \odot \epsilon\big)$, (3) where $\epsilon \in \mathbb{R}^{K}$ is a randomly sampled vector whose entries are drawn from the uniform distribution $[1-\eta, 1+\eta]$ (with the noise degree controlled by the hyper-parameter $\eta$), and "$\odot$" is the element-wise product. In this way, the probability scores of different experts would be increased or decreased randomly, making the expert networks more robust to perturbations on the routing results. 3.2.2 Multi-task Pre-training for MoE Adaptation. In order to support the MoE architecture, we design multi-task continual pre-training strategies for adapting to the multi-task setting. Multi-task Continual Pre-training. The goal of multi-task pre-training is to decouple and transfer mathematical knowledge via expert sharing, according to task supervision. Since there is no task data during the pre-training stage, we consider reusing the original pre-training tasks of JiuZhang discussed in Section 3.1, including masked token prediction ($L_{MLM}$ and $L_{DAE}$), mathematical logic recovering ($L_{SSR}$ and $L_{SFR}$) and solution checking ($L_{GSC}$ and $L_{USC}$). Instead of using a curriculum learning way as in [56], we treat the six pre-training losses as equal optimization goals, and set a multi-task pre-training objective: $L_{MT} = L_{MLM} + L_{DAE} + L_{SSR} + L_{SFR} + L_{USC} + L_{GSC}$. (4) Note that our model has been initialized with the parameters of the former JiuZhang, so that it also implicitly benefits from the curriculum learning strategy proposed in the previous paper [56]. Meanwhile, based on the MoE-based architecture, we employ these pre-training tasks to decouple and share mathematical knowledge across tasks.
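A small sketch of the jitter-noise routing in Eq. (3) and the equally weighted multi-task objective in Eq. (4), assuming PyTorch-style tensors; the function names below are hypothetical helpers rather than the authors' code.

```python
import torch
import torch.nn.functional as F

def jittered_routing(logits, eta=0.01, training=True):
    """Eq. (3): scale the router logits by noise drawn from U[1 - eta, 1 + eta]."""
    if training:
        noise = torch.empty_like(logits).uniform_(1.0 - eta, 1.0 + eta)
        logits = logits * noise
    return F.softmax(logits, dim=-1)

def multitask_pretraining_loss(losses):
    """Eq. (4): equally weighted sum of the six self-supervised objectives."""
    # losses = {"MLM": ..., "DAE": ..., "SSR": ..., "SFR": ..., "USC": ..., "GSC": ...}
    return sum(losses.values())
```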
Auxiliary Losses for Improved Optimization. For MoE methods, there are two major training problems that affect the performance, i.e., the unbalanced load among experts [46] and the training instability [61]. To alleviate these problems, we adopt two auxiliary losses [46, 61] as regularizers in our approach. Specially, the unbalanced load problem refers to the situation where certain experts are extremely frequently routed, which may cause the overfitting problem on these experts and the underfitting problem on other experts. Therefore, we aim to improve the unbalanced routing among all $K$ experts. Formally, we encourage the accumulated estimated probabilities for each expert to be uniform, denoted as: $L_U = \alpha \cdot K \cdot \sum_{i=1}^{K} f_i \cdot s_i$, (5) where $f_i$ is the number of tokens dispatched to the $i$-th expert, $s_i$ is the accumulated routing score estimated by the routing network for the $i$-th expert, and $\alpha$ is the coefficient to control the influence. According to [46], this loss encourages uniform routing since it would be minimized under a uniform distribution. Further, the training instability problem is often caused by the large volatility of the probability scores in the routing network. In order to control the volatility, we adopt the $Z$-loss [61] that encourages the routing logits of all tokens (size $n$) to remain small: $L_Z = \beta \cdot \frac{1}{n} \log \sum_{j=1}^{n} \exp\big(R(\mathbf{h}_j)\big)^2$, (6) where $\beta$ is the coefficient for this loss. 3.2.3 Multi-task Fine-tuning for MoE Adaptation. To apply the pre-trained model, a typical way is to fine-tune it on some downstream tasks. However, this cannot sufficiently leverage the merits of MoE-based architectures (i.e., decoupling and sharing) without considering inter-task relationships. Thus, we design a multi-task fine-tuning strategy, which boosts the capacity of our MoE architecture by leveraging the data of all (available) downstream tasks. Unifying the Fine-tuning Tasks. For multi-task fine-tuning, we combine the available training data from multiple downstream tasks for jointly optimizing our model. Since the tasks that we consider are math related, they tend to rely on common mathematical knowledge for task solving, which can be captured via the MoE-based architecture. However, the formats of the input and output data for downstream tasks are generally different, making it hard to fine-tune them jointly. Recall that our backbone model includes two specific decoders that can handle both understanding and generation tasks for mathematical text. Thus, we unify the math-related tasks into two general formats, either understanding or generation. Specially, for all text classification tasks, we merge the annotation labels and consider an extended multi-label setting, where the label dictionary covers the labels from all classification tasks. In this way, we can equip our $U$-decoder with a multi-label classifier head to simultaneously accomplish all these classification tasks. Further, for all text generation tasks, we adopt a standard sequence-to-sequence format and utilize the $G$-decoder to solve them. To better distinguish the different tasks for our model, given the training data from $m$ tasks, we also devise $m$ task prompt embeddings, denoted as $\{\mathbf{p}_1, \cdots, \mathbf{p}_m\}$. For each instance, we insert its task prompt embedding after the [CLS] token embedding.
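As an illustration of the unified fine-tuning input described above, the following sketch inserts a learned task prompt embedding right after the [CLS] embedding; the task list, module names, and tensor shapes are assumptions for illustration only.

```python
import torch
import torch.nn as nn

class MultiTaskInputBuilder(nn.Module):
    """Sketch: insert a learned task prompt embedding after [CLS] for each instance."""

    def __init__(self, embed_layer, hidden=768,
                 task_names=("MCQ", "BFQ", "CAG", "BAG", "KPC", "QRC")):
        super().__init__()
        self.embed = embed_layer                        # token embedding of the backbone
        self.task_ids = {name: i for i, name in enumerate(task_names)}
        self.task_prompts = nn.Embedding(len(task_names), hidden)   # p_1 .. p_m

    def forward(self, input_ids, task_name):            # input_ids: [batch, seq_len]
        tok = self.embed(input_ids)                     # [batch, seq_len, hidden]
        idx = torch.full((input_ids.size(0),), self.task_ids[task_name],
                         dtype=torch.long, device=input_ids.device)
        p = self.task_prompts(idx).unsqueeze(1)         # [batch, 1, hidden]
        # Position 0 is assumed to hold [CLS]; the task prompt goes right after it.
        return torch.cat([tok[:, :1], p, tok[:, 1:]], dim=1)
```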
Routing with Task Prompt. During multi-task fine-tuning, as the task type may be useful to determine the selection of different experts with specific mathematical knowledge, we further revise the routing mechanism by incorporating task-level instruction. Specially, in each MoE layer, we add the representation of the task prompt $\mathbf{p}$ to the input token representation $\mathbf{h}$, to compose the input of the routing layer for estimating the probability distribution over the experts: $R(\mathbf{h}) = \mathrm{softmax}\big((\mathbf{W} \cdot (\mathbf{h} + \mathbf{p})) \odot \epsilon\big)$, (7) where we also use jitter noise to improve the robustness. 3.3 Iterative Refinement via LLM Although the MoE extension is employed to enhance the backbone model, we keep a moderate-sized model (i.e., 276M for $K = 4$) with an affordable cost for downstream applications. Due to the limit in model size and pre-training data, it still has difficulty in generating solution text for some complex mathematical problems. Our solution is to leverage a large language model (LLM) [4, 5] with stronger general modeling capacities for refining the generation results of our PLM. To achieve this, we first design a retrieval strategy to select the most relevant exemplars for constructing the prompts, and then devise an iterative prompting method that utilizes in-context learning to gradually correct the generated results. 3.3.1 Constructing Prompts Using Retrieved Samples. Since existing LLMs are mainly English-focused, they cannot sufficiently capture the necessary mathematical knowledge to effectively accomplish math-related tasks in Chinese (see experiments in Section 4.2). Thus, instead of directly solving the tasks, the LLM plays a complementary role in our approach for refining the generated results of our PLM. Specifically, given a mathematical problem $q$, we first utilize the PLM (Section 3.2) to generate the solution text $\hat{a}$, and then employ the LLM via in-context learning [4] to refine $\hat{a}$ into $\tilde{a}$ with improved quality. To provide effective guidance for the LLM, we construct the prompts with retrieved relevant exemplars and specially designed natural language instructions. Retrieving Exemplars. As empirical studies [34] have revealed that the exemplars in the prompts of LLMs are important to the task performance, we retrieve relevant instances from the training data as the exemplars. Since exemplar finding is essentially an unsupervised text retrieval task, we further employ SimCSE [17] to enhance the representation capacity of our backbone model for semantic matching. Following SimCSE, we incorporate the dropout mechanism to augment positive representations and utilize the contrastive learning objective for training. In the retrieval stage, given the target problem $q$ and the training data set as the retrieval candidate pool, we first encode all the mathematical problems into dense vectors with our backbone model, and then select the top-ranking problems as relevant exemplars, denoted as $C = \{\langle q_j, a_j \rangle\}_{j=1}^{B}$, where $a_j$ is the associated solution text for problem $q_j$. Note that we do not use the solution text of the target problem, but only utilize the solution texts of the problems from the training data.
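The exemplar retrieval step can be sketched as a simple dense-retrieval routine over the training pool; `encode_fn` stands in for the SimCSE-enhanced backbone encoder, and the helper below is a hypothetical sketch rather than the actual retrieval code.

```python
import torch
import torch.nn.functional as F

def retrieve_exemplars(query_text, pool_texts, pool_solutions, encode_fn, top_b=8):
    """Sketch: pick the top-B most similar training problems as in-context exemplars."""
    q = F.normalize(encode_fn([query_text]), dim=-1)           # [1, hidden]
    cand = F.normalize(encode_fn(pool_texts), dim=-1)          # [N, hidden]
    sims = q @ cand.t()                                        # cosine similarity, [1, N]
    k = min(top_b, len(pool_texts))
    top_idx = sims.squeeze(0).topk(k).indices.tolist()
    # Return <question, solution> pairs, i.e., C = {<q_j, a_j>}.
    return [(pool_texts[i], pool_solutions[i]) for i in top_idx]
```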
Building Prompts. In order to guide the LLM to refer to the retrieved exemplars for revising the generated result $\hat{a}$ from our PLM, we utilize the in-context learning method with specially designed prompts. Specifically, the input of the LLM consists of four parts, i.e., the given question $q$, the generated result $\hat{a}$, the retrieved exemplars $C = \{\langle q_j, a_j \rangle\}_{j=1}^{B}$, and a natural language instruction $I$. We concatenate the above four parts into a long sentence, to compose the prompt template as: $[q; \hat{a}; C; I] \rightarrow \mathrm{prompt(LLM)}$, (8) where the instruction $I$ can be flexibly set according to different tasks. We will discuss how to set it in the following part. 3.3.2 Iterative Prompting for Result Refinement. Generally, the generated results from the PLM may contain a variety of mistakes (e.g., inconsistent logic and language typos), and it is hard for the LLM to completely check and correct all these mistakes at once. Therefore, we devise a three-stage iterative refining strategy that gradually improves the generated results following a coarse-to-fine manner. Concretely, based on the prompt template in Eq. (8), we design three specific instructions for the three stages, which guide the LLM to refine the generation results from the three perspectives of overall logic, deduction process and language expressions, respectively. We present the above instructions in the Appendix (Table 9). Further, to better cooperate with the above instructions, we also revise the way of retrieving exemplars in the three stages:
• at the first stage, we only rely on the problem statement $q$ for finding similar problems, referring to their overall logic;
• at the second stage, we leverage both $q$ and the generated solution text $\hat{a}$ for retrieving relevant problems with similar solution text, checking the deduction process;
• at the third stage, we only utilize the generated solution text $\hat{a}$ for retrieval to find other similar solution texts, correcting improper language expressions.
Table 1: Statistics of the datasets for eight evaluation tasks. "Seen" and "Unseen" refer that the task data is used or not used during multi-task fine-tuning, respectively.
Setting | Type | Task | Train | Dev | Test
Seen | QA tasks | MCQ | 22,000 | 3,982 | 7,466
Seen | QA tasks | BFQ | 14,795 | 1,786 | 1,778
Seen | Generation | CAG | 16,000 | 1,976 | 1,977
Seen | Generation | BAG | 14,795 | 1,786 | 1,778
Seen | Classification | KPC | 8,721 | 991 | 1,985
Seen | Classification | QRC | 10,000 | 2,000 | 4,000
Unseen | Generation | JCAG | 8,000 | 1,000 | 1,000
Unseen | Generation | JBAG | 8,000 | 1,000 | 1,000
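Returning to the iterative prompting procedure of Section 3.3.2, here is a minimal sketch of the three-stage, coarse-to-fine refinement loop built on the prompt template of Eq. (8); `retriever`, `llm_call`, and `instructions` are hypothetical placeholders for the exemplar retriever, the LLM API, and the three stage-specific instructions (given in the paper's Appendix), and the per-stage iteration count follows the setting described in the text below.

```python
def iterative_refinement(question, draft, retriever, llm_call, instructions, steps=3):
    """Sketch of the three-stage, coarse-to-fine refinement based on the prompt of Eq. (8).

    retriever(query) returns <question, solution> exemplar pairs, llm_call(prompt)
    queries the LLM, and instructions holds one instruction string per stage
    (overall logic, deduction process, language expressions).
    """
    refined = draft
    for stage, instruction in enumerate(instructions):
        for _ in range(steps):                        # several iterations per stage
            if stage == 0:
                query = question                      # stage 1: retrieve by the problem only
            elif stage == 1:
                query = question + " " + refined      # stage 2: problem + current solution
            else:
                query = refined                       # stage 3: current solution only
            exemplars = retriever(query)
            demo = "\n".join(f"Question: {q}\nAnalysis: {a}" for q, a in exemplars)
            prompt = (f"Question: {question}\nGenerated analysis: {refined}\n"
                      f"{demo}\n{instruction}")       # [q; a; C; I] as in Eq. (8)
            refined = llm_call(prompt)
    return refined
```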
4 EXPERIMENTS 4.1 Experimental Settings We utilize the same pre-training corpus of JiuZhang [56], consisting of 1,276,952 high-school math problems collected from Zhixuewang, and each problem is associated with the problem type, problem statement and solution text. We preprocess these collected texts in the same way as JiuZhang. Evaluation Tasks. We consider two different settings for evaluation, namely seen tasks and unseen tasks, referring to the task data that are used and not used, respectively, during multi-task finetuning. We split each task dataset into training/development/test sets. The statistics of these tasks are shown in Table 1. \u2022 Seen tasks consist of six tasks based on high-school math problems, including (1) two question answering tasks, i.e., MultipleChoice Question Answering (MCQ) and Blank-Filling Question Answering (BFQ); (2) two analysis generation tasks, i.e., MultipleChoice Analysis Generation (CAG) and Blank-Filling Analysis Generation (BAG); and (3) two classification tasks, i.e., Knowledge Point Classification (KPC) and Question Relation Classification (QRC). For these tasks, we perform multi-task fine-tuning with all training sets, select the model checkpoint with the best average performance on development sets, and then evaluate the results on test sets. \u2022 Unseen tasks consist of two analysis generation tasks based on junior high school math problems, i.e., Junior-high-school MultipleChoice Analysis Generation (JCAG) and Junior-high-school BlankFilling Analysis Generation (JBAG), which are not used in multitask fine-tuning for our model. For the two tasks, we perform task-specific fine-tuning, i.e., the multi-task fine-tuned model is separately optimized, tuned and evaluated for each task. We use the evaluation metrics following JiuZhang [56]. For classification tasks (KPC and QRC), we adopt Accuracy and F1-macro as the evaluation metrics. For question answering tasks (MCQ and BFQ), we adopt Accuracy for evaluation. For generation tasks (CAG, BAG, JCAG and JBAG), we use BLEU-4 [40], ROUGE-2 and ROUGEL [33] to evaluate the quality of the generated analysis, and also adopt Accuracy to evaluate the generated answers. Baseline Methods. We select the following four types of baselines: \u2022 Non-pretraining methods consist of classic neural network methods for text classification or generation, i.e., TextCNN [24], TextRCNN [27], Seq2Seq [2] and Transformer [49]. \u2022 Pre-trained language models have been pre-trained on largescale general corpus. We select BERT-Base [11], BART-Base [29], RoBERTa-wwm [9], CPT [45] and Mengzi [55]. For generation tasks, we fine-tune RoBERTa-wwm in a UniLM way [12], and utilize bi-directional attention for input and unidirectional attention for output to implement the Seq2Seq based training and inference. \u2022 Continual pre-training methods further pre-train PLMs on domain-specific corpus (our collected math corpus), and also adopt specially designed pre-training tasks. We select MathBERT [41], DAPT-BERT [20], DAPT-CPT, COMUS [18], JiuZhang [56]. Since our approach is also related to multi-task learning [1, 35], we also add a variant that extends JiuZhang [56] in a multi-task training strategy, MTDNN [35] for fine-tuning. \u2022 Chain-of-thought (CoT) methods add explanations to the exemplars in the input prompt of LLMs, to better guide them to generate correct answer [51]. We employ CoT on GPT-3 [4] and CodeX [5], i.e., GPT3-CoT and CodeX-CoT. 
Note that CoT methods rely on intermediate reasoning steps of the sampled exemplars in input to guide the solving of math problems, which are not available in the two classification tasks of KPC and QRC. While, in MCQ, BFQ, CAG and BAG tasks, we can utilize the analysis text to derive the intermediate reasoning steps, hence we only report the results of CoT methods on the four tasks. Implementation Details. For GPT3-CoT and CodeX-CoT, we follow the standard chain-of-thought way to construct the input prompts [51], and the numbers of sampled exemplars are set to 5 and 8, respectively, since GPT-3 has a smaller maximum input length than CodeX. During training, we use AdamW [36] as the optimizer with the learning rate of 3e-5, and warm up the learning rate for the first 5% steps then decay the weight with a ratio of 0.01. The coefficients of the auxiliary loss (Eq. (5)) and the \ud835\udc4d-loss (Eq. (6)) are 1e-3 and 1e-4, respectively. For the MoE structure, we set the number of experts \ud835\udc3e= 4 and the number of activated experts \ud835\udc58= 1. For continual multi-task pre-training, we pre-train our model with a batch size of 256 for 700000 steps. For multi-task fine-tuning, we fine-tune our model with a batch size of 32 for 80 epochs and adopt the routing mechanism with task prompt. For iterative refinement, we use CodeX [5] as the LLM and retrieve top-8 similar problems from the training set as exemplars for each input problem. More details are reported in Appendix A. 4.2 Main Results 4.2.1 Evaluation on Seen Tasks. For seen tasks, we evaluate the performance of our approach after multi-task fine-tuning. The results of the seen QA/generation and classification tasks are shown in Table 2 and Table 3, respectively, and we can observe that: JiuZhang 2.0: A Unified Chinese Pre-trained Language Model for Multi-task Mathematical Problem Solving KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Table 2: Main results on two question answering tasks and two analysis generation tasks in the setting of seen tasks. Here, \u201cAcc.\u201d denotes the metric Accuracy, and \u201cw/o IRL\u201d denotes removing the iterative refinement strategy using LLMs. The best and the second-best methods are denoted in bold and underlined fonts respectively. Tasks MCQ BFQ CAG BAG Metrics Acc. Acc. BLEU-4 ROUGE-2 ROUGE-L Acc. BLEU-4 ROUGE-2 ROUGE-L Acc. Seq2Seq 37.61 44.32 39.91 47.79 67.88 42.63 39.86 48.15 68.06 39.91 Transformer 35.33 46.57 41.39 48.50 67.09 41.02 41.91 48.80 67.76 45.95 RoBERTa-wwm 37.29 47.24 47.29 53.81 70.61 47.70 44.62 51.5 69.54 42.35 BART 36.15 46.82 48.20 55.04 71.66 48.92 45.46 52.16 69.62 43.92 CPT 37.90 46.31 47.98 54.97 71.67 47.03 44.82 52.29 70.01 40.68 DAPT-CPT 46.26 53.41 49.54 55.97 72.52 50.46 46.33 53.69 70.91 48.98 JiuZhang 47.73 54.60 50.05 56.51 72.99 54.51 47.73 54.36 71.17 51.82 JiuZhang-MTDNN 48.81 54.95 49.15 56.28 72.77 56.80 47.58 54.16 71.22 53.09 GPT3-CoT 36.15 50.39 46.93 53.59 70.65 55.18 45.82 52.35 69.43 50.39 CodeX-CoT 40.36 53.82 43.65 54.28 70.43 56.30 42.96 53.45 69.89 53.82 JiuZhang 2.0 w/o IRL 49.75 55.85 50.17 56.72 73.02 58.83 48.33 54.79 71.48 54.78 JiuZhang 2.0 50.37 58.77 50.72 56.97 73.14 60.19 49.39 55.61 71.69 58.77 Table 3: Main results on two basic classification tasks in the seen setting. Iterative refinement via LLM is not applicable to the two tasks. Tasks KPC QRC Metrics Acc. F1-macro Accu. 
F1-macro TextCNN 47.4 26.8 73.3 52.9 TextRCNN 55.3 38.8 79.6 59.0 BERT 59.6 34.9 82.7 63.4 RoBERTa-wwm 61.0 37.0 84.2 65.2 Mengzi 56.6 29.5 81.7 62.8 BART 62.7 41.9 82.0 63.0 CPT 66.2 48.4 82.8 63.4 DAPT-BERT 68.7 46.5 86.5 68.5 MathBert 68.9 47.1 85.3 69.8 COMUS 71.0 63.3 88.0 73.3 DAPT-CPT 72.0 58.0 88.8 76.7 JiuZhang 73.3 59.4 89.4 79.2 JiuZhang-MTDNN 71.5 58.4 89.2 77.1 JiuZhang 2.0 (w/o IRL) 73.5 61.2 89.9 79.8 First, continual pre-training methods (i.e., COMUS, DAPT-CPT, JiuZhang, JiuZhang-MTDNN) achieve better performance than general-purpose PLMs such as BART and CPT. The reason is that these methods have been continually pre-trained on the math corpus, which can learn useful mathematical knowledge from such texts. Among these continual pre-training methods, the two methods based on JiuZhang (i.e., JiuZhang and JiuZhang-MTDNN) mostly outperform all other methods. It is mainly because that JiuZhang incorporates three types of pre-training tasks, which is further pretrained in a curriculum learning way. While, JiuZhang-MTDNN revises the fine-tuning process of JiuZhang by adopting multi-task learning, which can improve the performance on MCQ and BFQ, but has worse performance on KPC and QRC tasks. A possible reason is that there exists negative interference among these tasks during multi-task learning. Besides, COMUS also performs well on the KPC task. Since the KPC task requires a deep understanding of the formulas in mathematical problems for predicting the knowledge points, COMUS specially designs graph neural networks and memory networks for modeling the formulas. Second, the chain-of-thought methods based on powerful LLMs (i.e., GPT3-CoT and CodeX-CoT) overall perform worse than continual pre-training methods on generation metrics (i.e., BLEU-4, ROUGE-2 and ROUGE-L). The reason might be that these LLMs mainly focus on English tasks, and cannot well adapt to Chinese math-related tasks. In contrast, these continual pre-training methods have been trained over the math corpus, thus having an adaptation capacity in downstream tasks. While, for the Accuracy metric, chain-of-thought methods perform relatively better than other baselines. It shows that LLMs are more skilled in accurately predicting the answer, since they have a stronger mathematical reasoning capacity due to the huge model size and large-scale pre-training corpus (also including large amounts of mathematical texts). Finally, our proposed JiuZhang 2.0 outperforms all the baselines in most cases. By integrating the MoE architecture with multi-task training, our model can better capture the mathematical knowledge across various math-related tasks. Even without iterative refinement via the LLM, our model (i.e., JiuZhang 2.0 w/o IRL) can still outperform all the baselines. After incorporating the iterative refinement via the LLM, the performance of our approach can be further improved, especially on the Accuracy metric. It demonstrates that our approach can further benefit from the mathematical reasoning capacity of the LLM. In this way, JiuZhang 2.0 can combine both the advantages of the PLM and LLM: PLM can be tuned for domain adaptation to Chinese math-related tasks, while LLM has stronger reasoning and generation capacities. 4.2.2 Evaluation on Unseen Tasks. Since multi-task fine-tuning cannot cover all math-related tasks, we continue to examine the performance of our model on new tasks that are not seen before. 
In order to enlarge the domain gap between existing and new tasks, we select the two tasks of multiple-choice analysis generation KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Wayne Xin Zhao et al. Table 4: Main results on two analysis generation tasks for junior high school in the unseen setting. Methods JCAG JBAG BLEU-4 ROUGE-2 ROUGE-L Accuracy BLEU-4 ROUGE-2 ROUGE-L Accuracy BART 50.50 59.67 73.15 50.40 54.54 60.75 74.51 30.60 CPT 49.38 59.27 72.91 48.20 53.50 60.32 74.23 27.60 DAPT-CPT 52.06 60.84 73.53 54.50 54.66 61.36 74.78 32.30 JiuZhang 52.13 61.43 73.87 55.30 55.69 61.73 75.00 34.50 JiuZhang 2.0 w/o IRL 53.37 61.74 74.00 55.60 56.19 62.13 75.36 38.10 JiuZhang 2.0 55.73 63.76 75.37 63.20 54.45 64.81 77.14 53.80 \u00ac MTPT \u00ac MTFT \u00ac MoE \u00ac TR Ours 48.5 49.0 49.5 50.0 CAG BLEU-4 \u00ac MTPT \u00ac MTFT \u00ac MoE \u00ac TR Ours 52.5 54.0 55.5 57.0 58.5 CAG Acc. \u00ac MTPT \u00ac MTFT \u00ac MoE \u00ac TR Ours 45.5 46.5 47.5 48.5 BAG BLEU-4 \u00ac MTPT \u00ac MTFT \u00ac MoE \u00ac TR Ours 48.0 49.5 51.0 52.5 54.0 BAG Acc. Figure 2: Ablation study of our approach on CAG and BAG tasks. \u201c\u00ac\u201d indicates that the corresponding technique is removed from our model, while the rest are kept. We abbreviate the terms Multi-task Continual Pre-Training, Multi-Task Fine-Tuning, Mixture-of-Experts, and Task embedding in Routing network as MTPT, MTFT, MoE and TR respectively. (JCAG) and blank-filling analysis generation (JBAG) from junior high schools, which has a different distribution with those from high schools (in multi-task fine-tuning). For these two unseen tasks, we fine-tune our model (task by task) on them after multi-task fine-tuning, as the same way in the baselines. From Table 4, we can see that the overall experimental findings are similar to those discussed in Section 4.2.1, where we have the overall performance order: PLMs < continual pre-training methods < JiuZhang < JiuZhang 2.0 w/o IRL < JiuZhang 2.0. In particular, the variant of JiuZhang 2.0 w/o IRL also performs better than all these baselines, since it employs MoE extension with multi-task training, thus having an improved ability for capturing common mathematical knowledge across tasks. Further, by adopting the iterative refinement via LLMs (IRL), our JiuZhang 2.0 achieves a significant improvement on the Accuracy metric (i.e., 55.60 \u219263.20 on JCAG, 38.10 \u219253.80 on JBAG). The results show that the proposed IRL strategy can effectively leverage the strong generation and reasoning capacities of LLMs via in-context learning, which can gradually improve the generation quality of our PLM. 4.3 Detailed Analysis 4.3.1 Ablation Study. In JiuZhang 2.0, we have proposed a series of improvement techniques for enhancing the capacity for mathematical problem solving. Next, we study how each technique contributes to the model performance. We keep the complete model with all improvement techniques as a reference, then remove one specific technique each time, and compare the performance with and without it. We consider the following variants: (1) \u00ac MoE removes the MoE extension, (2) \u00ac MTPT removes multi-task continual pre-training, (3) \u00ac MTFT removes multi-task fine-tuning, and (4) \u00ac TR removes the task embedding from the routing network. Note that \u00ac MoE 1 2 4 8 48 50 52 54 56 58 60 CAG BLEU-4 Accuracy 1 2 4 8 46 48 50 52 54 56 BAG BLEU-4 Accuracy Figure 3: Varying the number of experts (\ud835\udc3e) in our approach. 
can be considered as an implementation of the multi-task learning method [35] with JiuZhang as the backbone model. We report BLEU-4 and Accuracy of these variants on the CAG and BAG tasks. From Figure 2, we observe that removing any of these improvements leads to performance degradation, which indicates the effectiveness of the proposed techniques in mathematical problem solving. In particular, removing multi-task pre-training or fine-tuning leads to a larger performance drop, which shows that the two training strategies are more important for improving the model performance. These two training strategies are well suited to the MoE architecture, and they help capture the mathematical knowledge via the expert networks.
4.3.2 Hyper-parameters Analysis. In our MoE architecture, there are two major hyper-parameters to tune, i.e., the number of experts K and the number of activated experts k in the MoE layers. Next, we investigate the effect of each hyper-parameter on our approach. We conduct the analysis experiments on the CAG and BAG tasks and report the results on the BLEU-4 and Accuracy metrics for the two hyper-parameters in Figure 3 and Figure 4, respectively.
[Figure 4: Varying the number of activated experts (k); the two panels plot BLEU-4 and Accuracy on CAG and BAG for k = 1, 2, 3, 4.]
Table 5: Online A/B test of JiuZhang 2.0 and JiuZhang via the automatic math problem solving function on Zhixuewang.
Ratio — JiuZhang 2.0 wins: 53.5%; JiuZhang wins: 46.5%.
First, increasing the number of experts does not necessarily improve the performance of our approach (Figure 3), especially on the Accuracy metric. A possible reason is that the MoE architecture introduces additional parameters, which makes it more likely to overfit the training set. Besides, using more experts also leads to larger computational costs. In our experiments, to balance effectiveness and efficiency, we set K = 4, i.e., using four expert networks, which generally gives a good performance. Second, activating more experts does not improve the model performance and can even degrade it (Figure 4). A possible reason is that activating more experts causes interference among them, resulting in conflicting utilization of the experts. In contrast, by setting k = 1, we not only achieve relatively better performance but also save the computation cost of the activated expert networks.
4.3.3 Analysis on the MoE Architecture. A major contribution of our model lies in the architecture extension with MoE. By setting multiple expert networks, we can effectively share the mathematical knowledge learned from the math corpus across tasks, so as to improve multi-task mathematical problem solving. These experts are expected to capture and decompose specific mathematical knowledge for different math tasks. Next, we present an analysis experiment on the knowledge encoded by each expert network. As shown in Table 6, we select three mathematical texts from two tasks and show the routed expert for each token (token-level routing) in different background colors. It can be observed that our routing network effectively decomposes the mathematical knowledge and routes it to the corresponding experts.
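For readers who want a concrete picture of the configuration analysed above (K = 4 experts, top-k = 1 routing, a task embedding in the router) and of how per-token expert assignments like those in Table 6 can be logged, the sketch below shows a rough PyTorch MoE feed-forward layer. It is not the authors' implementation: the hidden sizes and number of tasks are illustrative, and the load-balancing auxiliary loss and router z-loss follow the standard formulations from the sparse-MoE literature with the reported coefficients (1e-3 and 1e-4), since the paper's exact Eq. (5) and Eq. (6) are not reproduced here.

```python
# Rough sketch (not the authors' code) of a task-aware MoE feed-forward layer
# with top-1 routing, a load-balancing auxiliary loss and a router z-loss.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TaskAwareMoELayer(nn.Module):
    def __init__(self, d_model=768, d_ff=3072, num_experts=4, top_k=1, num_tasks=6):
        super().__init__()
        self.num_experts, self.top_k = num_experts, top_k
        self.experts = nn.ModuleList([
            nn.Sequential(nn.Linear(d_model, d_ff), nn.GELU(), nn.Linear(d_ff, d_model))
            for _ in range(num_experts)
        ])
        self.task_embed = nn.Embedding(num_tasks, d_model)  # the "TR" component
        self.router = nn.Linear(d_model, num_experts)

    def forward(self, hidden, task_id):
        # hidden: (batch, seq, d_model); task_id: (batch,) integer task labels
        router_in = hidden + self.task_embed(task_id)[:, None, :]   # task-aware routing
        logits = self.router(router_in)                             # (b, s, K)
        probs = logits.softmax(dim=-1)
        top_p, top_i = probs.topk(self.top_k, dim=-1)               # k = 1: one expert per token

        out = torch.zeros_like(hidden)
        for e, expert in enumerate(self.experts):
            mask = (top_i == e).any(dim=-1)                         # tokens routed to expert e
            if mask.any():
                weight = torch.where(top_i == e, top_p, torch.zeros_like(top_p)).sum(-1)
                out[mask] += weight[mask].unsqueeze(-1) * expert(hidden[mask])

        # Standard load-balancing auxiliary loss (expert usage fraction * mean prob).
        frac = F.one_hot(top_i[..., 0], self.num_experts).float().mean(dim=(0, 1))
        prob_mean = probs.mean(dim=(0, 1))
        aux_loss = self.num_experts * (frac * prob_mean).sum()
        # Standard router z-loss penalizing large router logits.
        z_loss = (torch.logsumexp(logits, dim=-1) ** 2).mean()

        # top_i[..., 0] is the per-token expert id (valid simplification for k = 1).
        return out, 1e-3 * aux_loss + 1e-4 * z_loss, top_i[..., 0]
```

The per-token expert ids returned by the layer are exactly what a token-level routing analysis such as the Table 6 case study would record and colour-code.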
In Table 6, for example, the trigonometric functions (e.g., sin and π) are routed to expert #3, while the (background or formal) words and numbers are mainly assigned to expert #1 and expert #2, respectively.
4.4 Online A/B Test
Besides offline evaluation, we further conduct an online A/B test on Zhixuewang² to examine the practical performance of our approach. Zhixuewang is designed as a teacher assistant app that provides personalized education services to students, accumulating about 51 million users in mainland China. Specifically, we employ the automatic math problem solving function on Zhixuewang for conducting the online A/B test. Given a math problem (e.g., a blank-filling problem), this function aims to automatically generate the answer with a detailed analysis of the solving process. Here, we compare our JiuZhang 2.0 with the original JiuZhang [56], and both models are fine-tuned on the training data provided by this app. For comparison, we sample a small population of requests to this function, and in each request a user is asked to select her/his preferred answer and analysis among those provided by the two models.
²https://www.zhixue.com/
Table 6: Case study on the token-level expert routing. We use the background color to indicate different experts: expert #1, expert #2, expert #3.
Task  Problem Content
BAG   5 sin 90° + 2 sin 0° − 3 sin 270° + 10 cos 180° = ____ .
KPC   Known the domain of definition of function y = 2a cos(2x − π/3) + b is [0, π/2] and the domain of function is [−5, 1]. Find the value of a, b.
KPC   A seagoing ship starts from A, sails in a straight line at a speed of 40 nautical miles per hour in the direction of 40° ......
Table 5 reports the winning ratio of the two methods. As we can see, our proposed JiuZhang 2.0 performs better than the baseline JiuZhang. The major reason is that our model adopts multi-task training with MoE layers to better capture the shared knowledge across multiple math-related tasks, and also leverages LLMs to iteratively refine the generated results. In this way, our model can generate more accurate answers and higher-quality analyses.
5 CONCLUSION
In this paper, we proposed JiuZhang 2.0, a unified Chinese PLM for multi-task mathematical problem solving. Different from previous PLM approaches for the math domain, we focus on improving the multi-task capacity of PLMs, especially on complex tasks. For this purpose, we designed an MoE-based encoder for modeling mathematical text, aiming to share mathematical knowledge across different tasks. To support the MoE architecture, we specially designed multi-task continual pre-training and multi-task fine-tuning strategies for learning the shared knowledge via expert networks. Further, we leveraged powerful LLMs in a complementary role to iteratively refine the generation results of our PLM, with elaborately designed prompts. Experimental results (both offline evaluation and the online A/B test) have demonstrated that our approach is superior to competitive baselines on a variety of math-related tasks.
ACKNOWLEDGEMENT
This work was partially supported by National Natural Science Foundation of China under Grant No. 62222215, Beijing Natural Science Foundation under Grant No. 4222027, and Beijing Outstanding Young Scientist Program under Grant No.
BJJWZYJH012019100020098. And this work is also partially supported by the Outstanding Innovative Talents Cultivation Funded Programs 2021 of Renmin University of China. Xin Zhao is the corresponding author. KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Wayne Xin Zhao et al." + }, + { + "url": "http://arxiv.org/abs/2204.07689v1", + "title": "Sparsely Activated Mixture-of-Experts are Robust Multi-Task Learners", + "abstract": "Traditional multi-task learning (MTL) methods use dense networks that use the\nsame set of shared weights across several different tasks. This often creates\ninterference where two or more tasks compete to pull model parameters in\ndifferent directions. In this work, we study whether sparsely activated\nMixture-of-Experts (MoE) improve multi-task learning by specializing some\nweights for learning shared representations and using the others for learning\ntask-specific information. To this end, we devise task-aware gating functions\nto route examples from different tasks to specialized experts which share\nsubsets of network weights conditioned on the task. This results in a sparsely\nactivated multi-task model with a large number of parameters, but with the same\ncomputational cost as that of a dense model. We demonstrate such sparse\nnetworks to improve multi-task learning along three key dimensions: (i)\ntransfer to low-resource tasks from related tasks in the training mixture; (ii)\nsample-efficient generalization to tasks not seen during training by making use\nof task-aware routing from seen related tasks; (iii) robustness to the addition\nof unrelated tasks by avoiding catastrophic forgetting of existing tasks.", + "authors": "Shashank Gupta, Subhabrata Mukherjee, Krishan Subudhi, Eduardo Gonzalez, Damien Jose, Ahmed H. Awadallah, Jianfeng Gao", + "published": "2022-04-16", + "updated": "2022-04-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2211.10435v2", + "title": "PAL: Program-aided Language Models", + "abstract": "Large language models (LLMs) have recently demonstrated an impressive ability\nto perform arithmetic and symbolic reasoning tasks, when provided with a few\nexamples at test time (\"few-shot prompting\"). Much of this success can be\nattributed to prompting methods such as \"chain-of-thought'', which employ LLMs\nfor both understanding the problem description by decomposing it into steps, as\nwell as solving each step of the problem. While LLMs seem to be adept at this\nsort of step-by-step decomposition, LLMs often make logical and arithmetic\nmistakes in the solution part, even when the problem is decomposed correctly.\nIn this paper, we present Program-Aided Language models (PAL): a novel approach\nthat uses the LLM to read natural language problems and generate programs as\nthe intermediate reasoning steps, but offloads the solution step to a runtime\nsuch as a Python interpreter. With PAL, decomposing the natural language\nproblem into runnable steps remains the only learning task for the LLM, while\nsolving is delegated to the interpreter. We demonstrate this synergy between a\nneural LLM and a symbolic interpreter across 13 mathematical, symbolic, and\nalgorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all\nthese natural language reasoning tasks, generating code using an LLM and\nreasoning using a Python interpreter leads to more accurate results than much\nlarger models. 
For example, PAL using Codex achieves state-of-the-art few-shot\naccuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B\nwhich uses chain-of-thought by absolute 15% top-1. Our code and data are\npublicly available at http://reasonwithpal.com/ .", + "authors": "Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, Graham Neubig", + "published": "2022-11-18", + "updated": "2023-01-27", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.16257v5", + "title": "Solving Math Word Problems via Cooperative Reasoning induced Language Models", + "abstract": "Large-scale pre-trained language models (PLMs) bring new opportunities to\nchallenging problems, especially those that need high-level intelligence, such\nas the math word problem (MWPs). However, directly applying existing PLMs to\nMWPs can fail as the generation process lacks sufficient supervision and thus\nlacks fast adaptivity as humans. We notice that human reasoning has a dual\nreasoning framework that consists of an immediate reaction system (system 1)\nand a delicate reasoning system (system 2), where the entire reasoning is\ndetermined by their interaction. This inspires us to develop a cooperative\nreasoning-induced PLM for solving MWPs, called Cooperative Reasoning (CoRe),\nresulting in a human-like reasoning architecture with system 1 as the generator\nand system 2 as the verifier. In our approach, the generator is responsible for\ngenerating reasoning paths, and the verifiers are used to supervise the\nevaluation in order to obtain reliable feedback for the generator. We evaluate\nour CoRe framework on several mathematical reasoning datasets and achieve\ndecent improvement over state-of-the-art methods, up to 9.6% increase over best\nbaselines. Our codes are available at https://github.com/TianHongZXY/CoRe", + "authors": "Xinyu Zhu, Junjie Wang, Lin Zhang, Yuxiang Zhang, Ruyi Gan, Jiaxing Zhang, Yujiu Yang", + "published": "2022-10-28", + "updated": "2023-12-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2110.03742v1", + "title": "Beyond Distillation: Task-level Mixture-of-Experts for Efficient Inference", + "abstract": "Sparse Mixture-of-Experts (MoE) has been a successful approach for scaling\nmultilingual translation models to billions of parameters without a\nproportional increase in training computation. However, MoE models are\nprohibitively large and practitioners often resort to methods such as\ndistillation for serving. In this work, we investigate routing strategies at\ndifferent granularity (token, sentence, task) in MoE models to bypass\ndistillation. Experiments on WMT and a web-scale dataset suggest that\ntask-level routing (task-MoE) enables us to extract smaller, ready-to-deploy\nsub-networks from large sparse models. On WMT, our task-MoE with 32 experts\n(533M parameters) outperforms the best performing token-level MoE model\n(token-MoE) by +1.0 BLEU on average across 30 language pairs. The peak\ninference throughput is also improved by a factor of 1.9x when we route by\ntasks instead of tokens. While distilling a token-MoE to a smaller dense model\npreserves only 32% of the BLEU gains, our sub-network task-MoE, by design,\npreserves all the gains with the same inference cost as the distilled student\nmodel. 
Finally, when scaling up to 200 language pairs, our 128-expert task-MoE\n(13B parameters) performs competitively with a token-level counterpart, while\nimproving the peak inference throughput by a factor of 2.6x.", + "authors": "Sneha Kudugunta, Yanping Huang, Ankur Bapna, Maxim Krikun, Dmitry Lepikhin, Minh-Thang Luong, Orhan Firat", + "published": "2021-09-24", + "updated": "2021-09-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2212.10535v2", + "title": "A Survey of Deep Learning for Mathematical Reasoning", + "abstract": "Mathematical reasoning is a fundamental aspect of human intelligence and is\napplicable in various fields, including science, engineering, finance, and\neveryday life. The development of artificial intelligence (AI) systems capable\nof solving math problems and proving theorems has garnered significant interest\nin the fields of machine learning and natural language processing. For example,\nmathematics serves as a testbed for aspects of reasoning that are challenging\nfor powerful deep learning models, driving new algorithmic and modeling\nadvances. On the other hand, recent advances in large-scale neural language\nmodels have opened up new benchmarks and opportunities to use deep learning for\nmathematical reasoning. In this survey paper, we review the key tasks,\ndatasets, and methods at the intersection of mathematical reasoning and deep\nlearning over the past decade. We also evaluate existing benchmarks and\nmethods, and discuss future research directions in this domain.", + "authors": "Pan Lu, Liang Qiu, Wenhao Yu, Sean Welleck, Kai-Wei Chang", + "published": "2022-12-20", + "updated": "2023-06-22", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.03874v2", + "title": "Measuring Mathematical Problem Solving With the MATH Dataset", + "abstract": "Many intellectual endeavors require mathematical problem solving, but this\nskill remains beyond the capabilities of computers. To measure this ability in\nmachine learning models, we introduce MATH, a new dataset of 12,500 challenging\ncompetition mathematics problems. Each problem in MATH has a full step-by-step\nsolution which can be used to teach models to generate answer derivations and\nexplanations. To facilitate future research and increase accuracy on MATH, we\nalso contribute a large auxiliary pretraining dataset which helps teach models\nthe fundamentals of mathematics. Even though we are able to increase accuracy\non MATH, our results show that accuracy remains relatively low, even with\nenormous Transformer models. Moreover, we find that simply increasing budgets\nand model parameter counts will be impractical for achieving strong\nmathematical reasoning if scaling trends continue. While scaling Transformers\nis automatically solving most other text-based tasks, scaling is not currently\nsolving MATH. 
To have more traction on mathematical problem solving we will\nlikely need new algorithmic advancements from the broader research community.", + "authors": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, Jacob Steinhardt", + "published": "2021-03-05", + "updated": "2021-11-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2105.00377v1", + "title": "MathBERT: A Pre-Trained Model for Mathematical Formula Understanding", + "abstract": "Large-scale pre-trained models like BERT, have obtained a great success in\nvarious Natural Language Processing (NLP) tasks, while it is still a challenge\nto adapt them to the math-related tasks. Current pre-trained models neglect the\nstructural features and the semantic correspondence between formula and its\ncontext. To address these issues, we propose a novel pre-trained model, namely\n\\textbf{MathBERT}, which is jointly trained with mathematical formulas and\ntheir corresponding contexts. In addition, in order to further capture the\nsemantic-level structural features of formulas, a new pre-training task is\ndesigned to predict the masked formula substructures extracted from the\nOperator Tree (OPT), which is the semantic structural representation of\nformulas. We conduct various experiments on three downstream tasks to evaluate\nthe performance of MathBERT, including mathematical information retrieval,\nformula topic classification and formula headline generation. Experimental\nresults demonstrate that MathBERT significantly outperforms existing methods on\nall those three tasks. Moreover, we qualitatively show that this pre-trained\nmodel effectively captures the semantic-level structural information of\nformulas. To the best of our knowledge, MathBERT is the first pre-trained model\nfor mathematical formula understanding.", + "authors": "Shuai Peng, Ke Yuan, Liangcai Gao, Zhi Tang", + "published": "2021-05-02", + "updated": "2021-05-02", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2201.11903v6", + "title": "Chain-of-Thought Prompting Elicits Reasoning in Large Language Models", + "abstract": "We explore how generating a chain of thought -- a series of intermediate\nreasoning steps -- significantly improves the ability of large language models\nto perform complex reasoning. In particular, we show how such reasoning\nabilities emerge naturally in sufficiently large language models via a simple\nmethod called chain of thought prompting, where a few chain of thought\ndemonstrations are provided as exemplars in prompting. Experiments on three\nlarge language models show that chain of thought prompting improves performance\non a range of arithmetic, commonsense, and symbolic reasoning tasks. The\nempirical gains can be striking. 
For instance, prompting a 540B-parameter\nlanguage model with just eight chain of thought exemplars achieves state of the\nart accuracy on the GSM8K benchmark of math word problems, surpassing even\nfinetuned GPT-3 with a verifier.", + "authors": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, Denny Zhou", + "published": "2022-01-28", + "updated": "2023-01-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2202.08906v2", + "title": "ST-MoE: Designing Stable and Transferable Sparse Expert Models", + "abstract": "Scale has opened new frontiers in natural language processing -- but at a\nhigh cost. In response, Mixture-of-Experts (MoE) and Switch Transformers have\nbeen proposed as an energy efficient path to even larger and more capable\nlanguage models. But advancing the state-of-the-art across a broad set of\nnatural language tasks has been hindered by training instabilities and\nuncertain quality during fine-tuning. Our work focuses on these issues and acts\nas a design guide. We conclude by scaling a sparse model to 269B parameters,\nwith a computational cost comparable to a 32B dense encoder-decoder Transformer\n(Stable and Transferable Mixture-of-Experts or ST-MoE-32B). For the first time,\na sparse model achieves state-of-the-art performance in transfer learning,\nacross a diverse set of tasks including reasoning (SuperGLUE, ARC Easy, ARC\nChallenge), summarization (XSum, CNN-DM), closed book question answering\n(WebQA, Natural Questions), and adversarially constructed tasks (Winogrande,\nANLI R3).", + "authors": "Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, William Fedus", + "published": "2022-02-17", + "updated": "2022-04-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2211.12588v4", + "title": "Program of Thoughts Prompting: Disentangling Computation from Reasoning for Numerical Reasoning Tasks", + "abstract": "Recently, there has been significant progress in teaching language models to\nperform step-by-step reasoning to solve complex numerical reasoning tasks.\nChain-of-thoughts prompting (CoT) is by far the state-of-art method for these\ntasks. CoT uses language models to perform both reasoning and computation in\nthe multi-step `thought' process. To disentangle computation from reasoning, we\npropose `Program of Thoughts' (PoT), which uses language models (mainly Codex)\nto express the reasoning process as a program. The computation is relegated to\nan external computer, which executes the generated programs to derive the\nanswer. We evaluate PoT on five math word problem datasets (GSM, AQuA, SVAMP,\nTabMWP, MultiArith) and three financial-QA datasets (FinQA, ConvFinQA, TATQA)\nfor both few-shot and zero-shot setups. Under both few-shot and zero-shot\nsettings, PoT can show an average performance gain over CoT by around 12\\%\nacross all the evaluated datasets. By combining PoT with self-consistency\ndecoding, we can achieve SoTA performance on all math problem datasets and\nnear-SoTA performance on financial datasets. All of our data and code are\nreleased in Github https://github.com/wenhuchen/Program-of-Thoughts", + "authors": "Wenhu Chen, Xueguang Ma, Xinyi Wang, William W. 
Cohen", + "published": "2022-11-22", + "updated": "2023-10-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.15683v1", + "title": "Why are NLP Models Fumbling at Elementary Math? A Survey of Deep Learning based Word Problem Solvers", + "abstract": "From the latter half of the last decade, there has been a growing interest in\ndeveloping algorithms for automatically solving mathematical word problems\n(MWP). It is a challenging and unique task that demands blending surface level\ntext pattern recognition with mathematical reasoning. In spite of extensive\nresearch, we are still miles away from building robust representations of\nelementary math word problems and effective solutions for the general task. In\nthis paper, we critically examine the various models that have been developed\nfor solving word problems, their pros and cons and the challenges ahead. In the\nlast two years, a lot of deep learning models have recorded competing results\non benchmark datasets, making a critical and conceptual analysis of literature\nhighly useful at this juncture. We take a step back and analyse why, in spite\nof this abundance in scholarly interest, the predominantly used experiment and\ndataset designs continue to be a stumbling block. From the vantage point of\nhaving analyzed the literature closely, we also endeavour to provide a road-map\nfor future math word problem research.", + "authors": "Sowmya S Sundaram, Sairam Gurajada, Marco Fisichella, Deepak P, Savitha Sam Abraham", + "published": "2022-05-31", + "updated": "2022-05-31", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.11171v4", + "title": "Self-Consistency Improves Chain of Thought Reasoning in Language Models", + "abstract": "Chain-of-thought prompting combined with pre-trained large language models\nhas achieved encouraging results on complex reasoning tasks. In this paper, we\npropose a new decoding strategy, self-consistency, to replace the naive greedy\ndecoding used in chain-of-thought prompting. It first samples a diverse set of\nreasoning paths instead of only taking the greedy one, and then selects the\nmost consistent answer by marginalizing out the sampled reasoning paths.\nSelf-consistency leverages the intuition that a complex reasoning problem\ntypically admits multiple different ways of thinking leading to its unique\ncorrect answer. Our extensive empirical evaluation shows that self-consistency\nboosts the performance of chain-of-thought prompting with a striking margin on\na range of popular arithmetic and commonsense reasoning benchmarks, including\nGSM8K (+17.9%), SVAMP (+11.0%), AQuA (+12.2%), StrategyQA (+6.4%) and\nARC-challenge (+3.9%).", + "authors": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, Denny Zhou", + "published": "2022-03-21", + "updated": "2023-03-07", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2206.14858v2", + "title": "Solving Quantitative Reasoning Problems with Language Models", + "abstract": "Language models have achieved remarkable performance on a wide range of tasks\nthat require natural language understanding. 
Nevertheless, state-of-the-art\nmodels have generally struggled with tasks that require quantitative reasoning,\nsuch as solving mathematics, science, and engineering problems at the college\nlevel. To help close this gap, we introduce Minerva, a large language model\npretrained on general natural language data and further trained on technical\ncontent. The model achieves state-of-the-art performance on technical\nbenchmarks without the use of external tools. We also evaluate our model on\nover two hundred undergraduate-level problems in physics, biology, chemistry,\neconomics, and other sciences that require quantitative reasoning, and find\nthat the model can correctly answer nearly a third of them.", + "authors": "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, Yuhuai Wu, Behnam Neyshabur, Guy Gur-Ari, Vedant Misra", + "published": "2022-06-29", + "updated": "2022-07-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1701.06538v1", + "title": "Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer", + "abstract": "The capacity of a neural network to absorb information is limited by its\nnumber of parameters. Conditional computation, where parts of the network are\nactive on a per-example basis, has been proposed in theory as a way of\ndramatically increasing model capacity without a proportional increase in\ncomputation. In practice, however, there are significant algorithmic and\nperformance challenges. In this work, we address these challenges and finally\nrealize the promise of conditional computation, achieving greater than 1000x\nimprovements in model capacity with only minor losses in computational\nefficiency on modern GPU clusters. We introduce a Sparsely-Gated\nMixture-of-Experts layer (MoE), consisting of up to thousands of feed-forward\nsub-networks. A trainable gating network determines a sparse combination of\nthese experts to use for each example. We apply the MoE to the tasks of\nlanguage modeling and machine translation, where model capacity is critical for\nabsorbing the vast quantities of knowledge available in the training corpora.\nWe present model architectures in which a MoE with up to 137 billion parameters\nis applied convolutionally between stacked LSTM layers. On large language\nmodeling and machine translation benchmarks, these models achieve significantly\nbetter results than state-of-the-art at lower computational cost.", + "authors": "Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, Jeff Dean", + "published": "2017-01-23", + "updated": "2017-01-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "cs.NE", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.12701v2", + "title": "Eliciting and Understanding Cross-Task Skills with Task-Level Mixture-of-Experts", + "abstract": "Recent works suggest that transformer models are capable of multi-tasking on\ndiverse NLP tasks and adapting to new tasks efficiently. However, the potential\nof these multi-task models may be limited as they use the same set of\nparameters for all tasks. In contrast, humans tackle tasks in a more flexible\nway, by making proper presumptions on what skills and knowledge are relevant\nand executing only the necessary computations. 
Inspired by this, we propose to\nuse task-level mixture-of-expert models, which has a collection of transformer\nlayers (i.e., experts) and a router component that chooses from these experts\ndynamically and flexibly. We find that these models help improve the average\nperformance gain (ARG) metric by 2.6% when adapting to unseen tasks in the\nfew-shot setting and by 5.6% in the zero-shot generalization setting. Further,\nwe show that the learned routing decisions partly rediscover human\ncategorization of NLP tasks -- certain experts are strongly associated with\nextractive tasks, some with classification tasks, and some with tasks requiring\nworld knowledge.", + "authors": "Qinyuan Ye, Juan Zha, Xiang Ren", + "published": "2022-05-25", + "updated": "2022-11-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.10625v3", + "title": "Least-to-Most Prompting Enables Complex Reasoning in Large Language Models", + "abstract": "Chain-of-thought prompting has demonstrated remarkable performance on various\nnatural language reasoning tasks. However, it tends to perform poorly on tasks\nwhich requires solving problems harder than the exemplars shown in the prompts.\nTo overcome this challenge of easy-to-hard generalization, we propose a novel\nprompting strategy, least-to-most prompting. The key idea in this strategy is\nto break down a complex problem into a series of simpler subproblems and then\nsolve them in sequence. Solving each subproblem is facilitated by the answers\nto previously solved subproblems. Our experimental results on tasks related to\nsymbolic manipulation, compositional generalization, and math reasoning reveal\nthat least-to-most prompting is capable of generalizing to more difficult\nproblems than those seen in the prompts. A notable finding is that when the\nGPT-3 code-davinci-002 model is used with least-to-most prompting, it can solve\nthe compositional generalization benchmark SCAN in any split (including length\nsplit) with an accuracy of at least 99% using just 14 exemplars, compared to\nonly 16% accuracy with chain-of-thought prompting. This is particularly\nnoteworthy because neural-symbolic models in the literature that specialize in\nsolving SCAN are trained on the entire training set containing over 15,000\nexamples. We have included prompts for all the tasks in the Appendix.", + "authors": "Denny Zhou, Nathanael Sch\u00e4rli, Le Hou, Jason Wei, Nathan Scales, Xuezhi Wang, Dale Schuurmans, Claire Cui, Olivier Bousquet, Quoc Le, Ed Chi", + "published": "2022-05-21", + "updated": "2023-04-16", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1810.04805v2", + "title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", + "abstract": "We introduce a new language representation model called BERT, which stands\nfor Bidirectional Encoder Representations from Transformers. Unlike recent\nlanguage representation models, BERT is designed to pre-train deep\nbidirectional representations from unlabeled text by jointly conditioning on\nboth left and right context in all layers. 
As a result, the pre-trained BERT\nmodel can be fine-tuned with just one additional output layer to create\nstate-of-the-art models for a wide range of tasks, such as question answering\nand language inference, without substantial task-specific architecture\nmodifications.\n BERT is conceptually simple and empirically powerful. It obtains new\nstate-of-the-art results on eleven natural language processing tasks, including\npushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI\naccuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering\nTest F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1\n(5.1 point absolute improvement).", + "authors": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, Kristina Toutanova", + "published": "2018-10-11", + "updated": "2019-05-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.11916v4", + "title": "Large Language Models are Zero-Shot Reasoners", + "abstract": "Pretrained large language models (LLMs) are widely used in many sub-fields of\nnatural language processing (NLP) and generally known as excellent few-shot\nlearners with task-specific exemplars. Notably, chain of thought (CoT)\nprompting, a recent technique for eliciting complex multi-step reasoning\nthrough step-by-step answer examples, achieved the state-of-the-art\nperformances in arithmetics and symbolic reasoning, difficult system-2 tasks\nthat do not follow the standard scaling laws for LLMs. While these successes\nare often attributed to LLMs' ability for few-shot learning, we show that LLMs\nare decent zero-shot reasoners by simply adding \"Let's think step by step\"\nbefore each answer. Experimental results demonstrate that our Zero-shot-CoT,\nusing the same single prompt template, significantly outperforms zero-shot LLM\nperformances on diverse benchmark reasoning tasks including arithmetics\n(MultiArith, GSM8K, AQUA-RAT, SVAMP), symbolic reasoning (Last Letter, Coin\nFlip), and other logical reasoning tasks (Date Understanding, Tracking Shuffled\nObjects), without any hand-crafted few-shot examples, e.g. increasing the\naccuracy on MultiArith from 17.7% to 78.7% and GSM8K from 10.4% to 40.7% with\nlarge InstructGPT model (text-davinci-002), as well as similar magnitudes of\nimprovements with another off-the-shelf large model, 540B parameter PaLM. The\nversatility of this single prompt across very diverse reasoning tasks hints at\nuntapped and understudied fundamental zero-shot capabilities of LLMs,\nsuggesting high-level, multi-task broad cognitive capabilities may be extracted\nby simple prompting. 
We hope our work not only serves as the minimal strongest\nzero-shot baseline for the challenging reasoning benchmarks, but also\nhighlights the importance of carefully exploring and analyzing the enormous\nzero-shot knowledge hidden inside LLMs before crafting finetuning datasets or\nfew-shot exemplars.", + "authors": "Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, Yusuke Iwasawa", + "published": "2022-05-24", + "updated": "2023-01-29", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2110.14168v2", + "title": "Training Verifiers to Solve Math Word Problems", + "abstract": "State-of-the-art language models can match human performance on many tasks,\nbut they still struggle to robustly perform multi-step mathematical reasoning.\nTo diagnose the failures of current models and support research, we introduce\nGSM8K, a dataset of 8.5K high quality linguistically diverse grade school math\nword problems. We find that even the largest transformer models fail to achieve\nhigh test performance, despite the conceptual simplicity of this problem\ndistribution. To increase performance, we propose training verifiers to judge\nthe correctness of model completions. At test time, we generate many candidate\nsolutions and select the one ranked highest by the verifier. We demonstrate\nthat verification significantly improves performance on GSM8K, and we provide\nstrong empirical evidence that verification scales more effectively with\nincreased data than a finetuning baseline.", + "authors": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, John Schulman", + "published": "2021-10-27", + "updated": "2021-11-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.03874v2", + "title": "Measuring Mathematical Problem Solving With the MATH Dataset", + "abstract": "Many intellectual endeavors require mathematical problem solving, but this\nskill remains beyond the capabilities of computers. To measure this ability in\nmachine learning models, we introduce MATH, a new dataset of 12,500 challenging\ncompetition mathematics problems. Each problem in MATH has a full step-by-step\nsolution which can be used to teach models to generate answer derivations and\nexplanations. To facilitate future research and increase accuracy on MATH, we\nalso contribute a large auxiliary pretraining dataset which helps teach models\nthe fundamentals of mathematics. Even though we are able to increase accuracy\non MATH, our results show that accuracy remains relatively low, even with\nenormous Transformer models. Moreover, we find that simply increasing budgets\nand model parameter counts will be impractical for achieving strong\nmathematical reasoning if scaling trends continue. While scaling Transformers\nis automatically solving most other text-based tasks, scaling is not currently\nsolving MATH. 
To have more traction on mathematical problem solving we will\nlikely need new algorithmic advancements from the broader research community.", + "authors": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, Jacob Steinhardt", + "published": "2021-03-05", + "updated": "2021-11-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1901.11504v2", + "title": "Multi-Task Deep Neural Networks for Natural Language Understanding", + "abstract": "In this paper, we present a Multi-Task Deep Neural Network (MT-DNN) for\nlearning representations across multiple natural language understanding (NLU)\ntasks. MT-DNN not only leverages large amounts of cross-task data, but also\nbenefits from a regularization effect that leads to more general\nrepresentations in order to adapt to new tasks and domains. MT-DNN extends the\nmodel proposed in Liu et al. (2015) by incorporating a pre-trained\nbidirectional transformer language model, known as BERT (Devlin et al., 2018).\nMT-DNN obtains new state-of-the-art results on ten NLU tasks, including SNLI,\nSciTail, and eight out of nine GLUE tasks, pushing the GLUE benchmark to 82.7%\n(2.2% absolute improvement). We also demonstrate using the SNLI and SciTail\ndatasets that the representations learned by MT-DNN allow domain adaptation\nwith substantially fewer in-domain labels than the pre-trained BERT\nrepresentations. The code and pre-trained models are publicly available at\nhttps://github.com/namisan/mt-dnn.", + "authors": "Xiaodong Liu, Pengcheng He, Weizhu Chen, Jianfeng Gao", + "published": "2019-01-31", + "updated": "2019-05-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2202.13914v2", + "title": "Combining Modular Skills in Multitask Learning", + "abstract": "A modular design encourages neural models to disentangle and recombine\ndifferent facets of knowledge to generalise more systematically to new tasks.\nIn this work, we assume that each task is associated with a subset of latent\ndiscrete skills from a (potentially small) inventory. In turn, skills\ncorrespond to parameter-efficient (sparse / low-rank) model parameterisations.\nBy jointly learning these and a task-skill allocation matrix, the network for\neach task is instantiated as the average of the parameters of active skills. To\nfavour non-trivial soft partitions of skills across tasks, we experiment with a\nseries of inductive biases, such as an Indian Buffet Process prior and a\ntwo-speed learning rate. We evaluate our latent-skill model on two main\nsettings: 1) multitask reinforcement learning for grounded instruction\nfollowing on 8 levels of the BabyAI platform; and 2) few-shot adaptation of\npre-trained text-to-text generative models on CrossFit, a benchmark comprising\n160 NLP tasks. We find that the modular design of a network significantly\nincreases sample efficiency in reinforcement learning and few-shot\ngeneralisation in supervised learning, compared to baselines with fully shared,\ntask-specific, or conditionally generated parameters where knowledge is\nentangled across tasks. In addition, we show how discrete skills help\ninterpretability, as they yield an explicit hierarchy of tasks.", + "authors": "Edoardo M. 
Ponti, Alessandro Sordoni, Yoshua Bengio, Siva Reddy", + "published": "2022-02-28", + "updated": "2022-03-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2109.00799v2", + "title": "MWPToolkit: An Open-Source Framework for Deep Learning-Based Math Word Problem Solvers", + "abstract": "Developing automatic Math Word Problem (MWP) solvers has been an interest of\nNLP researchers since the 1960s. Over the last few years, there are a growing\nnumber of datasets and deep learning-based methods proposed for effectively\nsolving MWPs. However, most existing methods are benchmarked soly on one or two\ndatasets, varying in different configurations, which leads to a lack of\nunified, standardized, fair, and comprehensive comparison between methods. This\npaper presents MWPToolkit, the first open-source framework for solving MWPs. In\nMWPToolkit, we decompose the procedure of existing MWP solvers into multiple\ncore components and decouple their models into highly reusable modules. We also\nprovide a hyper-parameter search function to boost the performance. In total,\nwe implement and compare 17 MWP solvers on 4 widely-used single equation\ngeneration benchmarks and 2 multiple equations generation benchmarks. These\nfeatures enable our MWPToolkit to be suitable for researchers to reproduce\nadvanced baseline models and develop new MWP solvers quickly. Code and\ndocuments are available at https://github.com/LYH-YF/MWPToolkit.", + "authors": "Yihuai Lan, Lei Wang, Qiyuan Zhang, Yunshi Lan, Bing Tian Dai, Yan Wang, Dongxiang Zhang, Ee-Peng Lim", + "published": "2021-09-02", + "updated": "2021-09-18", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.17517v2", + "title": "Lila: A Unified Benchmark for Mathematical Reasoning", + "abstract": "Mathematical reasoning skills are essential for general-purpose intelligent\nsystems to perform tasks from grocery shopping to climate modeling. Towards\nevaluating and improving AI systems in this domain, we propose LILA, a unified\nmathematical reasoning benchmark consisting of 23 diverse tasks along four\ndimensions: (i) mathematical abilities e.g., arithmetic, calculus (ii) language\nformat e.g., question-answering, fill-in-the-blanks (iii) language diversity\ne.g., no language, simple language (iv) external knowledge e.g., commonsense,\nphysics. We construct our benchmark by extending 20 datasets benchmark by\ncollecting task instructions and solutions in the form of Python programs,\nthereby obtaining explainable solutions in addition to the correct answer. We\nadditionally introduce two evaluation datasets to measure out-of-distribution\nperformance and robustness to language perturbation. Finally, we introduce\nBHASKARA, a general-purpose mathematical reasoning model trained on LILA.\nImportantly, we find that multi-tasking leads to significant improvements\n(average relative improvement of 21.83% F1 score vs. 
single-task models), while\nthe best performing model only obtains 60.40%, indicating the room for\nimprovement in general mathematical reasoning and understanding.", + "authors": "Swaroop Mishra, Matthew Finlayson, Pan Lu, Leonard Tang, Sean Welleck, Chitta Baral, Tanmay Rajpurohit, Oyvind Tafjord, Ashish Sabharwal, Peter Clark, Ashwin Kalyan", + "published": "2022-10-31", + "updated": "2023-03-08", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "68T50", + "I.2.7" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.00720v2", + "title": "Complexity-Based Prompting for Multi-Step Reasoning", + "abstract": "We study the task of prompting large-scale language models to perform\nmulti-step reasoning. Existing work shows that when prompted with a chain of\nthoughts (CoT), sequences of short sentences describing intermediate reasoning\nsteps towards a final answer, large language models can generate new reasoning\nchains and predict answers for new inputs. A central question is which\nreasoning examples make the most effective prompts. In this work, we propose\ncomplexity-based prompting, a simple and effective example selection scheme for\nmulti-step reasoning. We show that prompts with higher reasoning complexity,\ni.e., chains with more reasoning steps, achieve substantially better\nperformance on multi-step reasoning tasks over strong baselines. We further\nextend our complexity-based criteria from prompting (selecting inputs) to\ndecoding (selecting outputs), where we sample multiple reasoning chains from\nthe model, then choose the majority of generated answers from complex reasoning\nchains (over simple chains). When used to prompt GPT-3 and Codex, our approach\nsubstantially improves multi-step reasoning accuracy and achieves new\nstate-of-the-art (SOTA) performance on three math benchmarks (GSM8K,\nMultiArith, and MathQA) and two BigBenchHard tasks (Date Understanding and\nPenguins), with an average +5.3 and up to +18 accuracy improvements. Compared\nwith existing example selection schemes like manual tuning or retrieval-based\nselection, selection based on reasoning complexity is intuitive, easy to\nimplement, and annotation-efficient. Further results demonstrate the robustness\nof performance gains from complex prompts under format perturbation and\ndistribution shift.", + "authors": "Yao Fu, Hao Peng, Ashish Sabharwal, Peter Clark, Tushar Khot", + "published": "2022-10-03", + "updated": "2023-01-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2009.03393v1", + "title": "Generative Language Modeling for Automated Theorem Proving", + "abstract": "We explore the application of transformer-based language models to automated\ntheorem proving. This work is motivated by the possibility that a major\nlimitation of automated theorem provers compared to humans -- the generation of\noriginal mathematical terms -- might be addressable via generation from\nlanguage models. We present an automated prover and proof assistant, GPT-f, for\nthe Metamath formalization language, and analyze its performance. 
GPT-f found\nnew short proofs that were accepted into the main Metamath library, which is to\nour knowledge, the first time a deep-learning based system has contributed\nproofs that were adopted by a formal mathematics community.", + "authors": "Stanislas Polu, Ilya Sutskever", + "published": "2020-09-07", + "updated": "2020-09-07", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.15231v2", + "title": "A Survey in Mathematical Language Processing", + "abstract": "Informal mathematical text underpins real-world quantitative reasoning and\ncommunication. Developing sophisticated methods of retrieval and abstraction\nfrom this dual modality is crucial in the pursuit of the vision of automating\ndiscovery in quantitative science and mathematics. We track the development of\ninformal mathematical language processing approaches across five strategic\nsub-areas in recent years, highlighting the prevailing successful\nmethodological elements along with existing limitations.", + "authors": "Jordan Meadows, Andre Freitas", + "published": "2022-05-30", + "updated": "2024-04-08", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1811.00720v2", + "title": "Semantically-Aligned Equation Generation for Solving and Reasoning Math Word Problems", + "abstract": "Solving math word problems is a challenging task that requires accurate\nnatural language understanding to bridge natural language texts and math\nexpressions. Motivated by the intuition about how human generates the equations\ngiven the problem texts, this paper presents a neural approach to automatically\nsolve math word problems by operating symbols according to their semantic\nmeanings in texts. This paper views the process of generating equation as a\nbridge between the semantic world and the symbolic world, where the proposed\nneural math solver is based on an encoder-decoder framework. In the proposed\nmodel, the encoder is designed to understand the semantics of problems, and the\ndecoder focuses on tracking semantic meanings of the generated symbols and then\ndeciding which symbol to generate next. The preliminary experiments are\nconducted in a dataset Math23K, and our model significantly outperforms both\nthe state-of-the-art single model and the best non-retrieval-based model over\nabout 10% accuracy, demonstrating the effectiveness of bridging the symbolic\nand semantic worlds from math word problems.", + "authors": "Ting-Rui Chiang, Yun-Nung Chen", + "published": "2018-11-02", + "updated": "2019-06-09", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.03493v1", + "title": "Automatic Chain of Thought Prompting in Large Language Models", + "abstract": "Large language models (LLMs) can perform complex reasoning by generating\nintermediate reasoning steps. Providing these steps for prompting\ndemonstrations is called chain-of-thought (CoT) prompting. CoT prompting has\ntwo major paradigms. One leverages a simple prompt like \"Let's think step by\nstep\" to facilitate step-by-step thinking before answering a question. The\nother uses a few manual demonstrations one by one, each composed of a question\nand a reasoning chain that leads to an answer. The superior performance of the\nsecond paradigm hinges on the hand-crafting of task-specific demonstrations one\nby one. 
We show that such manual efforts may be eliminated by leveraging LLMs\nwith the \"Let's think step by step\" prompt to generate reasoning chains for\ndemonstrations one by one, i.e., let's think not just step by step, but also\none by one. However, these generated chains often come with mistakes. To\nmitigate the effect of such mistakes, we find that diversity matters for\nautomatically constructing demonstrations. We propose an automatic CoT\nprompting method: Auto-CoT. It samples questions with diversity and generates\nreasoning chains to construct demonstrations. On ten public benchmark reasoning\ntasks with GPT-3, Auto-CoT consistently matches or exceeds the performance of\nthe CoT paradigm that requires manual designs of demonstrations. Code is\navailable at https://github.com/amazon-research/auto-cot", + "authors": "Zhuosheng Zhang, Aston Zhang, Mu Li, Alex Smola", + "published": "2022-10-07", + "updated": "2022-10-07", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2101.11038v1", + "title": "Muppet: Massive Multi-task Representations with Pre-Finetuning", + "abstract": "We propose pre-finetuning, an additional large-scale learning stage between\nlanguage model pre-training and fine-tuning. Pre-finetuning is massively\nmulti-task learning (around 50 datasets, over 4.8 million total labeled\nexamples), and is designed to encourage learning of representations that\ngeneralize better to many different tasks. We show that pre-finetuning\nconsistently improves performance for pretrained discriminators (e.g.~RoBERTa)\nand generation models (e.g.~BART) on a wide range of tasks (sentence\nprediction, commonsense reasoning, MRC, etc.), while also significantly\nimproving sample efficiency during fine-tuning. We also show that large-scale\nmulti-tasking is crucial; pre-finetuning can hurt performance when few tasks\nare used up until a critical point (usually above 15) after which performance\nimproves linearly in the number of tasks.", + "authors": "Armen Aghajanyan, Anchit Gupta, Akshat Shrivastava, Xilun Chen, Luke Zettlemoyer, Sonal Gupta", + "published": "2021-01-26", + "updated": "2021-01-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2206.06315v1", + "title": "JiuZhang: A Chinese Pre-trained Language Model for Mathematical Problem Understanding", + "abstract": "This paper aims to advance the mathematical intelligence of machines by\npresenting the first Chinese mathematical pre-trained language model~(PLM) for\neffectively understanding and representing mathematical problems. Unlike other\nstandard NLP tasks, mathematical texts are difficult to understand, since they\ninvolve mathematical terminology, symbols and formulas in the problem\nstatement. Typically, it requires complex mathematical logic and background\nknowledge for solving mathematical problems.\n Considering the complex nature of mathematical texts, we design a novel\ncurriculum pre-training approach for improving the learning of mathematical\nPLMs, consisting of both basic and advanced courses. Specially, we first\nperform token-level pre-training based on a position-biased masking strategy,\nand then design logic-based pre-training tasks that aim to recover the shuffled\nsentences and formulas, respectively. Finally, we introduce a more difficult\npre-training task that enforces the PLM to detect and correct the errors in its\ngenerated solutions. 
We conduct extensive experiments on offline evaluation\n(including nine math-related tasks) and online $A/B$ test. Experimental results\ndemonstrate the effectiveness of our approach compared with a number of\ncompetitive baselines. Our code is available at:\n\\textcolor{blue}{\\url{https://github.com/RUCAIBox/JiuZhang}}.", + "authors": "Wayne Xin Zhao, Kun Zhou, Zheng Gong, Beichen Zhang, Yuanhang Zhou, Jing Sha, Zhigang Chen, Shijin Wang, Cong Liu, Ji-Rong Wen", + "published": "2022-06-13", + "updated": "2022-06-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2312.16610v1", + "title": "Efficient Deweather Mixture-of-Experts with Uncertainty-aware Feature-wise Linear Modulation", + "abstract": "The Mixture-of-Experts (MoE) approach has demonstrated outstanding\nscalability in multi-task learning including low-level upstream tasks such as\nconcurrent removal of multiple adverse weather effects. However, the\nconventional MoE architecture with parallel Feed Forward Network (FFN) experts\nleads to significant parameter and computational overheads that hinder its\nefficient deployment. In addition, the naive MoE linear router is suboptimal in\nassigning task-specific features to multiple experts which limits its further\nscalability. In this work, we propose an efficient MoE architecture with weight\nsharing across the experts. Inspired by the idea of linear feature modulation\n(FM), our architecture implicitly instantiates multiple experts via learnable\nactivation modulations on a single shared expert block. The proposed Feature\nModulated Expert (FME) serves as a building block for the novel\nMixture-of-Feature-Modulation-Experts (MoFME) architecture, which can scale up\nthe number of experts with low overhead. We further propose an\nUncertainty-aware Router (UaR) to assign task-specific features to different FM\nmodules with well-calibrated weights. This enables MoFME to effectively learn\ndiverse expert functions for multiple tasks. The conducted experiments on the\nmulti-deweather task show that our MoFME outperforms the baselines in the image\nrestoration quality by 0.1-0.2 dB and achieves SOTA-compatible performance\nwhile saving more than 72% of parameters and 39% inference time over the\nconventional MoE counterpart. Experiments on the downstream segmentation and\nclassification tasks further demonstrate the generalizability of MoFME to real\nopen-world applications.", + "authors": "Rongyu Zhang, Yulin Luo, Jiaming Liu, Huanrui Yang, Zhen Dong, Denis Gudovskiy, Tomoyuki Okuno, Yohei Nakata, Kurt Keutzer, Yuan Du, Shanghang Zhang", + "published": "2023-12-27", + "updated": "2023-12-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02629v2", + "title": "BA-MoE: Boundary-Aware Mixture-of-Experts Adapter for Code-Switching Speech Recognition", + "abstract": "Mixture-of-experts based models, which use language experts to extract\nlanguage-specific representations effectively, have been well applied in\ncode-switching automatic speech recognition. However, there is still\nsubstantial space to improve as similar pronunciation across languages may\nresult in ineffective multi-language modeling and inaccurate language boundary\nestimation. To eliminate these drawbacks, we propose a cross-layer language\nadapter and a boundary-aware training method, namely Boundary-Aware\nMixture-of-Experts (BA-MoE). 
Specifically, we introduce language-specific\nadapters to separate language-specific representations and a unified gating\nlayer to fuse representations within each encoder layer. Second, we compute\nlanguage adaptation loss of the mean output of each language-specific adapter\nto improve the adapter module's language-specific representation learning.\nBesides, we utilize a boundary-aware predictor to learn boundary\nrepresentations for dealing with language boundary confusion. Our approach\nachieves significant performance improvement, reducing the mixture error rate\nby 16.55\\% compared to the baseline on the ASRU 2019 Mandarin-English\ncode-switching challenge dataset.", + "authors": "Peikun Chen, Fan Yu, Yuhao Lian, Hongfei Xue, Xucheng Wan, Naijun Zheng, Huan Zhou, Lei Xie", + "published": "2023-10-04", + "updated": "2023-10-08", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.11412v1", + "title": "Expert Composer Policy: Scalable Skill Repertoire for Quadruped Robots", + "abstract": "We propose the expert composer policy, a framework to reliably expand the\nskill repertoire of quadruped agents. The composer policy links pair of experts\nvia transitions to a sampled target state, allowing experts to be composed\nsequentially. Each expert specializes in a single skill, such as a locomotion\ngait or a jumping motion. Instead of a hierarchical or mixture-of-experts\narchitecture, we train a single composer policy in an independent process that\nis not conditioned on the other expert policies. By reusing the same composer\npolicy, our approach enables adding new experts without affecting existing\nones, enabling incremental repertoire expansion and preserving original motion\nquality. We measured the transition success rate of 72 transition pairs and\nachieved an average success rate of 99.99\\%, which is over 10\\% higher than the\nbaseline random approach, and outperforms other state-of-the-art methods. Using\ndomain randomization during training we ensure a successful transfer to the\nreal world, where we achieve an average transition success rate of 97.22\\%\n(N=360) in our experiments.", + "authors": "Guilherme Christmann, Ying-Sheng Luo, Wei-Chao Chen", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.RO", + "cats": [ + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02410v1", + "title": "Mixture of Quantized Experts (MoQE): Complementary Effect of Low-bit Quantization and Robustness", + "abstract": "Large Mixture of Experts (MoE) models could achieve state-of-the-art quality\non various language tasks, including machine translation task, thanks to the\nefficient model scaling capability with expert parallelism. However, it has\nbrought a fundamental issue of larger memory consumption and increased memory\nbandwidth bottleneck at deployment time. In this paper, we propose Mixture of\nQuantized Experts (MoQE) which is a simple weight-only quantization method\napplying ultra low-bit down to 2-bit quantizations only to expert weights for\nmitigating the increased memory and latency issues of MoE models. We show that\nlow-bit quantization together with the MoE architecture delivers a reliable\nmodel performance while reducing the memory size significantly even without any\nadditional training in most cases. 
In particular, expert layers in MoE models\nare much more robust to the quantization than conventional feedforward networks\n(FFN) layers. In our comprehensive analysis, we show that MoE models with 2-bit\nexpert weights can deliver better model performance than the dense model\ntrained on the same dataset. As a result of low-bit quantization, we show the\nmodel size can be reduced by 79.6% of the original half precision floating\npoint (fp16) MoE model. Combined with an optimized GPU runtime implementation,\nit also achieves 1.24X speed-up on A100 GPUs.", + "authors": "Young Jin Kim, Raffy Fahim, Hany Hassan Awadalla", + "published": "2023-10-03", + "updated": "2023-10-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.14800v1", + "title": "Not All Experts are Equal: Efficient Expert Pruning and Skipping for Mixture-of-Experts Large Language Models", + "abstract": "A pivotal advancement in the progress of large language models (LLMs) is the\nemergence of the Mixture-of-Experts (MoE) LLMs. Compared to traditional LLMs,\nMoE LLMs can achieve higher performance with fewer parameters, but it is still\nhard to deploy them due to their immense parameter sizes. Different from\nprevious weight pruning methods that rely on specifically designed hardware,\nthis paper mainly aims to enhance the deployment efficiency of MoE LLMs by\nintroducing plug-and-play expert-level sparsification techniques. Specifically,\nwe propose, for the first time to our best knowledge, post-training approaches\nfor task-agnostic and task-specific expert pruning and skipping of MoE LLMs,\ntailored to improve deployment efficiency while maintaining model performance\nacross a wide range of tasks. Extensive experiments show that our proposed\nmethods can simultaneously reduce model sizes and increase the inference speed,\nwhile maintaining satisfactory performance. Data and code will be available at\nhttps://github.com/Lucky-Lance/Expert_Sparsity.", + "authors": "Xudong Lu, Qi Liu, Yuhui Xu, Aojun Zhou, Siyuan Huang, Bo Zhang, Junchi Yan, Hongsheng Li", + "published": "2024-02-22", + "updated": "2024-02-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2010.14260v2", + "title": "Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability", + "abstract": "In this paper, we consider mixtures of two Mallows models for top-$k$\nrankings, both with the same location parameter but with different scale\nparameters, i.e., a mixture of concentric Mallows models. This situation arises\nwhen we have a heterogeneous population of voters formed by two homogeneous\npopulations, one of which is a subpopulation of expert voters while the other\nincludes the non-expert voters. We propose efficient sampling algorithms for\nMallows top-$k$ rankings. We show the identifiability of both components, and\nthe learnability of their respective parameters in this setting by, first,\nbounding the sample complexity for the Borda algorithm with top-$k$ rankings\nand second, proposing polynomial time algorithm for the separation of the\nrankings in each component. 
Finally, since the rank aggregation will suffer\nfrom a large amount of noise introduced by the non-expert voters, we adapt the\nBorda algorithm to be able to recover the ground truth consensus ranking which\nis especially consistent with the expert rankings.", + "authors": "Collas Fabien, Irurozki Ekhine", + "published": "2020-10-27", + "updated": "2020-11-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1811.10740v2", + "title": "Mixture of Regression Experts in fMRI Encoding", + "abstract": "fMRI semantic category understanding using linguistic encoding models attempt\nto learn a forward mapping that relates stimuli to the corresponding brain\nactivation. Classical encoding models use linear multi-variate methods to\npredict the brain activation (all voxels) given the stimulus. However, these\nmethods essentially assume multiple regions as one large uniform region or\nseveral independent regions, ignoring connections among them. In this paper, we\npresent a mixture of experts-based model where a group of experts captures\nbrain activity patterns related to particular regions of interest (ROI) and\nalso show the discrimination across different experts. The model is trained\nword stimuli encoded as 25-dimensional feature vectors as input and the\ncorresponding brain responses as output. Given a new word (25-dimensional\nfeature vector), it predicts the entire brain activation as the linear\ncombination of multiple experts brain activations. We argue that each expert\nlearns a certain region of brain activations corresponding to its category of\nwords, which solves the problem of identifying the regions with a simple\nencoding model. We showcase that proposed mixture of experts-based model indeed\nlearns region-based experts to predict the brain activations with high spatial\naccuracy.", + "authors": "Subba Reddy Oota, Adithya Avvaru, Naresh Manwani, Raju S. Bapi", + "published": "2018-11-26", + "updated": "2018-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.HC", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1809.04853v2", + "title": "Bayesian shrinkage in mixture of experts models: Identifying robust determinants of class membership", + "abstract": "A method for implicit variable selection in mixture of experts frameworks is\nproposed. We introduce a prior structure where information is taken from a set\nof independent covariates. Robust class membership predictors are identified\nusing a normal gamma prior. The resulting model setup is used in a finite\nmixture of Bernoulli distributions to find homogenous clusters of women in\nMozambique based on their information sources on HIV. Fully Bayesian inference\nis carried out via the implementation of a Gibbs sampler.", + "authors": "Gregor Zens", + "published": "2018-09-13", + "updated": "2019-01-12", + "primary_cat": "econ.EM", + "cats": [ + "econ.EM", + "62F15, 62J07, 62H30, 90-08" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2205.01848v2", + "title": "Optimizing Mixture of Experts using Dynamic Recompilations", + "abstract": "The Mixture of Experts architecture allows for outrageously large neural\nnetworks by scaling model parameter size independently from computational\ndemand (FLOPs). 
However, current DNN frameworks cannot effectively support the\ndynamic data flow in Mixture of Experts, and implementations on top of these\nframeworks need to use workarounds that introduce significant overheads. To\naddress the limitation of these frameworks, we present DynaMoE, a DNN library\nthat uses dynamic recompilations to optimize and adapt the use of computational\nresources to the dynamic needs of Mixture of Experts models. Our evaluation\nshows that DynaMoE achieves a 1.8x speedup and supports 2.3x larger model sizes\nwhen compared to existing MoE systems, even when not using recompilations. We\nthen present further optimizations enabled by dynamic recompilations that yield\nan additional 1.7x speedup while simultaneously reducing memory pressure and\nimproving model quality.", + "authors": "Ferdinand Kossmann, Zhihao Jia, Alex Aiken", + "published": "2022-05-04", + "updated": "2022-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12550v1", + "title": "Multilinear Mixture of Experts: Scalable Expert Specialization through Factorization", + "abstract": "The Mixture of Experts (MoE) paradigm provides a powerful way to decompose\ninscrutable dense layers into smaller, modular computations often more amenable\nto human interpretation, debugging, and editability. A major problem however\nlies in the computational cost of scaling the number of experts to achieve\nsufficiently fine-grained specialization. In this paper, we propose the\nMultilinear Mixture of Experts (MMoE) layer to address this, focusing on vision\nmodels. MMoE layers perform an implicit computation on prohibitively large\nweight tensors entirely in factorized form. Consequently, MMoEs both (1) avoid\nthe issues incurred through the discrete expert routing in the popular 'sparse'\nMoE models, yet (2) do not incur the restrictively high inference-time costs of\n'soft' MoE alternatives. We present both qualitative and quantitative evidence\n(through visualization and counterfactual interventions respectively) that\nscaling MMoE layers when fine-tuning foundation models for vision tasks leads\nto more specialized experts at the class-level whilst remaining competitive\nwith the performance of parameter-matched linear layer counterparts. Finally,\nwe show that learned expert specialism further facilitates manual correction of\ndemographic bias in CelebA attribute classification. Our MMoE model code is\navailable at https://github.com/james-oldfield/MMoE.", + "authors": "James Oldfield, Markos Georgopoulos, Grigorios G. Chrysos, Christos Tzelepis, Yannis Panagakis, Mihalis A. Nicolaou, Jiankang Deng, Ioannis Patras", + "published": "2024-02-19", + "updated": "2024-02-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2012.02130v4", + "title": "A similarity-based Bayesian mixture-of-experts model", + "abstract": "We present a new nonparametric mixture-of-experts model for multivariate\nregression problems, inspired by the probabilistic k-nearest neighbors\nalgorithm. Using a conditionally specified model, predictions for out-of-sample\ninputs are based on similarities to each observed data point, yielding\npredictive distributions represented by Gaussian mixtures. 
Posterior inference\nis performed on the parameters of the mixture components as well as the\ndistance metric using a mean-field variational Bayes algorithm accompanied with\na stochastic gradient-based optimization procedure. The proposed method is\nespecially advantageous in settings where inputs are of relatively high\ndimension in comparison to the data size, where input-output relationships are\ncomplex, and where predictive distributions may be skewed or multimodal.\nComputational studies on five datasets, of which two are synthetically\ngenerated, illustrate clear advantages of our mixture-of-experts method for\nhigh-dimensional inputs, outperforming competitor models both in terms of\nvalidation metrics and visual inspection.", + "authors": "Tianfang Zhang, Rasmus Bokrantz, Jimmy Olsson", + "published": "2020-12-03", + "updated": "2022-08-03", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08396v1", + "title": "StableMoE: Stable Routing Strategy for Mixture of Experts", + "abstract": "The Mixture-of-Experts (MoE) technique can scale up the model size of\nTransformers with an affordable computational overhead. We point out that\nexisting learning-to-route MoE methods suffer from the routing fluctuation\nissue, i.e., the target expert of the same input may change along with\ntraining, but only one expert will be activated for the input during inference.\nThe routing fluctuation tends to harm sample efficiency because the same input\nupdates different experts but only one is finally used. In this paper, we\npropose StableMoE with two training stages to address the routing fluctuation\nproblem. In the first training stage, we learn a balanced and cohesive routing\nstrategy and distill it into a lightweight router decoupled from the backbone\nmodel. In the second training stage, we utilize the distilled router to\ndetermine the token-to-expert assignment and freeze it for a stable routing\nstrategy. We validate our method on language modeling and multilingual machine\ntranslation. The results show that StableMoE outperforms existing MoE methods\nin terms of both convergence speed and performance.", + "authors": "Damai Dai, Li Dong, Shuming Ma, Bo Zheng, Zhifang Sui, Baobao Chang, Furu Wei", + "published": "2022-04-18", + "updated": "2022-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17404v1", + "title": "Generalization Error Analysis for Sparse Mixture-of-Experts: A Preliminary Study", + "abstract": "Mixture-of-Experts (MoE) represents an ensemble methodology that amalgamates\npredictions from several specialized sub-models (referred to as experts). This\nfusion is accomplished through a router mechanism, dynamically assigning\nweights to each expert's contribution based on the input data. Conventional MoE\nmechanisms select all available experts, incurring substantial computational\ncosts. In contrast, Sparse Mixture-of-Experts (Sparse MoE) selectively engages\nonly a limited number, or even just one expert, significantly reducing\ncomputation overhead while empirically preserving, and sometimes even\nenhancing, performance. Despite its wide-ranging applications and these\nadvantageous characteristics, MoE's theoretical underpinnings have remained\nelusive. 
In this paper, we embark on an exploration of Sparse MoE's\ngeneralization error concerning various critical factors. Specifically, we\ninvestigate the impact of the number of data samples, the total number of\nexperts, the sparsity in expert selection, the complexity of the routing\nmechanism, and the complexity of individual experts. Our analysis sheds light\non \\textit{how \\textbf{sparsity} contributes to the MoE's generalization},\noffering insights from the perspective of classical learning theory.", + "authors": "Jinze Zhao, Peihao Wang, Zhangyang Wang", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.02952v1", + "title": "On Least Squares Estimation in Softmax Gating Mixture of Experts", + "abstract": "Mixture of experts (MoE) model is a statistical machine learning design that\naggregates multiple expert networks using a softmax gating function in order to\nform a more intricate and expressive model. Despite being commonly used in\nseveral applications owing to their scalability, the mathematical and\nstatistical properties of MoE models are complex and difficult to analyze. As a\nresult, previous theoretical works have primarily focused on probabilistic MoE\nmodels by imposing the impractical assumption that the data are generated from\na Gaussian MoE model. In this work, we investigate the performance of the least\nsquares estimators (LSE) under a deterministic MoE model where the data are\nsampled according to a regression model, a setting that has remained largely\nunexplored. We establish a condition called strong identifiability to\ncharacterize the convergence behavior of various types of expert functions. We\ndemonstrate that the rates for estimating strongly identifiable experts, namely\nthe widely used feed forward networks with activation functions\n$\\mathrm{sigmoid}(\\cdot)$ and $\\tanh(\\cdot)$, are substantially faster than\nthose of polynomial experts, which we show to exhibit a surprising slow\nestimation rate. Our findings have important practical implications for expert\nselection.", + "authors": "Huy Nguyen, Nhat Ho, Alessandro Rinaldo", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.02813v1", + "title": "Towards Understanding Mixture of Experts in Deep Learning", + "abstract": "The Mixture-of-Experts (MoE) layer, a sparsely-activated model controlled by\na router, has achieved great success in deep learning. However, the\nunderstanding of such architecture remains elusive. In this paper, we formally\nstudy how the MoE layer improves the performance of neural network learning and\nwhy the mixture model will not collapse into a single model. Our empirical\nresults suggest that the cluster structure of the underlying problem and the\nnon-linearity of the expert are pivotal to the success of MoE. To further\nunderstand this, we consider a challenging classification problem with\nintrinsic cluster structures, which is hard to learn using a single expert. Yet\nwith the MoE layer, by choosing the experts as two-layer nonlinear\nconvolutional neural networks (CNNs), we show that the problem can be learned\nsuccessfully. 
Furthermore, our theory shows that the router can learn the\ncluster-center features, which helps divide the input complex problem into\nsimpler linear classification sub-problems that individual experts can conquer.\nTo our knowledge, this is the first result towards formally understanding the\nmechanism of the MoE layer for deep learning.", + "authors": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, Yuanzhi Li", + "published": "2022-08-04", + "updated": "2022-08-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2110.04260v3", + "title": "Taming Sparsely Activated Transformer with Stochastic Experts", + "abstract": "Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can\neasily scale to have outrageously large amounts of parameters without\nsignificant increase in computational cost. However, SAMs are reported to be\nparameter inefficient such that larger models do not always lead to better\nperformance. While most on-going research focuses on improving SAMs models by\nexploring methods of routing inputs to experts, our analysis reveals that such\nresearch might not lead to the solution we expect, i.e., the commonly-used\nrouting methods based on gating mechanisms do not work better than randomly\nrouting inputs to experts. In this paper, we propose a new expert-based model,\nTHOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models,\nsuch as the Switch Transformer, experts in THOR are randomly activated for each\ninput during training and inference. THOR models are trained using a\nconsistency regularized loss, where experts learn not only from training data\nbut also from other experts as teachers, such that all the experts make\nconsistent predictions. We validate the effectiveness of THOR on machine\ntranslation tasks. Results show that THOR models are more parameter efficient\nin that they significantly outperform the Transformer and MoE models across\nvarious settings. For example, in multilingual translation, THOR outperforms\nthe Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as\nthat of a state-of-the-art MoE model that is 18 times larger. Our code is\npublicly available at:\nhttps://github.com/microsoft/Stochastic-Mixture-of-Experts.", + "authors": "Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Tuo Zhao, Jianfeng Gao", + "published": "2021-10-08", + "updated": "2022-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.10768v1", + "title": "Memory Augmented Language Models through Mixture of Word Experts", + "abstract": "Scaling up the number of parameters of language models has proven to be an\neffective approach to improve performance. For dense models, increasing model\nsize proportionally increases the model's computation footprint. In this work,\nwe seek to aggressively decouple learning capacity and FLOPs through\nMixture-of-Experts (MoE) style models with large knowledge-rich vocabulary\nbased routing functions and experts. Our proposed approach, dubbed Mixture of\nWord Experts (MoWE), can be seen as a memory augmented model, where a large set\nof word-specific experts play the role of a sparse memory. We demonstrate that\nMoWE performs significantly better than the T5 family of models with similar\nnumber of FLOPs in a variety of NLP tasks. 
Additionally, MoWE outperforms\nregular MoE models on knowledge intensive tasks and has similar performance to\nmore complex memory augmented approaches that often require to invoke custom\nmechanisms to search the sparse memory.", + "authors": "Cicero Nogueira dos Santos, James Lee-Thorp, Isaac Noble, Chung-Ching Chang, David Uthus", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.01750v1", + "title": "Modular Approach to Machine Reading Comprehension: Mixture of Task-Aware Experts", + "abstract": "In this work we present a Mixture of Task-Aware Experts Network for Machine\nReading Comprehension on a relatively small dataset. We particularly focus on\nthe issue of common-sense learning, enforcing the common ground knowledge by\nspecifically training different expert networks to capture different kinds of\nrelationships between each passage, question and choice triplet. Moreover, we\ntake inspiration from the recent advancements of multitask and transfer learning\nby training each network a relevant focused task. By making the\nmixture-of-networks aware of a specific goal by enforcing a task and a\nrelationship, we achieve state-of-the-art results and reduce over-fitting.", + "authors": "Anirudha Rayasam, Anusha Kamath, Gabriel Bayomi Tinoco Kalejaiye", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2303.06318v2", + "title": "A Hybrid Tensor-Expert-Data Parallelism Approach to Optimize Mixture-of-Experts Training", + "abstract": "Mixture-of-Experts (MoE) is a neural network architecture that adds sparsely\nactivated expert blocks to a base model, increasing the number of parameters\nwithout impacting computational costs. However, current distributed deep\nlearning frameworks are limited in their ability to train high-quality MoE\nmodels with large base models. In this work, we present DeepSpeed-TED, a novel,\nthree-dimensional, hybrid parallel algorithm that combines data, tensor, and\nexpert parallelism to enable the training of MoE models with 4 to 8x larger\nbase models than the current state-of-the-art. We also describe memory\noptimizations in the optimizer step, and communication optimizations that\neliminate unnecessary data movement. We implement our approach in DeepSpeed and\nachieve speedups of 26% over a baseline (i.e. without our communication\noptimizations) when training a 40 billion parameter MoE model (6.7 billion base\nmodel with 16 experts) on 128 V100 GPUs.", + "authors": "Siddharth Singh, Olatunji Ruwase, Ammar Ahmad Awan, Samyam Rajbhandari, Yuxiong He, Abhinav Bhatele", + "published": "2023-03-11", + "updated": "2023-05-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.DC", + "cs.PF" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.04377v2", + "title": "Convergence Rates for Gaussian Mixtures of Experts", + "abstract": "We provide a theoretical treatment of over-specified Gaussian mixtures of\nexperts with covariate-free gating networks. We establish the convergence rates\nof the maximum likelihood estimation (MLE) for these models. Our proof\ntechnique is based on a novel notion of \\emph{algebraic independence} of the\nexpert functions. 
Drawing on optimal transport theory, we establish a\nconnection between the algebraic independence and a certain class of partial\ndifferential equations (PDEs). Exploiting this connection allows us to derive\nconvergence rates and minimax lower bounds for parameter estimation.", + "authors": "Nhat Ho, Chiao-Yu Yang, Michael I. Jordan", + "published": "2019-07-09", + "updated": "2022-03-08", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "cs.LG", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.09368v2", + "title": "Mixture-of-Experts with Expert Choice Routing", + "abstract": "Sparsely-activated Mixture-of-experts (MoE) models allow the number of\nparameters to greatly increase while keeping the amount of computation for a\ngiven token or a given sample unchanged. However, a poor expert routing\nstrategy (e.g. one resulting in load imbalance) can cause certain experts to be\nunder-trained, leading to an expert being under or over-specialized. Prior work\nallocates a fixed number of experts to each token using a top-k function\nregardless of the relative importance of different tokens. To address this, we\npropose a heterogeneous mixture-of-experts employing an expert choice method.\nInstead of letting tokens select the top-k experts, we have experts selecting\nthe top-k tokens. As a result, each token can be routed to a variable number of\nexperts and each expert can have a fixed bucket size. We systematically study\npre-training speedups using the same computational resources of the Switch\nTransformer top-1 and GShard top-2 gating of prior work and find that our\nmethod improves training convergence time by more than 2x. For the same\ncomputational cost, our method demonstrates higher performance in fine-tuning\n11 selected tasks in the GLUE and SuperGLUE benchmarks. For a smaller\nactivation cost, our method outperforms the T5 dense model in 7 out of the 11\ntasks.", + "authors": "Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew Dai, Zhifeng Chen, Quoc Le, James Laudon", + "published": "2022-02-18", + "updated": "2022-10-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.14703v1", + "title": "Improving Expert Specialization in Mixture of Experts", + "abstract": "Mixture of experts (MoE), introduced over 20 years ago, is the simplest gated\nmodular neural network architecture. There is renewed interest in MoE because\nthe conditional computation allows only parts of the network to be used during\neach inference, as was recently demonstrated in large scale natural language\nprocessing models. MoE is also of potential interest for continual learning, as\nexperts may be reused for new tasks, and new experts introduced. The gate in\nthe MoE architecture learns task decompositions and individual experts learn\nsimpler functions appropriate to the gate's decomposition. In this paper: (1)\nwe show that the original MoE architecture and its training method do not\nguarantee intuitive task decompositions and good expert utilization, indeed\nthey can fail spectacularly even for simple data such as MNIST and\nFashionMNIST; (2) we introduce a novel gating architecture, similar to\nattention, that improves performance and results in a lower entropy task\ndecomposition; and (3) we introduce a novel data-driven regularization that\nimproves expert specialization. 
We empirically validate our methods on MNIST,\nFashionMNIST and CIFAR-100 datasets.", + "authors": "Yamuna Krishnamurthy, Chris Watkins, Thomas Gaertner", + "published": "2023-02-28", + "updated": "2023-02-28", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.04832v1", + "title": "Dynamic Partition Models", + "abstract": "We present a new approach for learning compact and intuitive distributed\nrepresentations with binary encoding. Rather than summing up expert votes as in\nproducts of experts, we employ for each variable the opinion of the most\nreliable expert. Data points are hence explained through a partitioning of the\nvariables into expert supports. The partitions are dynamically adapted based on\nwhich experts are active. During the learning phase we adopt a smoothed version\nof this model that uses separate mixtures for each data dimension. In our\nexperiments we achieve accurate reconstructions of high-dimensional data points\nwith at most a dozen experts.", + "authors": "Marc Goessling, Yali Amit", + "published": "2017-02-16", + "updated": "2017-02-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.00968v2", + "title": "Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts", + "abstract": "Large multi-modal models (LMMs) exhibit remarkable performance across\nnumerous tasks. However, generalist LMMs often suffer from performance\ndegradation when tuned over a large collection of tasks. Recent research\nsuggests that Mixture of Experts (MoE) architectures are useful for instruction\ntuning, but for LMMs of parameter size around O(50-100B), the prohibitive cost\nof replicating and storing the expert models severely limits the number of\nexperts we can use. We propose Omni-SMoLA, an architecture that uses the Soft\nMoE approach to (softly) mix many multimodal low rank experts, and avoids\nintroducing a significant number of new parameters compared to conventional MoE\nmodels. The core intuition here is that the large model provides a foundational\nbackbone, while different lightweight experts residually learn specialized\nknowledge, either per-modality or multimodally. Extensive experiments\ndemonstrate that the SMoLA approach helps improve the generalist performance\nacross a broad range of generative vision-and-language tasks, achieving new\nSoTA generalist performance that often matches or outperforms single\nspecialized LMM baselines, as well as new SoTA specialist performance.", + "authors": "Jialin Wu, Xia Hu, Yaqing Wang, Bo Pang, Radu Soricut", + "published": "2023-12-01", + "updated": "2024-04-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2004.03751v4", + "title": "Robust Fitting of Mixture Models using Weighted Complete Estimating Equations", + "abstract": "Mixture modeling, which considers the potential heterogeneity in data, is\nwidely adopted for classification and clustering problems. 
Mixture models can\nbe estimated using the Expectation-Maximization algorithm, which works with the\ncomplete estimating equations conditioned by the latent membership variables of\nthe cluster assignment based on the hierarchical expression of mixture models.\nHowever, when the mixture components have light tails such as a normal\ndistribution, the mixture model can be sensitive to outliers. This study\nproposes a method of weighted complete estimating equations (WCE) for the\nrobust fitting of mixture models. Our WCE introduces weights to complete\nestimating equations such that the weights can automatically downweight the\noutliers. The weights are constructed similarly to the density power divergence\nfor mixture models, but in our WCE, they depend only on the component\ndistributions and not on the whole mixture. A novel\nexpectation-estimating-equation (EEE) algorithm is also developed to solve the\nWCE. For illustrative purposes, a multivariate Gaussian mixture, a mixture of\nexperts, and a multivariate skew normal mixture are considered, and how our EEE\nalgorithm can be implemented for these specific models is described. The\nnumerical performance of the proposed robust estimation method was examined\nusing simulated and real datasets.", + "authors": "Shonosuke Sugasawa, Genya Kobayashi", + "published": "2020-04-08", + "updated": "2022-03-17", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2102.06034v1", + "title": "Speech enhancement with mixture-of-deep-experts with clean clustering pre-training", + "abstract": "In this study we present a mixture of deep experts (MoDE) neural-network\narchitecture for single microphone speech enhancement. Our architecture\ncomprises a set of deep neural networks (DNNs), each of which is an 'expert' in\na different speech spectral pattern such as phoneme. A gating DNN is\nresponsible for the latent variables which are the weights assigned to each\nexpert's output given a speech segment. The experts estimate a mask from the\nnoisy input and the final mask is then obtained as a weighted average of the\nexperts' estimates, with the weights determined by the gating DNN. A soft\nspectral attenuation, based on the estimated mask, is then applied to enhance\nthe noisy speech signal. As a byproduct, we gain reduction at the complexity in\ntest time. We show that the experts specialization allows better robustness to\nunfamiliar noise types.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2021-02-11", + "updated": "2021-02-11", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "cs.LG", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.13750v1", + "title": "MoLE : Mixture of Language Experts for Multi-Lingual Automatic Speech Recognition", + "abstract": "Multi-lingual speech recognition aims to distinguish linguistic expressions\nin different languages and integrate acoustic processing simultaneously. In\ncontrast, current multi-lingual speech recognition research follows a\nlanguage-aware paradigm, mainly targeted to improve recognition performance\nrather than discriminate language characteristics. In this paper, we present a\nmulti-lingual speech recognition network named\nMixture-of-Language-Expert(MoLE), which digests speech in a variety of\nlanguages. 
Specifically, MoLE analyzes linguistic expression from input speech\nin arbitrary languages, activating a language-specific expert with a\nlightweight language tokenizer. The tokenizer not only activates experts, but\nalso estimates the reliability of the activation. Based on the reliability, the\nactivated expert and the language-agnostic expert are aggregated to represent\nlanguage-conditioned embedding for efficient speech recognition. Our proposed\nmodel is evaluated in 5 languages scenario, and the experimental results show\nthat our structure is advantageous on multi-lingual recognition, especially for\nspeech in low-resource language.", + "authors": "Yoohwan Kwon, Soo-Whan Chung", + "published": "2023-02-27", + "updated": "2023-02-27", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.CL", + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.07816v1", + "title": "Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", + "abstract": "We investigate efficient methods for training Large Language Models (LLMs) to\npossess capabilities in multiple specialized domains, such as coding, math\nreasoning and world knowledge. Our method, named Branch-Train-MiX (BTX), starts\nfrom a seed model, which is branched to train experts in embarrassingly\nparallel fashion with high throughput and reduced communication cost. After\nindividual experts are asynchronously trained, BTX brings together their\nfeedforward parameters as experts in Mixture-of-Expert (MoE) layers and\naverages the remaining parameters, followed by an MoE-finetuning stage to learn\ntoken-level routing. BTX generalizes two special cases, the Branch-Train-Merge\nmethod, which does not have the MoE finetuning stage to learn routing, and\nsparse upcycling, which omits the stage of training experts asynchronously.\nCompared to alternative approaches, BTX achieves the best accuracy-efficiency\ntradeoff.", + "authors": "Sainbayar Sukhbaatar, Olga Golovneva, Vasu Sharma, Hu Xu, Xi Victoria Lin, Baptiste Rozi\u00e8re, Jacob Kahn, Daniel Li, Wen-tau Yih, Jason Weston, Xian Li", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.00372v1", + "title": "Visual Saliency Prediction Using a Mixture of Deep Neural Networks", + "abstract": "Visual saliency models have recently begun to incorporate deep learning to\nachieve predictive capacity much greater than previous unsupervised methods.\nHowever, most existing models predict saliency using local mechanisms limited\nto the receptive field of the network. We propose a model that incorporates\nglobal scene semantic information in addition to local information gathered by\na convolutional neural network. Our model is formulated as a mixture of\nexperts. Each expert network is trained to predict saliency for a set of\nclosely related images. The final saliency map is computed as a weighted\nmixture of the expert networks' output, with weights determined by a separate\ngating network. This gating network is guided by global scene information to\npredict weights. The expert networks and the gating network are trained\nsimultaneously in an end-to-end manner. 
We show that our mixture formulation\nleads to improvement in performance over an otherwise identical non-mixture\nmodel that does not incorporate global scene information.", + "authors": "Samuel Dodge, Lina Karam", + "published": "2017-02-01", + "updated": "2017-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.06327v1", + "title": "Double-Wing Mixture of Experts for Streaming Recommendations", + "abstract": "Streaming Recommender Systems (SRSs) commonly train recommendation models on\nnewly received data only to address user preference drift, i.e., the changing\nuser preferences towards items. However, this practice overlooks the long-term\nuser preferences embedded in historical data. More importantly, the common\nheterogeneity in data stream greatly reduces the accuracy of streaming\nrecommendations. The reason is that different preferences (or characteristics)\nof different types of users (or items) cannot be well learned by a unified\nmodel. To address these two issues, we propose a Variational and\nReservoir-enhanced Sampling based Double-Wing Mixture of Experts framework,\ncalled VRS-DWMoE, to improve the accuracy of streaming recommendations. In\nVRS-DWMoE, we first devise variational and reservoir-enhanced sampling to\nwisely complement new data with historical data, and thus address the user\npreference drift issue while capturing long-term user preferences. After that,\nwe propose a Double-Wing Mixture of Experts (DWMoE) model to first effectively\nlearn heterogeneous user preferences and item characteristics, and then make\nrecommendations based on them. Specifically, DWMoE contains two Mixture of\nExperts (MoE, an effective ensemble learning model) to learn user preferences\nand item characteristics, respectively. Moreover, the multiple experts in each\nMoE learn the preferences (or characteristics) of different types of users (or\nitems) where each expert specializes in one underlying type. Extensive\nexperiments demonstrate that VRS-DWMoE consistently outperforms the\nstate-of-the-art SRSs.", + "authors": "Yan Zhao, Shoujin Wang, Yan Wang, Hongwei Liu, Weizhe Zhang", + "published": "2020-09-14", + "updated": "2020-09-14", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05220v1", + "title": "On Parameter Estimation in Deviated Gaussian Mixture of Experts", + "abstract": "We consider the parameter estimation problem in the deviated Gaussian mixture\nof experts in which the data are generated from $(1 - \\lambda^{\\ast}) g_0(Y|\nX)+ \\lambda^{\\ast} \\sum_{i = 1}^{k_{\\ast}} p_{i}^{\\ast}\nf(Y|(a_{i}^{\\ast})^{\\top}X+b_i^{\\ast},\\sigma_{i}^{\\ast})$, where $X, Y$ are\nrespectively a covariate vector and a response variable, $g_{0}(Y|X)$ is a\nknown function, $\\lambda^{\\ast} \\in [0, 1]$ is true but unknown mixing\nproportion, and $(p_{i}^{\\ast}, a_{i}^{\\ast}, b_{i}^{\\ast}, \\sigma_{i}^{\\ast})$\nfor $1 \\leq i \\leq k^{\\ast}$ are unknown parameters of the Gaussian mixture of\nexperts. This problem arises from the goodness-of-fit test when we would like\nto test whether the data are generated from $g_{0}(Y|X)$ (null hypothesis) or\nthey are generated from the whole mixture (alternative hypothesis). 
Based on\nthe algebraic structure of the expert functions and the distinguishability\nbetween $g_0$ and the mixture part, we construct novel Voronoi-based loss\nfunctions to capture the convergence rates of maximum likelihood estimation\n(MLE) for our models. We further demonstrate that our proposed loss functions\ncharacterize the local convergence rates of parameter estimation more\naccurately than the generalized Wasserstein, a loss function being commonly\nused for estimating parameters in the Gaussian mixture of experts.", + "authors": "Huy Nguyen, Khai Nguyen, Nhat Ho", + "published": "2024-02-07", + "updated": "2024-02-07", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.04894v1", + "title": "DAMEX: Dataset-aware Mixture-of-Experts for visual understanding of mixture-of-datasets", + "abstract": "Construction of a universal detector poses a crucial question: How can we\nmost effectively train a model on a large mixture of datasets? The answer lies\nin learning dataset-specific features and ensembling their knowledge but do all\nthis in a single model. Previous methods achieve this by having separate\ndetection heads on a common backbone but that results in a significant increase\nin parameters. In this work, we present Mixture-of-Experts as a solution,\nhighlighting that MoEs are much more than a scalability tool. We propose\nDataset-Aware Mixture-of-Experts, DAMEX where we train the experts to become an\n`expert' of a dataset by learning to route each dataset tokens to its mapped\nexpert. Experiments on Universal Object-Detection Benchmark show that we\noutperform the existing state-of-the-art by average +10.2 AP score and improve\nover our non-MoE baseline by average +2.0 AP score. We also observe consistent\ngains while mixing datasets with (1) limited availability, (2) disparate\ndomains and (3) divergent label sets. Further, we qualitatively show that DAMEX\nis robust against expert representation collapse.", + "authors": "Yash Jain, Harkirat Behl, Zsolt Kira, Vibhav Vineet", + "published": "2023-11-08", + "updated": "2023-11-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.05346v1", + "title": "A Modular Task-oriented Dialogue System Using a Neural Mixture-of-Experts", + "abstract": "End-to-end Task-oriented Dialogue Systems (TDSs) have attracted a lot of\nattention for their superiority (e.g., in terms of global optimization) over\npipeline modularized TDSs. Previous studies on end-to-end TDSs use a\nsingle-module model to generate responses for complex dialogue contexts.\nHowever, no model consistently outperforms the others in all cases. We propose\na neural Modular Task-oriented Dialogue System(MTDS) framework, in which a few\nexpert bots are combined to generate the response for a given dialogue context.\nMTDS consists of a chair bot and several expert bots. Each expert bot is\nspecialized for a particular situation, e.g., one domain, one type of action of\na system, etc. The chair bot coordinates multiple expert bots and adaptively\nselects an expert bot to generate the appropriate response. 
We further propose\na Token-level Mixture-of-Expert (TokenMoE) model to implement MTDS, where the\nexpert bots predict multiple tokens at each timestamp and the chair bot\ndetermines the final generated token by fully taking into consideration the\noutputs of all expert bots. Both the chair bot and the expert bots are jointly\ntrained in an end-to-end fashion. To verify the effectiveness of TokenMoE, we\ncarry out extensive experiments on a benchmark dataset. Compared with the\nbaseline using a single-module model, our TokenMoE improves the performance by\n8.1% of inform rate and 0.8% of success rate.", + "authors": "Jiahuan Pei, Pengjie Ren, Maarten de Rijke", + "published": "2019-07-10", + "updated": "2019-07-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.15969v2", + "title": "Routers in Vision Mixture of Experts: An Empirical Study", + "abstract": "Mixture-of-Experts (MoE) models are a promising way to scale up model\ncapacity without significantly increasing computational cost. A key component\nof MoEs is the router, which decides which subset of parameters (experts)\nprocess which feature embeddings (tokens). In this paper, we present a\ncomprehensive study of routers in MoEs for computer vision tasks. We introduce\na unified MoE formulation that subsumes different MoEs with two parametric\nrouting tensors. This formulation covers both sparse MoE, which uses a binary\nor hard assignment between experts and tokens, and soft MoE, which uses a soft\nassignment between experts and weighted combinations of tokens. Routers for\nsparse MoEs can be further grouped into two variants: Token Choice, which\nmatches experts to each token, and Expert Choice, which matches tokens to each\nexpert. We conduct head-to-head experiments with 6 different routers, including\nexisting routers from prior work and new ones we introduce. We show that (i)\nmany routers originally developed for language modeling can be adapted to\nperform strongly in vision tasks, (ii) in sparse MoE, Expert Choice routers\ngenerally outperform Token Choice routers, and (iii) soft MoEs generally\noutperform sparse MoEs with a fixed compute budget. These results provide new\ninsights regarding the crucial role of routers in vision MoE models.", + "authors": "Tianlin Liu, Mathieu Blondel, Carlos Riquelme, Joan Puigcerver", + "published": "2024-01-29", + "updated": "2024-04-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2006.13309v4", + "title": "Fast Deep Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts have become an indispensable tool for flexible modelling\nin a supervised learning context, allowing not only the mean function but the\nentire density of the output to change with the inputs. Sparse Gaussian\nprocesses (GP) have shown promise as a leading candidate for the experts in\nsuch models, and in this article, we propose to design the gating network for\nselecting the experts from such mixtures of sparse GPs using a deep neural\nnetwork (DNN). Furthermore, a fast one pass algorithm called\nCluster-Classify-Regress (CCR) is leveraged to approximate the maximum a\nposteriori (MAP) estimator extremely quickly. This powerful combination of\nmodel and algorithm together delivers a novel method which is flexible, robust,\nand extremely efficient. 
In particular, the method is able to outperform\ncompeting methods in terms of accuracy and uncertainty quantification. The cost\nis competitive on low-dimensional and small data sets, but is significantly\nlower for higher-dimensional and big data sets. Iteratively maximizing the\ndistribution of experts given allocations and allocations given experts does\nnot provide significant improvement, which indicates that the algorithm\nachieves a good approximation to the local MAP estimator very fast. This\ninsight can be useful also in the context of other mixture of experts models.", + "authors": "Clement Etienam, Kody Law, Sara Wade, Vitaly Zankin", + "published": "2020-06-11", + "updated": "2023-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2209.13071v1", + "title": "Diversified Dynamic Routing for Vision Tasks", + "abstract": "Deep learning models for vision tasks are trained on large datasets under the\nassumption that there exists a universal representation that can be used to\nmake predictions for all samples. Whereas high complexity models are proven to\nbe capable of learning such representations, a mixture of experts trained on\nspecific subsets of the data can infer the labels more efficiently. However\nusing mixture of experts poses two new problems, namely (i) assigning the\ncorrect expert at inference time when a new unseen sample is presented. (ii)\nFinding the optimal partitioning of the training data, such that the experts\nrely the least on common features. In Dynamic Routing (DR) a novel architecture\nis proposed where each layer is composed of a set of experts, however without\naddressing the two challenges we demonstrate that the model reverts to using\nthe same subset of experts.\n In our method, Diversified Dynamic Routing (DivDR) the model is explicitly\ntrained to solve the challenge of finding relevant partitioning of the data and\nassigning the correct experts in an unsupervised approach. We conduct several\nexperiments on semantic segmentation on Cityscapes and object detection and\ninstance segmentation on MS-COCO showing improved performance over several\nbaselines.", + "authors": "Botos Csaba, Adel Bibi, Yanwei Li, Philip Torr, Ser-Nam Lim", + "published": "2022-09-26", + "updated": "2022-09-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.00893v1", + "title": "MoDE: A Mixture-of-Experts Model with Mutual Distillation among the Experts", + "abstract": "The application of mixture-of-experts (MoE) is gaining popularity due to its\nability to improve model's performance. In an MoE structure, the gate layer\nplays a significant role in distinguishing and routing input features to\ndifferent experts. This enables each expert to specialize in processing their\ncorresponding sub-tasks. However, the gate's routing mechanism also gives rise\nto narrow vision: the individual MoE's expert fails to use more samples in\nlearning the allocated sub-task, which in turn limits the MoE to further\nimprove its generalization ability. To effectively address this, we propose a\nmethod called Mixture-of-Distilled-Expert (MoDE), which applies moderate mutual\ndistillation among experts to enable each expert to pick up more features\nlearned by other experts and gain more accurate perceptions on their original\nallocated sub-tasks. 
We conduct plenty experiments including tabular, NLP and\nCV datasets, which shows MoDE's effectiveness, universality and robustness.\nFurthermore, we develop a parallel study through innovatively constructing\n\"expert probing\", to experimentally prove why MoDE works: moderate distilling\nknowledge can improve each individual expert's test performances on their\nassigned tasks, leading to MoE's overall performance improvement.", + "authors": "Zhitian Xie, Yinger Zhang, Chenyi Zhuang, Qitao Shi, Zhining Liu, Jinjie Gu, Guannan Zhang", + "published": "2024-01-31", + "updated": "2024-01-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.09179v3", + "title": "On the Representation Collapse of Sparse Mixture of Experts", + "abstract": "Sparse mixture of experts provides larger model capacity while requiring a\nconstant computational overhead. It employs the routing mechanism to distribute\ninput tokens to the best-matched experts according to their hidden\nrepresentations. However, learning such a routing mechanism encourages token\nclustering around expert centroids, implying a trend toward representation\ncollapse. In this work, we propose to estimate the routing scores between\ntokens and experts on a low-dimensional hypersphere. We conduct extensive\nexperiments on cross-lingual language model pre-training and fine-tuning on\ndownstream tasks. Experimental results across seven multilingual benchmarks\nshow that our method achieves consistent gains. We also present a comprehensive\nanalysis on the representation and routing behaviors of our models. Our method\nalleviates the representation collapse issue and achieves more consistent\nrouting than the baseline mixture-of-experts methods.", + "authors": "Zewen Chi, Li Dong, Shaohan Huang, Damai Dai, Shuming Ma, Barun Patra, Saksham Singhal, Payal Bajaj, Xia Song, Xian-Ling Mao, Heyan Huang, Furu Wei", + "published": "2022-04-20", + "updated": "2022-10-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2308.00951v1", + "title": "From Sparse to Soft Mixtures of Experts", + "abstract": "Sparse mixture of expert architectures (MoEs) scale model capacity without\nlarge increases in training or inference costs. Despite their success, MoEs\nsuffer from a number of issues: training instability, token dropping, inability\nto scale the number of experts, or ineffective finetuning. In this work, we\nproposeSoft MoE, a fully-differentiable sparse Transformer that addresses these\nchallenges, while maintaining the benefits of MoEs. Soft MoE performs an\nimplicit soft assignment by passing different weighted combinations of all\ninput tokens to each expert. As in other MoE works, experts in Soft MoE only\nprocess a subset of the (combined) tokens, enabling larger model capacity at\nlower inference cost. In the context of visual recognition, Soft MoE greatly\noutperforms standard Transformers (ViTs) and popular MoE variants (Tokens\nChoice and Experts Choice). For example, Soft MoE-Base/16 requires 10.5x lower\ninference cost (5.7x lower wall-clock time) than ViT-Huge/14 while matching its\nperformance after similar training. 
Soft MoE also scales well: Soft MoE Huge/14\nwith 128 experts in 16 MoE layers has over 40x more parameters than ViT\nHuge/14, while inference time cost grows by only 2%, and it performs\nsubstantially better.", + "authors": "Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Neil Houlsby", + "published": "2023-08-02", + "updated": "2023-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1704.00946v4", + "title": "Approximation results regarding the multiple-output mixture of linear experts model", + "abstract": "Mixture of experts (MoE) models are a class of artificial neural networks\nthat can be used for functional approximation and probabilistic modeling. An\nimportant class of MoE models is the class of mixture of linear experts (MoLE)\nmodels, where the expert functions map to real topological output spaces. There\nare a number of powerful approximation results regarding MoLE models, when the\noutput space is univariate. These results guarantee the ability of MoLE mean\nfunctions to approximate arbitrary continuous functions, and MoLE models\nthemselves to approximate arbitrary conditional probability density functions.\nWe utilize and extend upon the univariate approximation results in order to\nprove a pair of useful results for situations where the output spaces are\nmultivariate.", + "authors": "Hien D. Nguyen, Faicel Chamroukhi, Florence Forbes", + "published": "2017-04-04", + "updated": "2019-05-28", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2112.14397v2", + "title": "EvoMoE: An Evolutional Mixture-of-Experts Training Framework via Dense-To-Sparse Gate", + "abstract": "Mixture-of-experts (MoE) is becoming popular due to its success in improving\nthe model quality, especially in Transformers. By routing tokens with a sparse\ngate to a few experts (i.e., a small pieces of the full model), MoE can easily\nincrease the model parameters to a very large scale while keeping the\ncomputation cost in a constant level. Most existing works just initialize some\nrandom experts, set a fixed gating strategy (e.g., Top-k), and train the model\nfrom scratch in an ad-hoc way. We identify that these MoE models are suffering\nfrom the immature experts and unstable sparse gate, which are harmful to the\nconvergence performance. In this paper, we propose an efficient end-to-end MoE\ntraining framework called EvoMoE. EvoMoE starts from training one single expert\nand gradually evolves into a large and sparse MoE structure. EvoMoE mainly\ncontains two phases: the expert-diversify phase to train the base expert for a\nwhile and spawn multiple diverse experts from it, and the gate-sparsify phase\nto learn an adaptive sparse gate and activate a dynamic number of experts.\nEvoMoE naturally decouples the joint learning of both the experts and the\nsparse gate and focuses on learning the basic knowledge with a single expert at\nthe early training stage. Then it diversifies the experts and continues to\ntrain the MoE with a novel Dense-to-Sparse gate (DTS-Gate). Specifically,\ninstead of using a permanent sparse gate, DTS-Gate begins as a dense gate that\nroutes tokens to all experts, then gradually and adaptively becomes sparser\nwhile routes to fewer experts. 
Evaluations are conducted on three popular\nmodels and tasks, including RoBERTa for masked language modeling task, GPT for\nlanguage modeling task and Transformer for machine translation task. The\nresults show that EvoMoE outperforms existing baselines, including Switch, BASE\nLayer, Hash Layer and StableMoE.", + "authors": "Xiaonan Nie, Xupeng Miao, Shijie Cao, Lingxiao Ma, Qibin Liu, Jilong Xue, Youshan Miao, Yi Liu, Zhi Yang, Bin Cui", + "published": "2021-12-29", + "updated": "2022-10-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2307.05956v2", + "title": "Language-Routing Mixture of Experts for Multilingual and Code-Switching Speech Recognition", + "abstract": "Multilingual speech recognition for both monolingual and code-switching\nspeech is a challenging task. Recently, based on the Mixture of Experts (MoE),\nmany works have made good progress in multilingual and code-switching ASR, but\npresent huge computational complexity with the increase of supported languages.\nIn this work, we propose a computation-efficient network named Language-Routing\nMixture of Experts (LR-MoE) for multilingual and code-switching ASR. LR-MoE\nextracts language-specific representations through the Mixture of Language\nExperts (MLE), which is guided to learn by a frame-wise language routing\nmechanism. The weight-shared frame-level language identification (LID) network\nis jointly trained as the shared pre-router of each MoE layer. Experiments show\nthat the proposed method significantly improves multilingual and code-switching\nspeech recognition performances over baseline with comparable computational\nefficiency.", + "authors": "Wenxuan Wang, Guodong Ma, Yuke Li, Binbin Du", + "published": "2023-07-12", + "updated": "2023-07-14", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.03292v1", + "title": "Enhancing Molecular Property Prediction via Mixture of Collaborative Experts", + "abstract": "Molecular Property Prediction (MPP) task involves predicting biochemical\nproperties based on molecular features, such as molecular graph structures,\ncontributing to the discovery of lead compounds in drug development. To address\ndata scarcity and imbalance in MPP, some studies have adopted Graph Neural\nNetworks (GNN) as an encoder to extract commonalities from molecular graphs.\nHowever, these approaches often use a separate predictor for each task,\nneglecting the shared characteristics among predictors corresponding to\ndifferent tasks. In response to this limitation, we introduce the GNN-MoCE\narchitecture. It employs the Mixture of Collaborative Experts (MoCE) as\npredictors, exploiting task commonalities while confronting the homogeneity\nissue in the expert pool and the decision dominance dilemma within the expert\ngroup. To enhance expert diversity for collaboration among all experts, the\nExpert-Specific Projection method is proposed to assign a unique projection\nperspective to each expert. To balance decision-making influence for\ncollaboration within the expert group, the Expert-Specific Loss is presented to\nintegrate individual expert loss into the weighted decision loss of the group\nfor more equitable training. 
Benefiting from the enhancements of MoCE in expert\ncreation, dynamic expert group formation, and experts' collaboration, our model\ndemonstrates superior performance over traditional methods on 24 MPP datasets,\nespecially in tasks with limited data or high imbalance.", + "authors": "Xu Yao, Shuang Liang, Songqiao Han, Hailiang Huang", + "published": "2023-12-06", + "updated": "2023-12-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.MA", + "q-bio.QM" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1903.07756v1", + "title": "Hierarchical Routing Mixture of Experts", + "abstract": "In regression tasks the distribution of the data is often too complex to be\nfitted by a single model. In contrast, partition-based models are developed\nwhere data is divided and fitted by local models. These models partition the\ninput space and do not leverage the input-output dependency of\nmultimodal-distributed data, and strong local models are needed to make good\npredictions. Addressing these problems, we propose a binary tree-structured\nhierarchical routing mixture of experts (HRME) model that has classifiers as\nnon-leaf node experts and simple regression models as leaf node experts. The\nclassifier nodes jointly soft-partition the input-output space based on the\nnatural separateness of multimodal data. This enables simple leaf experts to be\neffective for prediction. Further, we develop a probabilistic framework for the\nHRME model, and propose a recursive Expectation-Maximization (EM) based\nalgorithm to learn both the tree structure and the expert models. Experiments\non a collection of regression tasks validate the effectiveness of our method\ncompared to a variety of other regression models.", + "authors": "Wenbo Zhao, Yang Gao, Shahan Ali Memon, Bhiksha Raj, Rita Singh", + "published": "2019-03-18", + "updated": "2019-03-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.02043v1", + "title": "mixdistreg: An R Package for Fitting Mixture of Experts Distributional Regression with Adaptive First-order Methods", + "abstract": "This paper presents a high-level description of the R software package\nmixdistreg to fit mixture of experts distributional regression models. The\nproposed framework is implemented in R using the deepregression software\ntemplate, which is based on TensorFlow and follows the neural structured\nadditive learning principle. The software comprises various approaches as\nspecial cases, including mixture density networks and mixture regression\napproaches. Various code examples are given to demonstrate the package's\nfunctionality.", + "authors": "David R\u00fcgamer", + "published": "2023-02-04", + "updated": "2023-02-04", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.04693v2", + "title": "GraphMETRO: Mitigating Complex Graph Distribution Shifts via Mixture of Aligned Experts", + "abstract": "Graph data are inherently complex and heterogeneous, leading to a high\nnatural diversity of distributional shifts. However, it remains unclear how to\nbuild machine learning architectures that generalize to complex non-synthetic\ndistributional shifts naturally occurring in the real world. Here we develop\nGraphMETRO, a Graph Neural Network architecture, that reliably models natural\ndiversity and captures complex distributional shifts. 
GraphMETRO employs a\nMixture-of-Experts (MoE) architecture with a gating model and multiple expert\nmodels, where each expert model targets a specific distributional shift to\nproduce a shift-invariant representation, and the gating model identifies shift\ncomponents. Additionally, we design a novel objective that aligns the\nrepresentations from different expert models to ensure smooth optimization.\nGraphMETRO achieves state-of-the-art results on four datasets from GOOD\nbenchmark comprised of complex and natural real-world distribution shifts,\nimproving by 67% and 4.2% on WebKB and Twitch datasets.", + "authors": "Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, Jure Leskovec", + "published": "2023-12-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.07806v1", + "title": "Transformer Based Multi-Source Domain Adaptation", + "abstract": "In practical machine learning settings, the data on which a model must make\npredictions often come from a different distribution than the data it was\ntrained on. Here, we investigate the problem of unsupervised multi-source\ndomain adaptation, where a model is trained on labelled data from multiple\nsource domains and must make predictions on a domain for which no labelled data\nhas been seen. Prior work with CNNs and RNNs has demonstrated the benefit of\nmixture of experts, where the predictions of multiple domain expert classifiers\nare combined; as well as domain adversarial training, to induce a domain\nagnostic representation space. Inspired by this, we investigate how such\nmethods can be effectively applied to large pretrained transformer models. We\nfind that domain adversarial training has an effect on the learned\nrepresentations of these models while having little effect on their\nperformance, suggesting that large transformer-based models are already\nrelatively robust across domains. Additionally, we show that mixture of experts\nleads to significant performance improvements by comparing several variants of\nmixing functions, including one novel mixture based on attention. Finally, we\ndemonstrate that the predictions of large pretrained transformer based domain\nexperts are highly homogenous, making it challenging to learn effective\nfunctions for mixing their predictions.", + "authors": "Dustin Wright, Isabelle Augenstein", + "published": "2020-09-16", + "updated": "2020-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1904.09948v1", + "title": "PLUME: Polyhedral Learning Using Mixture of Experts", + "abstract": "In this paper, we propose a novel mixture of expert architecture for learning\npolyhedral classifiers. We learn the parameters of the classifierusing an\nexpectation maximization algorithm. Wederive the generalization bounds of the\nproposedapproach. Through an extensive simulation study, we show that the\nproposed method performs comparably to other state-of-the-art approaches.", + "authors": "Kulin Shah, P. S. 
Sastry, Naresh Manwani", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2212.00471v1", + "title": "Implicit Mixture of Interpretable Experts for Global and Local Interpretability", + "abstract": "We investigate the feasibility of using mixtures of interpretable experts\n(MoIE) to build interpretable image classifiers on MNIST10. MoIE uses a\nblack-box router to assign each input to one of many inherently interpretable\nexperts, thereby providing insight into why a particular classification\ndecision was made. We find that a naively trained MoIE will learn to 'cheat',\nwhereby the black-box router will solve the classification problem by itself,\nwith each expert simply learning a constant function for one particular class.\nWe propose to solve this problem by introducing interpretable routers and\ntraining the black-box router's decisions to match the interpretable router. In\naddition, we propose a novel implicit parameterization scheme that allows us to\nbuild mixtures of arbitrary numbers of experts, allowing us to study how\nclassification performance, local and global interpretability vary as the\nnumber of experts is increased. Our new model, dubbed Implicit Mixture of\nInterpretable Experts (IMoIE) can match state-of-the-art classification\naccuracy on MNIST10 while providing local interpretability, and can provide\nglobal interpretability albeit at the cost of reduced classification accuracy.", + "authors": "Nathan Elazar, Kerry Taylor", + "published": "2022-12-01", + "updated": "2022-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.13850v2", + "title": "Statistical Perspective of Top-K Sparse Softmax Gating Mixture of Experts", + "abstract": "Top-K sparse softmax gating mixture of experts has been widely used for\nscaling up massive deep-learning architectures without increasing the\ncomputational cost. Despite its popularity in real-world applications, the\ntheoretical understanding of that gating function has remained an open problem.\nThe main challenge comes from the structure of the top-K sparse softmax gating\nfunction, which partitions the input space into multiple regions with distinct\nbehaviors. By focusing on a Gaussian mixture of experts, we establish\ntheoretical results on the effects of the top-K sparse softmax gating function\non both density and parameter estimations. Our results hinge upon defining\nnovel loss functions among parameters to capture different behaviors of the\ninput regions. When the true number of experts $k_{\\ast}$ is known, we\ndemonstrate that the convergence rates of density and parameter estimations are\nboth parametric on the sample size. However, when $k_{\\ast}$ becomes unknown\nand the true model is over-specified by a Gaussian mixture of $k$ experts where\n$k > k_{\\ast}$, our findings suggest that the number of experts selected from\nthe top-K sparse softmax gating function must exceed the total cardinality of a\ncertain number of Voronoi cells associated with the true parameters to\nguarantee the convergence of the density estimation. 
Moreover, while the\ndensity estimation rate remains parametric under this setting, the parameter\nestimation rates become substantially slow due to an intrinsic interaction\nbetween the softmax gating and expert functions.", + "authors": "Huy Nguyen, Pedram Akbarian, Fanqi Yan, Nhat Ho", + "published": "2023-09-25", + "updated": "2024-02-23", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.08245v1", + "title": "Scattered Mixture-of-Experts Implementation", + "abstract": "We present ScatterMoE, an implementation of Sparse Mixture-of-Experts (SMoE)\non GPUs. ScatterMoE builds upon existing implementations, and overcoming some\nof the limitations to improve inference and training speed, and memory\nfootprint. This implementation achieves this by avoiding padding and making\nexcessive copies of the input. We introduce ParallelLinear, the main component\nwe use to build our implementation and the various kernels used to speed up the\noperation. We benchmark our implementation against Megablocks, and show that it\nenables a higher throughput and lower memory footprint. We also show how\nParallelLinear enables extension of the Mixture-of-Experts concept by\ndemonstrating with an implementation of Mixture of Attention.", + "authors": "Shawn Tan, Yikang Shen, Rameswar Panda, Aaron Courville", + "published": "2024-03-13", + "updated": "2024-03-13", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.02806v2", + "title": "Graph Mixture of Experts: Learning on Large-Scale Graphs with Explicit Diversity Modeling", + "abstract": "Graph neural networks (GNNs) have found extensive applications in learning\nfrom graph data. However, real-world graphs often possess diverse structures\nand comprise nodes and edges of varying types. To bolster the generalization\ncapacity of GNNs, it has become customary to augment training graph structures\nthrough techniques like graph augmentations and large-scale pre-training on a\nwider array of graphs. Balancing this diversity while avoiding increased\ncomputational costs and the notorious trainability issues of GNNs is crucial.\nThis study introduces the concept of Mixture-of-Experts (MoE) to GNNs, with the\naim of augmenting their capacity to adapt to a diverse range of training graph\nstructures, without incurring explosive computational overhead. The proposed\nGraph Mixture of Experts (GMoE) model empowers individual nodes in the graph to\ndynamically and adaptively select more general information aggregation experts.\nThese experts are trained to capture distinct subgroups of graph structures and\nto incorporate information with varying hop sizes, where those with larger hop\nsizes specialize in gathering information over longer distances. The\neffectiveness of GMoE is validated through a series of experiments on a diverse\nset of tasks, including graph, node, and link prediction, using the OGB\nbenchmark. Notably, it enhances ROC-AUC by $1.81\\%$ in ogbg-molhiv and by\n$1.40\\%$ in ogbg-molbbbp, when compared to the non-MoE baselines. 
Our code is\npublicly available at https://github.com/VITA-Group/Graph-Mixture-of-Experts.", + "authors": "Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Rao Kompella, Zhangyang Wang", + "published": "2023-04-06", + "updated": "2023-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17749v1", + "title": "Multi-Task Dense Prediction via Mixture of Low-Rank Experts", + "abstract": "Previous multi-task dense prediction methods based on the Mixture of Experts\n(MoE) have received great performance but they neglect the importance of\nexplicitly modeling the global relations among all tasks. In this paper, we\npresent a novel decoder-focused method for multi-task dense prediction, called\nMixture-of-Low-Rank-Experts (MLoRE). To model the global task relationships,\nMLoRE adds a generic convolution path to the original MoE structure, where each\ntask feature can go through this path for explicit parameter sharing.\nFurthermore, to control the parameters and computational cost brought by the\nincrease in the number of experts, we take inspiration from LoRA and propose to\nleverage the low-rank format of a vanilla convolution in the expert network.\nSince the low-rank experts have fewer parameters and can be dynamically\nparameterized into the generic convolution, the parameters and computational\ncost do not change much with the increase of experts. Benefiting from this\ndesign, we increase the number of experts and its reception field to enlarge\nthe representation capacity, facilitating multiple dense tasks learning in a\nunified network. Extensive experiments on the PASCAL-Context and NYUD-v2\nbenchmarks show that our MLoRE achieves superior performance compared to\nprevious state-of-the-art methods on all metrics. Our code is available at\nhttps://github.com/YuqiYang213/MLoRE.", + "authors": "Yuqi Yang, Peng-Tao Jiang, Qibin Hou, Hao Zhang, Jinwei Chen, Bo Li", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2305.03288v2", + "title": "Demystifying Softmax Gating Function in Gaussian Mixture of Experts", + "abstract": "Understanding the parameter estimation of softmax gating Gaussian mixture of\nexperts has remained a long-standing open problem in the literature. It is\nmainly due to three fundamental theoretical challenges associated with the\nsoftmax gating function: (i) the identifiability only up to the translation of\nparameters; (ii) the intrinsic interaction via partial differential equations\nbetween the softmax gating and the expert functions in the Gaussian density;\n(iii) the complex dependence between the numerator and denominator of the\nconditional density of softmax gating Gaussian mixture of experts. We resolve\nthese challenges by proposing novel Voronoi loss functions among parameters and\nestablishing the convergence rates of maximum likelihood estimator (MLE) for\nsolving parameter estimation in these models. 
When the true number of experts\nis unknown and over-specified, our findings show a connection between the\nconvergence rate of the MLE and a solvability problem of a system of polynomial\nequations.", + "authors": "Huy Nguyen, TrungTin Nguyen, Nhat Ho", + "published": "2023-05-05", + "updated": "2023-10-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "math.ST", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.11706v1", + "title": "Mixture of ELM based experts with trainable gating network", + "abstract": "Mixture of experts method is a neural network based ensemble learning that\nhas great ability to improve the overall classification accuracy. This method\nis based on the divide and conquer principle, in which the problem space is\ndivided between several experts by supervisition of gating network. In this\npaper, we propose an ensemble learning method based on mixture of experts which\nis named mixture of ELM based experts with trainable gating network (MEETG) to\nimprove the computing cost and to speed up the learning process of ME. The\nstructure of ME consists of multi layer perceptrons (MLPs) as base experts and\ngating network, in which gradient-based learning algorithm is applied for\ntraining the MLPs which is an iterative and time consuming process. In order to\novercome on these problems, we use the advantages of extreme learning machine\n(ELM) for designing the structure of ME. ELM as a learning algorithm for single\nhidden-layer feed forward neural networks provides much faster learning process\nand better generalization ability in comparision with some other traditional\nlearning algorithms. Also, in the proposed method a trainable gating network is\napplied to aggregate the outputs of the experts dynamically according to the\ninput sample. Our experimental results and statistical analysis on 11 benchmark\ndatasets confirm that MEETG has an acceptable performance in classification\nproblems. Furthermore, our experimental results show that the proposed approach\noutperforms the original ELM on prediction stability and classification\naccuracy.", + "authors": "Laleh Armi, Elham Abbasi, Jamal Zarepour-Ahmadabadi", + "published": "2021-05-25", + "updated": "2021-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.01778v1", + "title": "Hierarchical mixture of discriminative Generalized Dirichlet classifiers", + "abstract": "This paper presents a discriminative classifier for compositional data. This\nclassifier is based on the posterior distribution of the Generalized Dirichlet\nwhich is the discriminative counterpart of Generalized Dirichlet mixture model.\nMoreover, following the mixture of experts paradigm, we proposed a hierarchical\nmixture of this classifier. In order to learn the models parameters, we use a\nvariational approximation by deriving an upper-bound for the Generalized\nDirichlet mixture. To the best of our knownledge, this is the first time this\nbound is proposed in the literature. 
Experimental results are presented for\nspam detection and color space identification.", + "authors": "Elvis Togban, Djemel Ziou", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09762v1", + "title": "Diversifying the Mixture-of-Experts Representation for Language Models with Orthogonal Optimizer", + "abstract": "The Mixture of Experts (MoE) has emerged as a highly successful technique in\ndeep learning, based on the principle of divide-and-conquer to maximize model\ncapacity without significant additional computational cost. Even in the era of\nlarge-scale language models (LLMs), MoE continues to play a crucial role, as\nsome researchers have indicated that GPT-4 adopts the MoE structure to ensure\ndiverse inference results. However, MoE is susceptible to performance\ndegeneracy, particularly evident in the issues of imbalance and homogeneous\nrepresentation among experts. While previous studies have extensively addressed\nthe problem of imbalance, the challenge of homogeneous representation remains\nunresolved. In this study, we shed light on the homogeneous representation\nproblem, wherein experts in the MoE fail to specialize and lack diversity,\nleading to frustratingly high similarities in their representations (up to 99%\nin a well-performed MoE model). This problem restricts the expressive power of\nthe MoE and, we argue, contradicts its original intention. To tackle this\nissue, we propose a straightforward yet highly effective solution: OMoE, an\northogonal expert optimizer. Additionally, we introduce an alternating training\nstrategy that encourages each expert to update in a direction orthogonal to the\nsubspace spanned by other experts. Our algorithm facilitates MoE training in\ntwo key ways: firstly, it explicitly enhances representation diversity, and\nsecondly, it implicitly fosters interaction between experts during orthogonal\nweights computation. Through extensive experiments, we demonstrate that our\nproposed optimization algorithm significantly improves the performance of\nfine-tuning the MoE model on the GLUE benchmark, SuperGLUE benchmark,\nquestion-answering task, and name entity recognition tasks.", + "authors": "Boan Liu, Liang Ding, Li Shen, Keqin Peng, Yu Cao, Dazhao Cheng, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-10-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.14976v4", + "title": "MoCaE: Mixture of Calibrated Experts Significantly Improves Object Detection", + "abstract": "Combining the strengths of many existing predictors to obtain a Mixture of\nExperts which is superior to its individual components is an effective way to\nimprove the performance without having to develop new architectures or train a\nmodel from scratch. However, surprisingly, we find that na\\\"ively combining\nexpert object detectors in a similar way to Deep Ensembles, can often lead to\ndegraded performance. We identify that the primary cause of this issue is that\nthe predictions of the experts do not match their performance, a term referred\nto as miscalibration. Consequently, the most confident detector dominates the\nfinal predictions, preventing the mixture from leveraging all the predictions\nfrom the experts appropriately. 
To address this, when constructing the Mixture\nof Experts, we propose to combine their predictions in a manner which reflects\nthe individual performance of the experts; an objective we achieve by first\ncalibrating the predictions before filtering and refining them. We term this\napproach the Mixture of Calibrated Experts and demonstrate its effectiveness\nthrough extensive experiments on 5 different detection tasks using a variety of\ndetectors, showing that it: (i) improves object detectors on COCO and instance\nsegmentation methods on LVIS by up to $\\sim 2.5$ AP; (ii) reaches\nstate-of-the-art on COCO test-dev with $65.1$ AP and on DOTA with $82.62$\n$\\mathrm{AP_{50}}$; (iii) outperforms single models consistently on recent\ndetection tasks such as Open Vocabulary Object Detection.", + "authors": "Kemal Oksuz, Selim Kuzucu, Tom Joy, Puneet K. Dokania", + "published": "2023-09-26", + "updated": "2024-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08753v1", + "title": "Table-based Fact Verification with Self-adaptive Mixture of Experts", + "abstract": "The table-based fact verification task has recently gained widespread\nattention and yet remains to be a very challenging problem. It inherently\nrequires informative reasoning over natural language together with different\nnumerical and logical reasoning on tables (e.g., count, superlative,\ncomparative). Considering that, we exploit mixture-of-experts and present in\nthis paper a new method: Self-adaptive Mixture-of-Experts Network (SaMoE).\nSpecifically, we have developed a mixture-of-experts neural network to\nrecognize and execute different types of reasoning -- the network is composed\nof multiple experts, each handling a specific part of the semantics for\nreasoning, whereas a management module is applied to decide the contribution of\neach expert network to the verification result. A self-adaptive method is\ndeveloped to teach the management module combining results of different experts\nmore efficiently without external knowledge. The experimental results\nillustrate that our framework achieves 85.1% accuracy on the benchmark dataset\nTabFact, comparable with the previous state-of-the-art models. We hope our\nframework can serve as a new baseline for table-based verification. Our code is\navailable at https://github.com/THUMLP/SaMoE.", + "authors": "Yuxuan Zhou, Xien Liu, Kaiyin Zhou, Ji Wu", + "published": "2022-04-19", + "updated": "2022-04-19", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1511.06072v1", + "title": "Mediated Experts for Deep Convolutional Networks", + "abstract": "We present a new supervised architecture termed Mediated Mixture-of-Experts\n(MMoE) that allows us to improve classification accuracy of Deep Convolutional\nNetworks (DCN). Our architecture achieves this with the help of expert\nnetworks: A network is trained on a disjoint subset of a given dataset and then\nrun in parallel to other experts during deployment. A mediator is employed if\nexperts contradict each other. This allows our framework to naturally support\nincremental learning, as adding new classes requires (re-)training of the new\nexpert only. We also propose two measures to control computational complexity:\nAn early-stopping mechanism halts experts that have low confidence in their\nprediction. 
The system allows to trade-off accuracy and complexity without\nfurther retraining. We also suggest to share low-level convolutional layers\nbetween experts in an effort to avoid computation of a near-duplicate feature\nset. We evaluate our system on a popular dataset and report improved accuracy\ncompared to a single model of same configuration.", + "authors": "Sebastian Agethen, Winston H. Hsu", + "published": "2015-11-19", + "updated": "2015-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1312.4314v3", + "title": "Learning Factored Representations in a Deep Mixture of Experts", + "abstract": "Mixtures of Experts combine the outputs of several \"expert\" networks, each of\nwhich specializes in a different part of the input space. This is achieved by\ntraining a \"gating\" network that maps each input to a distribution over the\nexperts. Such models show promise for building larger networks that are still\ncheap to compute at test time, and more parallelizable at training time. In\nthis this work, we extend the Mixture of Experts to a stacked model, the Deep\nMixture of Experts, with multiple sets of gating and experts. This\nexponentially increases the number of effective experts by associating each\ninput with a combination of experts at each layer, yet maintains a modest model\nsize. On a randomly translated version of the MNIST dataset, we find that the\nDeep Mixture of Experts automatically learns to develop location-dependent\n(\"where\") experts at the first layer, and class-specific (\"what\") experts at\nthe second layer. In addition, we see that the different combinations are in\nuse when the model is applied to a dataset of speech monophones. These\ndemonstrate effective use of all expert combinations.", + "authors": "David Eigen, Marc'Aurelio Ranzato, Ilya Sutskever", + "published": "2013-12-16", + "updated": "2014-03-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.10598v3", + "title": "Sparsely-gated Mixture-of-Expert Layers for CNN Interpretability", + "abstract": "Sparsely-gated Mixture of Expert (MoE) layers have been recently successfully\napplied for scaling large transformers, especially for language modeling tasks.\nAn intriguing side effect of sparse MoE layers is that they convey inherent\ninterpretability to a model via natural expert specialization. In this work, we\napply sparse MoE layers to CNNs for computer vision tasks and analyze the\nresulting effect on model interpretability. To stabilize MoE training, we\npresent both soft and hard constraint-based approaches. With hard constraints,\nthe weights of certain experts are allowed to become zero, while soft\nconstraints balance the contribution of experts with an additional auxiliary\nloss. As a result, soft constraints handle expert utilization better and\nsupport the expert specialization process, while hard constraints maintain more\ngeneralized experts and increase overall model performance. Our findings\ndemonstrate that experts can implicitly focus on individual sub-domains of the\ninput space. For example, experts trained for CIFAR-100 image classification\nspecialize in recognizing different domains such as flowers or animals without\nprevious data clustering. 
Experiments with RetinaNet and the COCO dataset\nfurther indicate that object detection experts can also specialize in detecting\nobjects of distinct sizes.", + "authors": "Svetlana Pavlitska, Christian Hubschneider, Lukas Struppek, J. Marius Z\u00f6llner", + "published": "2022-04-22", + "updated": "2023-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05838v1", + "title": "Liu-type Shrinkage Estimators for Mixture of Poisson Regressions with Experts: A Heart Disease Study", + "abstract": "Count data play a critical role in medical research, such as heart disease.\nThe Poisson regression model is a common technique for evaluating the impact of\na set of covariates on the count responses. The mixture of Poisson regression\nmodels with experts is a practical tool to exploit the covariates, not only to\nhandle the heterogeneity in the Poisson regressions but also to learn the\nmixing structure of the population. Multicollinearity is one of the most common\nchallenges with regression models, leading to ill-conditioned design matrices\nof Poisson regression components and expert classes. The maximum likelihood\nmethod produces unreliable and misleading estimates for the effects of the\ncovariates in multicollinearity. In this research, we develop Ridge and\nLiu-type methods as two shrinkage approaches to cope with the ill-conditioned\ndesign matrices of the mixture of Poisson regression models with experts.\nThrough various numerical studies, we demonstrate that the shrinkage methods\noffer more reliable estimates for the coefficients of the mixture model in\nmulticollinearity while maintaining the classification performance of the ML\nmethod. The shrinkage methods are finally applied to a heart study to analyze\nthe heart disease rate stages.", + "authors": "Elsayed Ghanem, Moein Yoosefi, Armin Hatefi", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "stat.CO", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1409.4698v1", + "title": "A Mixtures-of-Experts Framework for Multi-Label Classification", + "abstract": "We develop a novel probabilistic approach for multi-label classification that\nis based on the mixtures-of-experts architecture combined with recently\nintroduced conditional tree-structured Bayesian networks. Our approach captures\ndifferent input-output relations from multi-label data using the efficient\ntree-structured classifiers, while the mixtures-of-experts architecture aims to\ncompensate for the tree-structured restrictions and build a more accurate\nmodel. We develop and present algorithms for learning the model from data and\nfor performing multi-label predictions on future data instances. 
Experiments on\nmultiple benchmark datasets demonstrate that our approach achieves highly\ncompetitive results and outperforms the existing state-of-the-art multi-label\nclassification methods.", + "authors": "Charmgil Hong, Iyad Batal, Milos Hauskrecht", + "published": "2014-09-16", + "updated": "2014-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "I.2.6" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2011.01613v1", + "title": "Towards a Universal Gating Network for Mixtures of Experts", + "abstract": "The combination and aggregation of knowledge from multiple neural networks\ncan be commonly seen in the form of mixtures of experts. However, such\ncombinations are usually done using networks trained on the same tasks, with\nlittle mention of the combination of heterogeneous pre-trained networks,\nespecially in the data-free regime. This paper proposes multiple data-free\nmethods for the combination of heterogeneous neural networks, ranging from the\nutilization of simple output logit statistics, to training specialized gating\nnetworks. The gating networks decide whether specific inputs belong to specific\nnetworks based on the nature of the expert activations generated. The\nexperiments revealed that the gating networks, including the universal gating\napproach, constituted the most accurate approach, and therefore represent a\npragmatic step towards applications with heterogeneous mixtures of experts in a\ndata-free regime. The code for this project is hosted on github at\nhttps://github.com/cwkang1998/network-merging.", + "authors": "Chen Wen Kang, Chua Meng Hong, Tomas Maul", + "published": "2020-11-03", + "updated": "2020-11-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.12830v1", + "title": "Mixtures of Gaussian Process Experts with SMC$^2$", + "abstract": "Gaussian processes are a key component of many flexible statistical and\nmachine learning models. However, they exhibit cubic computational complexity\nand high memory constraints due to the need of inverting and storing a full\ncovariance matrix. To circumvent this, mixtures of Gaussian process experts\nhave been considered where data points are assigned to independent experts,\nreducing the complexity by allowing inference based on smaller, local\ncovariance matrices. Moreover, mixtures of Gaussian process experts\nsubstantially enrich the model's flexibility, allowing for behaviors such as\nnon-stationarity, heteroscedasticity, and discontinuities. In this work, we\nconstruct a novel inference approach based on nested sequential Monte Carlo\nsamplers to simultaneously infer both the gating network and Gaussian process\nexpert parameters. This greatly improves inference compared to importance\nsampling, particularly in settings when a stationary Gaussian process is\ninappropriate, while still being thoroughly parallelizable.", + "authors": "Teemu H\u00e4rk\u00f6nen, Sara Wade, Kody Law, Lassi Roininen", + "published": "2022-08-26", + "updated": "2022-08-26", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.06966v1", + "title": "Acquiring Diverse Skills using Curriculum Reinforcement Learning with Mixture of Experts", + "abstract": "Reinforcement learning (RL) is a powerful approach for acquiring a\ngood-performing policy. 
However, learning diverse skills is challenging in RL\ndue to the commonly used Gaussian policy parameterization. We propose\n\\textbf{Di}verse \\textbf{Skil}l \\textbf{L}earning (Di-SkilL), an RL method for\nlearning diverse skills using Mixture of Experts, where each expert formalizes\na skill as a contextual motion primitive. Di-SkilL optimizes each expert and\nits associate context distribution to a maximum entropy objective that\nincentivizes learning diverse skills in similar contexts. The per-expert\ncontext distribution enables automatic curricula learning, allowing each expert\nto focus on its best-performing sub-region of the context space. To overcome\nhard discontinuities and multi-modalities without any prior knowledge of the\nenvironment's unknown context probability space, we leverage energy-based\nmodels to represent the per-expert context distributions and demonstrate how we\ncan efficiently train them using the standard policy gradient objective. We\nshow on challenging robot simulation tasks that Di-SkilL can learn diverse and\nperformant skills.", + "authors": "Onur Celik, Aleksandar Taranovic, Gerhard Neumann", + "published": "2024-03-11", + "updated": "2024-03-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.03994v1", + "title": "Video Relationship Detection Using Mixture of Experts", + "abstract": "Machine comprehension of visual information from images and videos by neural\nnetworks faces two primary challenges. Firstly, there exists a computational\nand inference gap in connecting vision and language, making it difficult to\naccurately determine which object a given agent acts on and represent it\nthrough language. Secondly, classifiers trained by a single, monolithic neural\nnetwork often lack stability and generalization. To overcome these challenges,\nwe introduce MoE-VRD, a novel approach to visual relationship detection\nutilizing a mixture of experts. MoE-VRD identifies language triplets in the\nform of < subject, predicate, object> tuples to extract relationships from\nvisual processing. Leveraging recent advancements in visual relationship\ndetection, MoE-VRD addresses the requirement for action recognition in\nestablishing relationships between subjects (acting) and objects (being acted\nupon). In contrast to single monolithic networks, MoE-VRD employs multiple\nsmall models as experts, whose outputs are aggregated. Each expert in MoE-VRD\nspecializes in visual relationship learning and object tagging. By utilizing a\nsparsely-gated mixture of experts, MoE-VRD enables conditional computation and\nsignificantly enhances neural network capacity without increasing computational\ncomplexity. 
Our experimental results demonstrate that the conditional\ncomputation capabilities and scalability of the mixture-of-experts approach\nlead to superior performance in visual relationship detection compared to\nstate-of-the-art methods.", + "authors": "Ala Shaabana, Zahra Gharaee, Paul Fieguth", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.15961v1", + "title": "Mixture of Tokens: Efficient LLMs through Cross-Example Aggregation", + "abstract": "Despite the promise of Mixture of Experts (MoE) models in increasing\nparameter counts of Transformer models while maintaining training and inference\ncosts, their application carries notable drawbacks. The key strategy of these\nmodels is to, for each processed token, activate at most a few experts -\nsubsets of an extensive feed-forward layer. But this approach is not without\nits challenges. The operation of matching experts and tokens is discrete, which\nmakes MoE models prone to issues like training instability and uneven expert\nutilization. Existing techniques designed to address these concerns, such as\nauxiliary losses or balance-aware matching, result either in lower model\nperformance or are more difficult to train. In response to these issues, we\npropose Mixture of Tokens, a fully-differentiable model that retains the\nbenefits of MoE architectures while avoiding the aforementioned difficulties.\nRather than routing tokens to experts, this approach mixes tokens from\ndifferent examples prior to feeding them to experts, enabling the model to\nlearn from all token-expert combinations. Importantly, this mixing can be\ndisabled to avoid mixing of different sequences during inference. Crucially,\nthis method is fully compatible with both masked and causal Large Language\nModel training and inference.", + "authors": "Szymon Antoniak, Sebastian Jaszczur, Micha\u0142 Krutul, Maciej Pi\u00f3ro, Jakub Krajewski, Jan Ludziejewski, Tomasz Odrzyg\u00f3\u017ad\u017a, Marek Cygan", + "published": "2023-10-24", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + } + ], + [ + { + "url": "http://arxiv.org/abs/2306.01105v2", + "title": "Revisiting Hate Speech Benchmarks: From Data Curation to System Deployment", + "abstract": "Social media is awash with hateful content, much of which is often veiled\nwith linguistic and topical diversity. The benchmark datasets used for hate\nspeech detection do not account for such divagation as they are predominantly\ncompiled using hate lexicons. However, capturing hate signals becomes\nchallenging in neutrally-seeded malicious content. Thus, designing models and\ndatasets that mimic the real-world variability of hate warrants further\ninvestigation.\n To this end, we present GOTHate, a large-scale code-mixed crowdsourced\ndataset of around 51k posts for hate speech detection from Twitter. GOTHate is\nneutrally seeded, encompassing different languages and topics. We conduct\ndetailed comparisons of GOTHate with the existing hate speech datasets,\nhighlighting its novelty. We benchmark it with 10 recent baselines. Our\nextensive empirical and benchmarking experiments suggest that GOTHate is hard\nto classify in a text-only setup. Thus, we investigate how adding endogenous\nsignals enhances the hate speech detection task. 
We augment GOTHate with the\nuser's timeline information and ego network, bringing the overall data source\ncloser to the real-world setup for understanding hateful content. Our proposed\nsolution HEN-mBERT is a modular, multilingual, mixture-of-experts model that\nenriches the linguistic subspace with latent endogenous signals from history,\ntopology, and exemplars. HEN-mBERT transcends the best baseline by 2.5% and 5%\nin overall macro-F1 and hate class F1, respectively. Inspired by our\nexperiments, in partnership with Wipro AI, we are developing a semi-automated\npipeline to detect hateful content as a part of their mission to tackle online\nharm.", + "authors": "Atharva Kulkarni, Sarah Masud, Vikram Goyal, Tanmoy Chakraborty", + "published": "2023-06-01", + "updated": "2023-06-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Original Paper", + "paper_cat": "Mixture AND of AND Experts", + "gt": "Hate speech datasets: Based on the hypothesis that gender and race are primary targets of hate speech, Waseem and Hovy [61] released a dataset of 16\ud835\udc58tweets labeled in-house. Davidson et al. [13] released a crowd-sourced annotated dataset of size 25\ud835\udc58tweets. Later Founta et al. [19] provided a large-scale corpus of 80\ud835\udc58English tweets curated via Twitter streaming API. However, they applied bootstrapped sampling to enhance the volume of minority classes. Recently, Toraman et al. [58] proposed another large-scale crowdsourced dataset for English and Turkish (100\ud835\udc58each) curated across Revisiting Hate Speech Benchmarks: From Data Curation to System Deployment KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Table 7: Error analysis of mBERT and HEN-mBERT analyzing examples of correct classification, misclassification, and mislabelling. Test tweet Gold mBERT HEN-mBERT Example 1: $MENTION$ $MENTION$ Arvind Kejriwal\u2019s AAP is ISIS OF INDIA. HIS PROPAGANDA WILL DESTROY HINDUS FROM INDIA WITH HELP OF CONGRESS AND LEFTIST. SAVE HINDU SAVE BHARAT. SAVE BRAHMIN , SAVE DALITS, SAVE HINDU OTHER CAST delhi burns delhi riots2020 arrest sonia gandhi delhi voilence O O O Example 2: this must have been a apka tahir offer to jihadis (terrorist) kill kaffirs, loot their property, do what u want with any kaffir (non-beliver) female that \"ur right hand posseses\" ...in short, maal-e-ganimat delhi riots delhi riots H P H Example 3: \ud835\udc40\ud835\udc38\ud835\udc41\ud835\udc47\ud835\udc3c\ud835\udc42\ud835\udc41It\u2019s not the number, it\u2019s the % increase and the doubling time. This administration is just not up to it\u2019 They thought br exit would be easy and were wrong, and they are in grave danger of causing much misery through vacillation and poor choices. covid19 $URL P P O this must have been a apka tahir offer to jihadis kill kaffirs, loot their property, do what u want with any kaffir female that \"ur right hand posseses\" ...in short, maal-e-ganimat delhi riots delhi riots \"Where's the outrage? Where's the Musalman-Khatre-Mein-hai Gang? Where's the Bhim army now? Pathetic cronies. hindu lives matter \\$URL\\$\" look at what Hindus living in mixed-population localities are facing, what $MENTION$ had to face for merely asking his Muslim neighbors not to harass his daughter sexually...and even then if u ask why people don't rent to Muslims, get ur head examined. 
$MENTION$ and $MENTION$ naah...Islamists will never accept Muslim refugees; they will tell the Muslims to create havoc in their home countries and do whatever it takes to convert Dar-ul-Harb into Dar-ul-Islam. Something we should seriously consider doing with Pak Hindus too Provocation\u00a0 Incoming post by user\u00a0 Real-time extraction of recent posts by user tafter tbefore Dense retrival to extract exemplars from KB Real-time extraction of recent interactions by user Contextual Hate Speech Detection System Final label Timeline Module Network Interaction Module Exemplar Extraction Module Content Moderator Semi-Automated Human Moderator Provocation\u00a0 Prediction label User Ego network of user Figure 4: The desired pipeline being developed for semi-automated flagging of hateful content. For an incoming post, we query the user\u2019s timeline and user interaction to extract their most recent footprint. Additionally, we query the existing exemplar base to obtain the exemplar. The combination of incoming posts and auxiliary signals is employed for contextual hate speech detection. Meanwhile, these signals and predicted labels will be provided to content moderators to confirm the label. five topics. Here too, the authors relied on manually curated keywords to sample tweets per topic. Researchers are now focusing on datasets specific to diverse languages [33, 39, 41, 49, 50]. Studies are also being conducted to understand the subtle forms of implicit hate [16, 26]. While a few neutrally-seeded datasets also exist [5, 14], they tend to focus more on a single event or target. Methods for hate speech classification: Systems designed to detect hate speech range from employing feature-engineered logistic regression [13, 61] to non-contextual embedding combined with vanilla CNN and LSTM [4]. Nowadays, transformer [59] based language models (LM) are being used effectively for hate speech detection [9, 38]. Methods have also explored using additional tasks to aid hate speech detection, such as emotions [3, 11] and sentiment [8, 65] etc.. Valiant efforts are being made to explore the usage of topical, historical [45], network data [22].", + "pre_questions": [], + "main_content": "INTRODUCTION Due to their democratized nature, social media platforms serve as breeding grounds for the unwarranted spread of hate speech [10, 18, 35], opprobrious offensive opinions [64], and provocative propaganda [12]. Such proliferation of hate speech disrupts the harmony and cohesiveness of online and offline communities [6, 36]. Thus, the diachronic study of hate speech\u2019s digital footprint has been an active research topic. Despite the aggressive work in this domain, we are yet to confront the myriad issues associated with hate speech benchmarking in a consistent manner. Limitations of benchmark hate speech datasets: The common vogue for curating hate speech datasets is using hate lexicons [13, 19, 61] and libelous identifiers of race, gender, religion, and culture [56]. This presents a rather myopic approach, focusing on slur words without considering the text\u2019s semantics [52, 55]. In real-world discourse, hate speech stems from extremist views and prejudiced perspectives posted in response to a real-world event. Besides, hate speech\u2019s dynamics, syntax, and semantics are bound to change following new triggers [17, 21]. Thus, identifying hateful posts in such scenarios requires going beyond language and keywordbased reliance and comprehending the post\u2019s topical and contextual information. 
This makes hate speech detection highly contextual, necessitating topical knowledge [35], commonsense reasoning [1], comprehension of stereotypes [60], and cultural references [7, 57]. *Equal Contributions. arXiv:2306.01105v2 [cs.CL] 15 Jun 2023 KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Kulkarni et al. Moreover, the lexicon strategy yields highly biased data, reflecting annotations far too skewed to accurately represent the realworld distribution of hate speech online [21]. It also leads to low coverage of nuanced hate speech constructs [16]. Furthermore, nonobvious hate speech reduces the semantic diversity between hateful and benign posts [2]. The issue is exacerbated in the case of finegrained hate speech classification, wherein the already indistinct label boundaries [2] get even blurrier. While a few neutrally-seeded datasets also exist [5, 14], they tend to focus more on controversial events (e.g., the Lee Rigby murder) or specific hate targets (e.g., immigrants), which may introduce topic bias and artificially inflate model performance. Therefore, a dataset covering diverse but disparate topics and languages is the need of the hour. Proposed dataset: To this end, we curate Geo-pOlitical Topical Hate dataset (GOTHate), a comprehensive fine-grained hate speech classification dataset encompassing nuanced and realistic aspects of online hateful discourse. GOTHate is a conglomerate of tweets from seven assorted topics spanning socio-political events and world affairs, each annotated with one of the four labels of hate, offensive, provocative, and non-hate. We follow a neutral seeding policy, wherein the data is collected for each topic across relevant periods and then annotated for fine-grained hate labels. Therefore, the dataset is characterized by more cohesion within the topics, resulting in less linguistic, syntactic, and contextual diversity across the labels. Besides, the dataset covers English, Hindi, and Hinglish, exhibiting linguistic variations. In summary, GOTHate is a competitive dataset miming real-world online hate speech discourse. Note the dataset is not touted as a general reference dataset since such a concept is difficult to define for hate speech. We rather obtain a more \u2018generic view\u2019 of social media discourse using neutral topic-based scrapping. Figure 1 illustrates representative samples of GOTHate. The hateful example critically targets the liberals and the Muslim community. However, the tweet\u2019s indirect language and requirement of knowledge about \u2018Islamophobia\u2019 makes it difficult to mark it as hate. The offensive statement targetting \u2018Narendra Modi\u2019 and \u2018Amit Shah\u2019 makes a demeaning comparison between the \u2018Gobar\u2019 (cow dung) and \u2018Gobar Bhakts\u2019 (dung followers) without using any explicit swearwords. The provocative example invokes a call to action in the context of #HinduLivesMatter. Lastly, the benign example hints at a subtle mockery of \u2018Xi Jinping\u2019 but is not harsh enough to mark it as either hateful, offensive, or provocative. In short, GOTHate can be challenging for hate speech classification models. Benchmarking methods: We benchmark GOTHate against 10 baseline models. Further, to capture the intricacies of GOTHate, we present a novel model, History-Examplar-Network Infused mBERT (HEN-mBERT). It is a mixture-of-experts variant of multilingual BERT augmented with endogenous signals. 
The Timeline Module tactfully subsumes the users\u2019 historical data to accentuate any hateful bias present in the user\u2019s posting history. The Exemplar Module retrieves exemplary tweets that empower the model to distinguish similar and dissimilar features between the labels. The Graph Module extracts information from the users\u2019 social connections via its ego network. Finally, the Reasoning Module aggregates these multiple experts via attention-based mutual interaction. Empirical results attest that HEN-mBERT outperforms the best baseline by 2.5% points in macro-F1 and 5% points in hate class F1. Overall, our experiments attest that GOTHate is a tough dataset for hate speech classification. In summary, our main contributions are as follows: \u2022 Novel dataset: We release a neutrally seeded, hate speech dataset that spans seven topics, three geographies and three languages. Additionally, we provide detailed guidelines for annotation and introduce a new category of provocation (Section 3). \u2022 Cross-dataset study: We perform various experiments to analyze GOTHate against the existing hate speech datasets and establish the challenges GOTHate brings in (Section 4). \u2022 Incorporation of endogenous signal: We also experiment with various endogenous signals to enhance the detection of hateful content. The combination of such extensive primary and auxiliary data, combining limited labeled information with plentiful unlabeled but contextual information, nudges for a thorough and systematic study of hate speech detection (Section 5). \u2022 Benchmarking: We benchmark GOTHate with ten diverse and widely-studied baseline methods (Section 6). \u2022 Content moderation pipeline: This research has led to the creation of a hate speech detection pipeline currently under development in collaboration with Wipro AI [34, 35] (Section 7). Reproducibility: The source code and sample dataset are publicly available on our Github2. Given the data-sharing policy of Twitter, we will not be able to release the tweet text publicly, but we will realize the full set of tweet id and their labels. Additionally, the full dataset (in text and network form) will be available to researchers upon request, as is the case with the existing hate speech datasharing policy. Limitations and Ethical considerations regarding our dataset and annotation are outlined in Appendices A and B. The system can also be extended to other online forums with platformspecific endogenous features, such as LinkedIn, Reddit, etc. 2 MOTIVATION Hypothesis I: There are extremely few semantic and linguistic differences in real-world online hate speech rhetoric around a given topic. Moreover, there is a dearth of dataset that does not accentuate the concept of hatred and represents authentic, real-world speech. Thus, akin to real-world, GOTHate features users who post hateful and non-hateful tweets. This intersection, coupled with the topical diversity, neutral seeding, and multi-linguality, makes it a bona fide hate speech corpus that is more difficult to classify than its counterparts. We empirically establish the same in Section 4. Hypothesis II: Most contemporary hate speech datasets are purely text-based with no metadata information, which could be crucial for accurate classification. Thus, in GOTHate, we add endogenous signals (auxiliary data) and combine them with the incoming posts (primary data). We capture a user\u2019s intrinsic behavior by incorporating posting history. 
We consider each user\u2019s ego network to encompass which other users influence their opinions. One can also employ the similarity and diversity from the same label set present within GOTHate. The use of exemplar samples can imbue this. The intuition for the individual endogenous signals is highlighted in Section 5. The advantage of including each signal is discussed during performance comparison and error analysis in Section 6. Hypothesis III: The content moderators in the real world do not operate in isolation but rather consider the contextual information of who is posting what. By incorporating endogenous signals for 2https://github.com/LCS2-IIITD/GotHate Revisiting Hate Speech Benchmarks: From Data Curation to System Deployment KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA hate detection, we aim to replicate the meta-data-rich dashboards that content moderators can access when moderating for a socialmedia platform. Therefore, we aim to bring manual and automated content moderation closer. Our experiments further showcase a gap in automated speech detection systems, which can best operate with a human-in-the-loop (Section 7). 3 DATASET For this study, we concentrate on hateful content on Twitter, owing to the ease of extracting publicly-available posts and their metadata. Primary data: GOTHate consists of tweets classified into four categories \u2013 Hate (H), Offensive (O), Provocative (P), and Neutral/nonhate (N). The examples span English, code-mixed Hinglish, and Devanagari Hindi. Using the Twitter API3, we compile a corpus of 51, 367 tweets posted by 25, 796 unique users, henceforth referred to as root tweets and root users, respectively. Table 1 presents the topical and label-based overview of GOTHate. It spans seven sociopolitical events (topics) across 3 geographies (USA, UK, and India). Instead of relying on hate lexicons, we collect the tweets using neutral topics, such as \u2018demonetization\u2019 (India) and \u2018Brexit\u2019 (UK) that garner varying degrees of hate, backlash, and support. Table 1: Statistical information of GOTHate with unique tweets (users) per label. The user participating in a topic/label subset may not be exclusive to that subset. Label set consists of Hate (H), Offensive (O), Provocative (P) and Neutral (N). Topic Label-wise unique tweets (users) (country of origin) H O P N Never Trump Campaign (USA) 5 (5) 273 (222) 536 (334) 1443 (889) Delhi Roits 2020 (India) 1503 (1142) 2071 (1526) 4776 (3248) 8508 (4795) Demonetization (India) 574 (495) 915 (750) 1446 (1154) 3643 (2112) Brexit (UK) 38 (35) 505 (423) 1041 (805) 6543 (3654) Umar Khalid JNU (India) 70 (63) 2465 (1967) 18 (17) 2922 (2028) Northeast Delhi Riots 2020 (India) 1182 (1055) 1622 (1441) 1605 (1470) 4364 (3539) Hindu Lives Matter (USA & India) 343 (216) 219 (167) 1134 (552) 1603 (967) OVERALL 51367 8070 (6161) 10556 (7088) 29026 (16186) Interestingly, we find overlapping posts between the topics of \u2018never Trump\u2019 and the Indian protests of Citizenship Amendment Act4. This can be attributed to Donald Trump\u2019s visiting India during the week when protests against the CAA-NRC were at their peak. Our data collection reflects the natural discourse on social media more closely as hateful and provocative content is interlaced with neutral commentary on a topic. The same user can invoke hateful and non-hateful sentiments depending on the topic under discussion [35]. The use of hate lexicons may not capture the same. 
Auxiliary data: We also collect the root users\u2019 timelines and follower networks. The root user\u2019s timeline data is curated as the 25 tweets before and after the posting of the root tweet. We collect the one-hop followers and followees of the root users. Overall, the interaction network consists of \u224820\ud835\udc40unique users. We also collect the last 100 retweeters of every root tweet. Some of these retweeters 3https://developer.twitter.com/ 4bit.ly/3wWvDRb are followers of the root users, and are therefore marked as internal retweeters. Retweeters not initially captured in our interaction network are marked as external retweeters. 3.1 Label Definitions Vulnerable groups can be defined as people who have historically been oppressed or abused based on religion, caste, country, color, gender, ethnicity, etc. For example, Muslims are a vulnerable group in China, whereas, in the USA, people of Chinese origin are minorities. Similarly, religion and caste are some known prejudices within the Indian subcontinent. Therefore, based on the content of a post and the vulnerable group it targets, a post can be considered Hateful (H), Offensive (O), Provocative (P), or Normal (N). As shown in Figure 2(a), to further reduce the disagreement, we introduce an order of priority \ud835\udc3b> \ud835\udc42> \ud835\udc43> \ud835\udc41. While anyone can offend and provoke anyone, hate only applies if the attack is against a vulnerable group. Note that we do not have a pre-defined list of vulnerable groups, but use the definition to help the annotators be cognizant of the task at hand. We define each label as follows: Hate: It is differentiated by extreme bias5 against the target [61] via any of the following: (1) Negatively stereotypes or distorts views on a vulnerable group with unfounded claims. (2) Silence or suppress a member(s) of a vulnerable group. (3) Promotes violence against a vulnerable group member(s). Offensive: A statement is offensive if it conforms to one of the following points: (1) Use of derogatory words to abuse, curse (\u201cgo die,\" \u201ckill yourself\"), sexualize (\u201cf** you,\" \u201ckiss my a**\"), or express inferiority (\u201cuseless,\" \u201cdumb,\" \u201ccrazy\") towards an entity, criticizing the person and not the action/event. (2) Comparison with demons/criminals/animals either directly or by implication (\u201cWasn\u2019t X bad enough,\" \u201cY is p**\"). (3) Use hashtags covered by either points #1 or #2. Hashtags like #EUnuch (UK) #NastyNancy (USA), #SellOutHiliary (USA), #PMNautanki (India), #CoronaJihad (India) are hurtful by themselves. Meanwhile, #DelhiViolence or #NeverTrump or #NotMyPM, or #Resign are not offensive just by themselves and need the content of a tweet to determine their label. Provocative: If a post itself is not offensive based on the above definitions but provokes some form of negative reaction from the reader, then it is provocative based on any/all of the following: (1) It accuses a particular group or individual of an event related to issues surrounding the group. (2) Invokes a call to action to stir a group against the target. (3) The implication of boycotting the target because of issues surrounding the group is also provocative. It can be social (like disallowing entry), economic (like denying financial entitlements), or political (like denying political participation). 3.2 Data Annotation We perform annotation in a two-phase continuous validation manner [19] as shown in Figure 2(b). 
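For illustration, the label precedence H > O > P > N from Figure 2(a) can be written as a simple decision rule. The sketch below only encodes the ordering; the boolean judgments are assumed to come from annotators applying the definitions above, and the function name is hypothetical.

```python
def assign_label(is_hateful: bool, is_offensive: bool, is_provocative: bool) -> str:
    """Encode the annotation precedence H > O > P > N (Section 3.1 / Figure 2a).

    Each flag is an annotator judgment against the corresponding label
    definition; this helper only enforces the priority ordering.
    """
    if is_hateful:        # extreme bias targeting a vulnerable group
        return "H"
    if is_offensive:      # derogatory words, demeaning comparisons, or offensive hashtags
        return "O"
    if is_provocative:    # accusation, call to action, or implied boycott
        return "P"
    return "N"            # neutral / non-hate
```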
Annotation phase I: Two researchers and a lexicographer conducted the first annotation phase (referred to as Group A). They 5UN Definition: bit.ly/3HoFpjP KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Kulkarni et al. Does the tweet contain any offensive hashtag? Is the tweet offensive according to other points? Is the tweet targeted and extremely biased? Is the tweet provocative? Mark as Hate (H) Mark as Offensive (O) Mark as Provocative (P) Mark as Normal (N) No Yes No Yes No Yes No Yes Does the tweet contain any offensive hashtag? Is the tweet offensive according to other points? Is the tweet targeted and extremely biased? (a) (b) Figure 2: (a) Flowchart for annotating a post. Precedence for labels: H>O>P>N. (b) Overview of the two-phased continuousvalidation annotation process. The label set consists of Hate (H), Offensive (O), Provocative (P) and Neutral (N). Table 2: Inter-class similarity within a hate dataset measured via Jensen\u2013Shannon divergence (JS). The lower the JS, the harder it will be to separate the classes. Dataset Label JS Dataset Label JS GOTHate H O 0.135 Founta [19] H A 0.205 H P 0.087 H S 0.476 H N 0.118 H N 0.319 O P 0.119 A S 0.468 O N 0.121 A N 0.318 P N 0.063 S N 0.243 Davidson [13] H O 0.204 HASOC19 [33] H N 0.246 H N 0.347 OLID [63] O N 0.115 O N 0.332 HatEval [5] H N 0.131 ImpHate [16] H N 0.122 RETINA [35] H N 0.254 were given a random sample of 1000 tweets spread across all topics. The guidelines crystallized over iterations. During annotation, Group A observed that some content had provocative connotations while being neutral as per the Twitter guidelines. We thus introduced \u2018provocation\u2019 as a fourth category. Annotation guidelines were refined until Krippendorf\u2019s \ud835\udefc[31] of 0.8 was achieved. Annotation phase II: We partnered with a professional data annotation company for crowdsourced annotation of the entire dataset. After the initial screening, we were assigned a group of 10 professional annotators (referred to as Group B). The corpus was divided into batches of 2500 samples. The annotation agreement was calculated for 100 random tweets per batch (annotated by Group A) to ensure the annotation quality in each batch. We obtain an average agreement of 0.71 Krippendorf\u2019s \ud835\udefc. Appendix E provides details about the annotator\u2019s demographics. Observations from data annotation: Data collection and annotation exercises show us that \u2013 (a) Compared to an annotator agreement of 0.80 in phase I, we obtain an agreement of 0.71 at the end of phase II. This reinforces the difficulty in annotating hate speech [44, 55] encompassing the diversity across languages, geographies, and topics. (b) 60% of disagreements were in the provocative class. The difference stems from the subtle nature of provocation. Sample annotations are enlisted in Appendix F. 4 YET ANOTHER HATE SPEECH DATASET? Given the cornucopia of hate speech datasets, a comparison of GOTHate and the existing benchmark datasets is imperative. This section discusses our hypothesis that GOTHate is hard to classify. Apart from Davidson [13] and Founta [19], we also employ three hate shared tasks \u2013 HASOC19 [33], OLID [63], and HatEval [5]. HatEval is a neutrally-seeded dataset. We also compare GOTHate with the implicit hate corpus [16] (referred to as ImpHate, henceforth). Regarding Indian topical diversity, the RETINA dataset proposed by [35] comes closest. An outline of various benchmark datasets is provided in Appendix C. 
Meanwhile, extended experiments from the section are also provided in Appendix D. Class labels Hate (H), Offensive (O), Provocative (P), Neutral (N), Abuse (A), and Spam (S) cover all labels in our dataset and the benchmark datasets. 4.1 Inter-class Similarity Intuition. One can employ Jensen\u2013Shannon (JS) divergence [40] to capture the inter-class proximity within a dataset. The lower the divergence, the closer the class distribution, and the harder it would be to classify and distinguish between them. Analysis. We generate a Laplacian smoothed uni-gram distribution of each class. Table 2 shows that for GOTHate, the JS divergence values for the pairs of H-P=0.087 and N-P=0.063 are lower than other pairs. This low divergence is a cause for the high disagreement the provocative class receives during annotation and the underlying reason for it. Additionally, due to the lack of hate lexicons during data collection, the hatred class is closer to neutral (H-N=0.118) than the offense (H-O=0.135). On the other hand, the offensive class has a slightly higher divergence from other classes (H-O=0.135, P-O=0.119, and N-O=0.121). Posts containing abusive words and hashtags are more likely to be marked as offensive, providing the class with a latent offensive lexicon. We extend the above inter-class comparison to existing hate speech datasets. In terms of hate-neutral distributions, GOTHate has a lower divergence (0.118) than the more explicit counterparts of Davidson (0.339) and Founta (0.314). Besides, owing to low explicitness, GOTHate\u2019s hate-neutral divergence is closer to ImpHate (0.118 vs. 0.122). Overall, it is evident from Table 2 that \u2013 (a) for all hate speech datasets, the inter-class divergence tends to be low; (b) in our case, the reason for the further lowering of divergence can be attributed to the topical similarity and use of neutral seeding for data collection. 4.2 How Hard is GOTHate to Classify? Intuition. Given the lower inter-class divergence of our dataset, we hypothesize that GOTHate will be hard to classify. Revisiting Hate Speech Benchmarks: From Data Curation to System Deployment KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Table 3: Performance comparison of hate speech datasets. We report the class-wise and overall (C) macro-F1 and Matthews correlation coefficient (MCC). Dataset H O/A P/S N C (MCC) GOTHate 0.20 0.40 0.35 0.62 0.39 (0.218) Founta [19] 0.26 0.77 0.41 0.77 0.55 (0.460) Davidson [13] 0.30 0.90 0.78 0.66 (0.585) HASOC19 [33] 0.59 0.78 0.68 (0.378) OLID [63] 0.51 0.81 0.66 (0.316) HatEval [5] 0.61 0.53 0.57 (0.215) ImpHate [16] 0.59 0.74 0.67 (0.331) RETINA [35] 0.59 0.98 0.78 (0.570) Table 4: Evaluating adversarial validation using GOTHate as the target dataset against existing datasets as the source. We report accuracy (ACC), macro-F1 (F1), ROC-AUC, and Matthews correlation coefficient (MCC). Source Dataset Evaluation Metric Acc F1 ROC-AUC MCC Founta [19] 0.98 0.97 0.977 0.949 Davidson [13] 0.99 0.99 0.987 0.974 HASOC19 [33] 0.94 0.93 0.926 0.873 OLID [63] 0.98 0.95 0.973 0.910 HatEval [5] 0.99 0.99 0.985 0.970 ImpHate [16] 0.98 0.98 0.977 0.953 RETINA [35] 0.96 0.96 0.962 0.925 Analysis. We extend upon the \ud835\udc5b-gram TF-IDF-based logistic regression model employed in inter-class divergence study to compare performance under this setup. To account for the varying positive and negative class sizes, we employ the Matthews correlation coefficient (MCC) to establish the lower correlation of our dataset\u2019s prediction. 
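For illustration, the inter-class similarity analysis of Section 4.1 can be sketched as follows: class-wise Laplace-smoothed unigram distributions compared via the Jensen-Shannon divergence. The whitespace tokenizer and smoothing constant are assumptions for the sketch, not the exact preprocessing behind Table 2.

```python
import numpy as np
from collections import Counter

def unigram_dist(texts, vocab, alpha=1.0):
    """Laplace-smoothed unigram distribution over a shared vocabulary."""
    counts = Counter(tok for text in texts for tok in text.lower().split())
    freqs = np.array([counts[w] for w in vocab], dtype=float) + alpha
    return freqs / freqs.sum()

def js_divergence(p, q):
    """Jensen-Shannon divergence (base 2) between two distributions."""
    m = 0.5 * (p + q)
    kl = lambda a, b: float(np.sum(a * np.log2(a / b)))
    return 0.5 * kl(p, m) + 0.5 * kl(q, m)

def pairwise_class_js(texts_by_label):
    """texts_by_label maps a label (e.g. 'H', 'O', 'P', 'N') to its list of posts."""
    vocab = sorted({tok for texts in texts_by_label.values()
                    for text in texts for tok in text.lower().split()})
    dists = {lab: unigram_dist(texts, vocab) for lab, texts in texts_by_label.items()}
    labels = list(dists)
    return {(a, b): js_divergence(dists[a], dists[b])
            for i, a in enumerate(labels) for b in labels[i + 1:]}
```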
The experimental results are reported in Table 3. It can be observed that: (a) Our dataset has the lowest macro-F1 for overall and the respective class. This low performance is further corroborated by the low MCC scores (0.2180) that GOTHate receives against the setup. (b) HatEval, another neutrally seeded dataset, has a low MCC score (0.2153). However, given the binary nature of labels, it still performs better than the 4-way labeled GOTHate. 4.3 Adversarial Validation Intuition. Data drift [32] measures the change in feature space between two dataset versions. All samples in the old (aka source) dataset are labeled as 0 for analysis. Consequently, all samples in the new (aka target) dataset are labeled as 1. A simple classifier is trained to predict the labels as {0,1}. A high performance indicates discriminatory features between the two versions of the dataset. We extend this to compare existing hate speech datasets with GOTHate. Analysis. The training (T) split from the source hate dataset (\ud835\udc4b\ud835\udc47 \ud835\udc60\ud835\udc5f\ud835\udc50) is labeled as (\ud835\udc4c\ud835\udc47 \ud835\udc60\ud835\udc5f\ud835\udc50= 0). Meanwhile, the training split from GOTHate (\ud835\udc4b\ud835\udc47 \ud835\udc61\ud835\udc5f\ud835\udc54) is labelled as (\ud835\udc4c\ud835\udc47 \ud835\udc61\ud835\udc5f\ud835\udc54= 1). Similar labeling is followed for testing (\ud835\udc4b\ud835\udc38,\ud835\udc4c\ud835\udc38). We employ a \ud835\udc5b-gram ({1,2,3}) based TF-IDF logistic regression model to capture the data drift with GOTHate as target data. It can be observed from Table 4 that the lower ROC-AUC scores w.r.t HASOC19 (0.926) and RETINA (0.962) indicate relatively higher similarity of GOTHate to the Hindi-oriented aspects covered by these datasets. However, none of the dataset comparisons lead to a ROC-AUC <.5 or MCC \u22480, which would have indicated that the feature space of GOTHate is indistinguishable from existing datasets. The results are contrary; the scores vary within a narrow range of 0.021 ROCU-AUC and 0.101 MCC. The lowest ROC-AUC (MCC) score is 0.926 (0.873), obtained from HASOC19 (a multi-lingual Hindi corpus). This corroborates the variability in feature space captured by GOTHate. Table 5: Cross-dataset performance comparison among GOTHate, HASOC19 and RETINA. We report ROC-AUC (Matthews correlation coefficient) on binarised label sets. Train\u2193Test\u2192 GOTHate RETINA [35] HASOC19 [33] GOTHate 0.645 (0.128) 0.610 (0.207) RETINA [35] 0.512 (0.044) 0.518 (0.078) HASOC19 [33] 0.546 (0.101) 0.583 (0.079) 4.4 Can GOTHate generalise better? Intuition. From our data-drift experiments, we observe that in terms of feature space, HASOC, and RETINA (datasets focused on the Indian context) are closest to GOTHate. However, both largely depend on hashtags and explicitness to capture hatefulness. While generalisability is an open challenge for hate speech, we hypothesize that topic-driven neutrally seeded datasets like ours should perform better than their lexicon-driven counterparts. In this case, the intuition is that when we train on GOTHate and test on HASOC/RETINA, the performance drop will be less than if we reverse the setup (i.e., train on HASOC/RETINA) and test on GOTHate. Analysis. We again extend upon the \ud835\udc5b-gram TF-IDF-based logistic regression model and perform cross-dataset testing. As the label spaces for GOTHatevary from binary labels of HASOC19 and RETINA, we binarize GOTHate into hateful (H, O) and non-hateful (P, N) subsets. 
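A minimal sketch of the adversarial-validation and cross-dataset transfer setups described above, assuming scikit-learn: word {1,2,3}-gram TF-IDF features feeding a logistic regression, scored with ROC-AUC and MCC. The split ratio, min_df, and other hyperparameters are illustrative assumptions, not the exact configuration used in Tables 4 and 5.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, matthews_corrcoef
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline

def _tfidf_lr():
    return make_pipeline(
        TfidfVectorizer(ngram_range=(1, 3), min_df=2),  # word {1,2,3}-gram TF-IDF
        LogisticRegression(max_iter=1000),
    )

def adversarial_validation(source_texts, target_texts):
    """Label source=0 and target=1; high ROC-AUC/MCC means the two corpora
    occupy clearly separable feature spaces (i.e., strong data drift)."""
    texts = list(source_texts) + list(target_texts)
    labels = [0] * len(source_texts) + [1] * len(target_texts)
    x_tr, x_te, y_tr, y_te = train_test_split(
        texts, labels, test_size=0.2, stratify=labels, random_state=42)
    clf = _tfidf_lr().fit(x_tr, y_tr)
    return {"roc_auc": roc_auc_score(y_te, clf.predict_proba(x_te)[:, 1]),
            "mcc": matthews_corrcoef(y_te, clf.predict(x_te))}

def cross_dataset(train_texts, train_labels, test_texts, test_labels):
    """Binary cross-dataset transfer with the same TF-IDF + LR model."""
    clf = _tfidf_lr().fit(train_texts, train_labels)
    return {"roc_auc": roc_auc_score(test_labels, clf.predict_proba(test_texts)[:, 1]),
            "mcc": matthews_corrcoef(test_labels, clf.predict(test_texts))}
```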
As observed from the ROC-AUC and MCC scores from Table 5, based on common vogue, we see a clear lack of cross-dataset generalisability among the three. When trained on GOTHateand tested on HASOC19, we observe a ROC-AUC of 0.645. Meanwhile, when trained on HASOC19 and tested on GOTHate, we observe the ROC-AUC of 0.512, i.e., a 0.133 difference in performance in the two setups. Similar results are observed for the GOTHate and RETINA pair \u2013 a 0.064 drop of ROC-AUC when trained on RETINA and tested on GOTHate than vice versa. None of these setups are ideal for fully capturing hateful context. Still, under existing circumstances, our dataset provides better scope for knowledge transfer. The characteristics that make GOTHate hard to classify may lend to generalisability. It can be an interesting direction for the future. 5 ENDOGENOUS SIGNAL INFUSION This section introduces the modular mixture-of-experts setup called History-Examplar-Network Infused mBERT (HEN-mBERT) that enriches the textual representations with the ancillary signals and improves the detection of hateful content. Figure 3 illustrates the combined model architecture of HEN-mBERT. KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Kulkarni et al. Figure 3: Model architecture of HEN-mBERT. The embedding for an incoming tweet is obtained from mBERT. The exemplar and timeline modules obtain the input embedding in the form of the mBERT CLS token. Meanwhile, the graph module receives input from the corresponding user\u2019s ego network. Each module enhances the respective input embedding via cross-modal attention. Ultimately, the reasoning module concatenates the three endogenous signals to obtain a context-rich embedding, which passes through an attentive-feed forward block for classification. Base Language Module. We employ multilingual-BERT [15] as our base LM and use the CLS token of the last layer to generate embeddings for exemplar and timeline. For the interaction module, we utilize the representations from the last layer of mBERT given by \ud835\udc4d\u2208R\ud835\udc59\u00d7\ud835\udc51, where \ud835\udc51is the feature dimension of LM, and \ud835\udc59is the maximum sequence length. 5.1 Exemplar Module Intuition. Exemplars refer to the set of sample responses from the training set that are semantically related to the input context. They can provide stylistic and thematic cues to the model [53]. Such an exemplar-based approach is useful when the inter-label diversity is less, as in the case of GOTHate. As one can generate these example samples from within the dataset without scraping extra data, we start by augmenting this signal using dense retrieval via SBERT6 [47]. We employ a label-based exemplar search for the training samples where the exemplars are retrieved from the same label subspace. We extract label invariant (without knowing the label for an incoming test post) exemplars for these validation and test sets using the training dataset for grounding. For each instance, we select the top-\ud835\udc58exemplars based on cosine similarity. Some sample exemplars from train and test sets are provided in Appendix H. Formulation. To extend and extract this within-dataset signal for an incoming tweet, we concatenate the exemplars \ud835\udc43\ud835\udc56= {\ud835\udc5d\ud835\udc561, ..., \ud835\udc5d\ud835\udc56\ud835\udc4f} as \ud835\udc39\ud835\udc52\u2208R\ud835\udc4f\u00d7\ud835\udc51. 
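For illustration, the dense exemplar retrieval described above can be sketched as follows, assuming the sentence-transformers package; the checkpoint name, the value of k, and the label-conditioned filtering flag are assumptions for the sketch rather than the exact configuration.

```python
import numpy as np
from sentence_transformers import SentenceTransformer

# Any multilingual sentence encoder works here; this checkpoint name is an assumption.
encoder = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")

def build_index(train_texts, train_labels):
    """Pre-encode the training pool once; exemplars are drawn from this pool."""
    embs = encoder.encode(train_texts, normalize_embeddings=True)
    return np.asarray(embs), list(train_texts), list(train_labels)

def retrieve_exemplars(query_text, index, k=4, same_label_as=None):
    """Return the top-k exemplars by cosine similarity.

    During training, `same_label_as` restricts the search to the query's own
    label subspace; at validation/test time it is left as None (label-invariant).
    """
    embs, texts, labels = index
    q = encoder.encode([query_text], normalize_embeddings=True)[0]
    sims = embs @ q                      # cosine similarity (embeddings are L2-normalized)
    if same_label_as is not None:
        mask = np.array([lab == same_label_as for lab in labels])
        sims = np.where(mask, sims, -np.inf)
    top = np.argsort(-sims)[:k]
    return [texts[i] for i in top]
```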
This is followed by a non-linear transformation and dimensionality reduction casting F_e to \hat{F}_e \in R^{b \times d_f}. The language vector Z also goes through a dimensionality reduction to generate vector Z_e \in R^{l \times d_f}. To extract salient features from F_e and Z_e, they are independently passed to a self-attention module [59] to generate F'_e (Eq. 1) and Z'_e (Eq. 2), respectively. Finally, these enriched vectors interact with each other via multi-headed cross-modal attention [59] to facilitate deep semantic interaction. Specifically, we query F'_e against Z'_e as the key and the value, generating the exemplar-infused language representation \hat{Z}_e (Eq. 3). (We experimented with BM25 as well, but SBERT performed better.)

F'_e = \mathrm{Softmax}\left( \frac{F_e F_e^T}{\sqrt{d_f}} \right) F_e \quad (1)

Z'_e = \mathrm{Softmax}\left( \frac{Z_e Z_e^T}{\sqrt{d_f}} \right) Z_e \quad (2)

\hat{Z}_e = \mathrm{MultiHead}\left( \mathrm{Softmax}\left( \frac{F'_e {Z'_e}^T}{\sqrt{d_f}} \right) Z'_e \right) \quad (3)

5.2 Timeline Module
Intuition. While exemplars help obtain latent signals from within the dataset, a user's propensity for posting hateful content is not just a one-time incident. Users who interact with similarly malicious users are likely to disseminate hateful content online regularly and are more likely to post offensive content than their benign counterparts [37]. Thus, a user's historical data provides crucial insights into whether they will post hostile material in the future [45].
Formulation. To begin with, we concatenate the historical posts of a user, T_i = {t_{i1}, ..., t_{ia}}, to generate feature vector F_t \in R^{a \times d}. To encapsulate the sequential nature of the historical data, F_t undergoes two layers of LSTM to generate the temporally-enriched vector \hat{F}_t. Here as well, Z undergoes dimensionality reduction, producing vector Z_t \in R^{l \times d_f}. To extract the temporal and language-specific nuances, the vectors \hat{F}_t and Z_t undergo a self-attention operation, similar to Eq. 1 and Eq. 2. This results in their enriched forms F'_t and Z'_t. Finally, the language and timeline features interact using multi-headed cross-modal attention, which yields the final feature vector \hat{Z}_t (Eq. 4):

\hat{Z}_t = \mathrm{MultiHead}\left( \mathrm{Softmax}\left( \frac{F'_t {Z'_t}^T}{\sqrt{d_f}} \right) Z'_t \right) \quad (4)

5.3 Graph Module
Intuition. Hateful users are likelier to follow and retweet other toxic users [37, 48]. Therefore, we examine their ego network and extract their interaction patterns. We begin by constructing a directed homogeneous network of all the root users, their first-hop followers/followees, and the users retweeting the root users' tweets. We add varying degrees of edge weights to distinguish the different interactions further. Initially, all edges are given a weight of 1, except the self-loop weight of 0.1. Followers/followees who retweet the root users' tweets earn higher precedence with an edge weight of 1.5. Meanwhile, external retweeters not included in the root users' follower-followee set are given a slightly lower weight of 0.5. The base network embeddings are created using node2vec [24] (we also experimented with GraphSage [25], but node2vec fared better).
Formulation. For a tweet x_i by a user u_j, HEN-mBERT utilizes a user-level ego network o_j = G(V'_j, E'_j) to extract useful interaction patterns. We select the top 100 followers, followees, and retweeters with the highest node centrality for each user. Each node in V'_j is initialized with node embeddings of dimension d_g. The ego network goes through three rounds of graph convolutions [29] followed by a max-pooling operation to spawn an aggregated graph embedding F_g \in R^{d_f}, where d > d_f > d_g (Eq. 5). The language vector Z goes through a dimensionality reduction to generate vector Z_g \in R^{l \times d_f} (Eq. 6). To extract salient features from F_g and Z_g, they are independently passed to a self-attention module [59] to generate F'_g and Z'_g, similar to Eq. 1 and Eq. 2. Finally, these enriched vectors interact with each other via multi-headed cross-modal attention. We query F'_g against Z'_g as the key and the value, generating the network-aware language representation \hat{Z}_g (Eq. 7).
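For illustration, the shared self-attention plus cross-modal attention pattern behind Eqs. 1-4 and 7 can be sketched in PyTorch as follows. The dimensions, head count, and module names below are assumptions for the sketch, not the authors' released implementation.

```python
import torch
import torch.nn as nn

class CrossModalFusion(nn.Module):
    """Schematic per-module fusion: scaled self-attention on each stream,
    then multi-head attention with the signal stream as the query and the
    (reduced) language stream as key/value."""

    def __init__(self, d_model=768, d_f=256, n_heads=4):
        super().__init__()
        self.reduce_lang = nn.Sequential(nn.Linear(d_model, d_f), nn.ReLU())
        self.reduce_signal = nn.Sequential(nn.Linear(d_model, d_f), nn.ReLU())
        self.cross_attn = nn.MultiheadAttention(d_f, n_heads, batch_first=True)

    @staticmethod
    def scaled_self_attention(x):
        # Softmax(X X^T / sqrt(d)) X, as in Eqs. (1)-(2)
        d = x.size(-1)
        scores = torch.softmax(x @ x.transpose(-2, -1) / d ** 0.5, dim=-1)
        return scores @ x

    def forward(self, signal_feats, lang_feats):
        # signal_feats: (batch, n_signal_tokens, d_model), e.g. exemplar/timeline/graph embeddings
        # lang_feats:   (batch, seq_len, d_model), mBERT token representations
        f = self.scaled_self_attention(self.reduce_signal(signal_feats))  # F'
        z = self.scaled_self_attention(self.reduce_lang(lang_feats))      # Z'
        fused, _ = self.cross_attn(query=f, key=z, value=z)               # Eq. (3)/(4)/(7)
        return fused
```

Each of the three modules would instantiate such a block with its own signal stream, and the reasoning module would concatenate the resulting fused vectors.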
The graph-module operations referenced above (Eqs. 5-7) are:

F_g = \mathrm{GraphConv}\left( G(V'_j, E'_j) \right) \quad (5)

Z_g = \mathrm{ReLU}\left( Z W_g + b_g \right) \quad (6)

\hat{Z}_g = \mathrm{MultiHead}\left( \mathrm{Softmax}\left( \frac{F'_g {Z'_g}^T}{\sqrt{d_f}} \right) Z'_g \right) \quad (7)

5.4 Reasoning Module
The reasoning module aptly coalesces the information from each module via mutual interaction. We concatenate the vectors \hat{Z}_g, \hat{Z}_e, and \hat{Z}_t and apply a non-linear transformation for dimensionality reduction, resulting in vector Z_{final} \in R^{l \times d}. This vector is further enhanced using a multi-headed self-attention mechanism. The final vector is then passed through a classification head.

6 EXPERIMENTS AND RESULTS
This section outlines the results and error analysis of HEN-mBERT and its variants compared to baseline methods when classifying GOTHate on a 4-way hate detection task (Table 6). We additionally enlist the experimental setup for reproducibility in Appendix G.

6.1 Performance Comparison
Due to the lack of auxiliary signals in other hate datasets, we cannot extend HEN-mBERT to test on those. However, we compare the endogenous signal augmentation in HEN-mBERT against numerous relevant baselines, and we additionally report the performance of adding each latent signal independently. Due to skewness in label size, we focus on class-wise and overall F1 scores as our performance metric. Table 6 shows the model-level performance; a detailed class-wise precision and recall breakdown is provided in Appendix I.

Traditional models: We employ Naive Bayes (NB), Logistic Regression (LR), and Support Vector Machines (SVM) with n-gram TF-IDF features. These models (M1-M3), enlisted in Table 6, show that the traditional machine learning baselines yield inferior performances on the hate and provocation labels. Upon examination (breakdown provided in Appendix I), we observe that those systems are characterized by a high recall for the benign samples and a low recall for the hateful ones, indicating that such models are aggressive in predicting each sample as non-hateful.

Table 6: Performance analysis in terms of macro-F1 of competing models as discussed in Section 6.1.
Model / Hate / Offensive / Provocative / Neutral / Combined
M1: NB 0.1802 0.4153 0.3004 0.7267 0.4057
M2: LR 0.1957 0.4430 0.3271 0.7601 0.4315
M3: SVC 0.2382 0.4520 0.3519 0.7382 0.4451
M4: Davidson [13] 0.2504 0.4070 0.3734 0.6069 0.4094
M5: CNN 0.2474 0.4148 0.3716 0.6019 0.4087
M6: LSTM 0.2579 0.4508 0.4052 0.6352 0.4373
M7: Founta [19] 0.2075 0.3603 0.2864 0.5650 0.3548
M8: mBERT 0.2795 0.4939 0.3856 0.7285 0.4719
M9: ARHNet [22] 0.2860 0.4719 0.4075 0.7140 0.4699
M10: HurtBERT [30] 0.2717 0.4937 0.4024 0.7143 0.4705
M11: HEN-mBERT_E 0.3090 0.5019 0.4395 0.6977 0.4870
M12: HEN-mBERT_T 0.2894 0.4853 0.4271 0.6987 0.4751
M13: HEN-mBERT_G 0.2829 0.5189 0.3891 0.6938 0.4712
M14: HEN-mBERT 0.3367 0.4997 0.4061 0.7358 0.4946

By augmenting the n-gram features with semantic features (POS tags) and textual meta-data (VADER score, hashtag counts, etc.), the Davidson [13] model (M4) slightly improves the performance for the hate class compared to the vanilla statistical baselines. The skewness of the class labels and over-dependency on word co-occurrence contribute to the downfall of these systems.

Neural baselines: In the next set of models, we employ vanilla CNN [27] and LSTM [54] coupled with concatenated GloVe embeddings of English and Hindi (EN+HI). We observe that the CNN and LSTM neural models (M5-M6) fare better than the traditional ones but still give underwhelming results. We notice about a 2-5% gain over traditional methods for the hate and provocative instances in terms of F1 score, while the F1 score for the offensive label more or less remains the same. The Founta [20] model (M7), a self-attention-based RNN with GloVe (EN+HI), also reports low performance, especially for the hate class. Static word vectors and their inability to capture context contribute to their pitfalls.

Transformer baselines: By applying mBERT (M8), we observe a substantial gain over the traditional and neural baselines – a 2%, 4%, 3%, and 4% improvement in F1-score for the hate, offensive, provocative, and non-hate labels, respectively. Additionally, we train two existing hate detection models, ARHNet (M9) [22] and HurtBERT (M10) [30]. ARHNet concatenates the language representations with node2vec user embeddings as a late-fusion combination; for a fairer comparison, we train an mBERT-based ARHNet instead of the original BiLSTM-based one. Though we see about a 1% improvement for the hate class, overall it does not fare better than naive mBERT. On the other hand, the HurtBERT model, which jointly encodes multilingual hate lexicon knowledge with the language representations, does not showcase improvements over mBERT. This might be attributed to the absence of large-scale hateful lexicons in GOTHate, which impedes HurtBERT's performance.

Endogenous signal infusion: The mBERT-based model that incorporates all three endogenous signals is HEN-mBERT (M14). The individual signal-specific variants are listed as HEN-mBERT_{E, T, G}, representing the addition of either exemplar, historical, or network-based features, respectively. As Table 6 shows, the performance for hate classification is enhanced by adding each module. The exemplar-only variant (M11: HEN-mBERT_E) reports an overall increase in performance, presenting noteworthy gains for the hate and provocative classes.
As these classes are the most difficult to classify, we conjecture that adding exemplary tweets helps the model unravel the stylistic and semantic nuances across these labels. It is especially true for the provocative class, which witnesses the highest disagreement during annotation. While existing baselines reach F1 of 0.4075 for provocation detection, HEN-mBERTE improves it to 0.4395. Meanwhile, infusing exemplar signals significantly improves (2.4 F1 increase) hate classification. While the timeline module (M112: HEN-mBERTT) does provide an improvement over existing baselines, the results do not improve upon the exemplar setup. On the other hand, the graph module (M13: HEN-mBERTG) scores the highest points for the offensive class. It suggests that the offensive users\u2019 interaction patterns are more distinct than those of the other classes. Finally, combining all three signals in HEN-mBERT (M14) summits the performance across all the metrics \u2013 a significant improvement of about 5% in the hate label F1-score (from best baseline) and a 3 point improvement over best HEN-mBERTvariant. The troublesome provocative class also enjoys a 2% rise in the F1 score. Combining all three variants also keeps the neutral class\u2019s performance on the higher side (0.7358). Collaboratively, we see a 2 point increase in the combined macro-F1. Interestingly, from Table 6, we observe that the addition of HEN-mBERTE gives the highest improvement for provocative classification (0.4395). Meanwhile, the addition of HEN-mBERTG improves offensive classification to the highest degree (0.5189). Combining all signals shows the most significant improvement in the hate class (0.3367) and overall (0.4946). Based on the extensive performance comparison for GOTHate against benchmark models, we observe that: (a) Our attempts at modeling textual features in different forms (from non-contextual to contextual to endogenous informed) corroborate that hate speech detection is not just a text classification task. (b) HEN-mBERT and its variants produce a balanced and improved performance, especially for the harder-to-classify classes of hate and provocation. (c) No one auxiliary signal is fully comprehensive. Combining them helps differentiate the latent space between hateful and non-hateful contexts. (d) The above points reiterate our second hypothesis about the usefulness of auxiliary signals in building better hate classification. Note on the proposed framework: We experimented with various modeling techniques and observed that simpler but endogenous signal-rich setups worked better. Cognizant that not all internet forums can access all features, we propose using a pluggable framework where the individual modules can be tailored for the use case. Further, the attention-based infusion brings the latent subspaces of different auxiliary signals closer to the textual subspace. Building upon HEN-mBERT provided in this study is the way forward for context-aware hate speech detection. 6.2 Error Analysis Given that HEN-mBERT extends the mBERT architecture, it is critical to investigate and compare the quality of their predictions. While HEN-mBERT is enriched with signals to provide additional context, mBERT relies only on the information captured within a tweet\u2019s text. For tweets that contain explicit offences such as \u201cAPP is ISIS OF INDIA\u201d and \u201carrest Sonia Gandhi\u201d (#1 in Table 7), both the models effectively pick on these cues and make correct predictions. 
However, in examples that require a post\u2019s contextual knowledge (#2 in Table 7), mBERT falters. A closer analysis of this tweet\u2019s users reveals that they actively post Islamophobic content. Consider the following sample tweets from the user\u2019s posting history, collected from the user\u2019s timeline posted before (\ud835\udc61\ud835\udc4f) and after (\ud835\udc61\ud835\udc4e) the tweet under examination: (tb) \"look at what Hindus living in mixed-population localities are facing, what \ud835\udc40\ud835\udc38\ud835\udc41\ud835\udc47\ud835\udc3c\ud835\udc42\ud835\udc41had to face for merely asking his Muslim neighbors not to harass his daughter sexually...and even then if u ask why people don\u2019t rent to Muslims, get ur head examined.\" (ta) \"\ud835\udc40\ud835\udc38\ud835\udc41\ud835\udc47\ud835\udc3c\ud835\udc42\ud835\udc41 and \ud835\udc40\ud835\udc38\ud835\udc41\ud835\udc47\ud835\udc3c\ud835\udc42\ud835\udc41naah...Islamists will never accept Muslim refugees; they will tell the Muslims to create havoc in their home countries and do whatever it takes to convert Dar-ul-Harb into Dar-ul-Islam. Something we should seriously consider doing with Pak Hindus too\". Using such information, our model can develop an understanding of this user\u2019s hate propensity to make the correct predictions, while mBERT predicts incorrectly. While most misclassifications by both models can be attributed to the ambiguities of hate speech, one must also consider mislabeled annotations. As observed from #3 in Table 7, though the tweet seems innocuous, it was annotated as provocative and predicted the same by mBERT, while HEN-mBERT predicts it as offensive. Such mislabelling indicates that no hate speech pipeline is immune to annotation biases. 7 CONTENT MODERATION PIPELINE Inspired by our experiments, in partnership with Wipro AI, we are developing an interactive web interface for contextual hate speech detection [34]. This interface will be a part of their more extensive pipeline to flag and analyze harmful content on the web. An overview of various components of the semi-automated content flagging interface is outlined in Figure 4. In our current experiments, we establish our hypothesis of using endogenous signals by testing in offline mode in which the test set was split for GOTHate and the auxiliary signals were already curated. However, in our proposed pipeline, the system will operate online. For an incoming tweet posted by a user, we will query their most recent timeline and network interaction in real time. As noted in our experiments, for a slight variation in accuracy, the static node2vec system can be replaced by incremental GraphSage [37]. Meanwhile, the pretrained exemplar knowledge base will extract exemplars for an incoming tweet. The auxiliary signals thus obtained will be used to perform contextual hate speech detection. The existing input post, its endogenous signals, and the predicted label will aid content moderators in the eventual flagging of content. In future iterations, we aim to use this pipeline to generate feedback from moderators and incrementally train our detection and exemplar models [46]. The rampant spread of hate speech on social media has serious ramifications for victims across demographics. While characterizing and detecting discriminatory speech online is an active research area, most works have focused on a rather explicit form of hate speech, not accounting for topical heterogeneity and linguistic diversity. 
In this work, we presented Geo-pOlitical Topical Hate dataset (GOTHate), a multi-class hate speech dataset that contains minimal slur terms and covers assorted topics across different languages. In summary, GOTHate manifests as a challenging corpus for classifying hate. We benchmark GOTHate against several benchmark methods. We further employ History-Examplar-Network Infused mBERT(HEN-mBERT) \u2013 a mixture of experts that subsumes endogenous knowledge in mBERT to perform contextually rich hate speech detection. Inspired by the utility of endogenous signals, we are collaborating with Wipro AI to develop a feature-rich pipeline for detecting and moderating hateful content. ACKNOWLEDGMENTS The authors would like to acknowledge the support of the Prime Minister Doctoral Fellowship (SERB India) and the Wipro Research Grant. We would also like to thank our industry partner Wipro AI. Wipro, an Indian multinational conglomerate with diverse businesses, coordinated the field study for possible deployment. We acknowledge the support of Shivam Sharma, Technical Lead, Wipro AI, for the same. We thank Chhavi Jain and Rituparna Mukherjee for their contributions to data curation. We also thank Xsaras for their support in crowdsourced annotations. KDD \u201923, August 6\u201310, 2023, Long Beach, CA, USA Kulkarni et al." + }, + { + "url": "http://arxiv.org/abs/1802.00393v3", + "title": "Large Scale Crowdsourcing and Characterization of Twitter Abusive Behavior", + "abstract": "In recent years, offensive, abusive and hateful language, sexism, racism and\nother types of aggressive and cyberbullying behavior have been manifesting with\nincreased frequency, and in many online social media platforms. In fact, past\nscientific work focused on studying these forms in popular media, such as\nFacebook and Twitter. Building on such work, we present an 8-month study of the\nvarious forms of abusive behavior on Twitter, in a holistic fashion. Departing\nfrom past work, we examine a wide variety of labeling schemes, which cover\ndifferent forms of abusive behavior, at the same time. We propose an\nincremental and iterative methodology, that utilizes the power of crowdsourcing\nto annotate a large scale collection of tweets with a set of abuse-related\nlabels. In fact, by applying our methodology including statistical analysis for\nlabel merging or elimination, we identify a reduced but robust set of labels.\nFinally, we offer a first overview and findings of our collected and annotated\ndataset of 100 thousand tweets, which we make publicly available for further\nscientific exploration.", + "authors": "Antigoni-Maria Founta, Constantinos Djouvas, Despoina Chatzakou, Ilias Leontiadis, Jeremy Blackburn, Gianluca Stringhini, Athena Vakali, Michael Sirivianos, Nicolas Kourtellis", + "published": "2018-02-01", + "updated": "2018-04-15", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI", + "68T06", + "K.4.2" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2108.05927v1", + "title": "Overview of the HASOC track at FIRE 2020: Hate Speech and Offensive Content Identification in Indo-European Languages", + "abstract": "With the growth of social media, the spread of hate speech is also increasing\nrapidly. Social media are widely used in many countries. Also Hate Speech is\nspreading in these countries. This brings a need for multilingual Hate Speech\ndetection algorithms. Much research in this area is dedicated to English at the\nmoment. 
The HASOC track intends to provide a platform to develop and optimize\nHate Speech detection algorithms for Hindi, German and English. The dataset is\ncollected from a Twitter archive and pre-classified by a machine learning\nsystem. HASOC has two sub-task for all three languages: task A is a binary\nclassification problem (Hate and Not Offensive) while task B is a fine-grained\nclassification problem for three classes (HATE) Hate speech, OFFENSIVE and\nPROFANITY. Overall, 252 runs were submitted by 40 teams. The performance of the\nbest classification algorithms for task A are F1 measures of 0.51, 0.53 and\n0.52 for English, Hindi, and German, respectively. For task B, the best\nclassification algorithms achieved F1 measures of 0.26, 0.33 and 0.29 for\nEnglish, Hindi, and German, respectively. This article presents the tasks and\nthe data development as well as the results. The best performing algorithms\nwere mainly variants of the transformer architecture BERT. However, also other\nsystems were applied with good success", + "authors": "Thomas Mandla, Sandip Modha, Gautam Kishore Shahi, Amit Kumar Jaiswal, Durgesh Nandini, Daksh Patel, Prasenjit Majumder, Johannes Sch\u00e4fer", + "published": "2021-08-12", + "updated": "2021-08-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.CY" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2109.05322v1", + "title": "Latent Hatred: A Benchmark for Understanding Implicit Hate Speech", + "abstract": "Hate speech has grown significantly on social media, causing serious\nconsequences for victims of all demographics. Despite much attention being paid\nto characterize and detect discriminatory speech, most work has focused on\nexplicit or overt hate speech, failing to address a more pervasive form based\non coded or indirect language. To fill this gap, this work introduces a\ntheoretically-justified taxonomy of implicit hate speech and a benchmark corpus\nwith fine-grained labels for each message and its implication. We present\nsystematic analyses of our dataset using contemporary baselines to detect and\nexplain implicit hate speech, and we discuss key features that challenge\nexisting models. This dataset will continue to serve as a useful benchmark for\nunderstanding this multifaceted issue.", + "authors": "Mai ElSherief, Caleb Ziems, David Muchlinski, Vaishnavi Anupindi, Jordyn Seybolt, Munmun De Choudhury, Diyi Yang", + "published": "2021-09-11", + "updated": "2021-09-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.SI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.09509v4", + "title": "ToxiGen: A Large-Scale Machine-Generated Dataset for Adversarial and Implicit Hate Speech Detection", + "abstract": "Toxic language detection systems often falsely flag text that contains\nminority group mentions as toxic, as those groups are often the targets of\nonline hate. Such over-reliance on spurious correlations also causes systems to\nstruggle with detecting implicitly toxic language. To help mitigate these\nissues, we create ToxiGen, a new large-scale and machine-generated dataset of\n274k toxic and benign statements about 13 minority groups. We develop a\ndemonstration-based prompting framework and an adversarial\nclassifier-in-the-loop decoding method to generate subtly toxic and benign text\nwith a massive pretrained language model. 
Controlling machine generation in\nthis way allows ToxiGen to cover implicitly toxic text at a larger scale, and\nabout more demographic groups, than previous resources of human-written text.\nWe conduct a human evaluation on a challenging subset of ToxiGen and find that\nannotators struggle to distinguish machine-generated text from human-written\nlanguage. We also find that 94.5% of toxic examples are labeled as hate speech\nby human annotators. Using three publicly-available datasets, we show that\nfinetuning a toxicity classifier on our data improves its performance on\nhuman-written data substantially. We also demonstrate that ToxiGen can be used\nto fight machine-generated toxicity as finetuning improves the classifier\nsignificantly on our evaluation subset. Our code and data can be found at\nhttps://github.com/microsoft/ToxiGen.", + "authors": "Thomas Hartvigsen, Saadia Gabriel, Hamid Palangi, Maarten Sap, Dipankar Ray, Ece Kamar", + "published": "2022-03-17", + "updated": "2022-07-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1705.09993v2", + "title": "Deep Learning for User Comment Moderation", + "abstract": "Experimenting with a new dataset of 1.6M user comments from a Greek news\nportal and existing datasets of English Wikipedia comments, we show that an RNN\noutperforms the previous state of the art in moderation. A deep,\nclassification-specific attention mechanism improves further the overall\nperformance of the RNN. We also compare against a CNN and a word-list baseline,\nconsidering both fully automatic and semi-automatic moderation.", + "authors": "John Pavlopoulos, Prodromos Malakasiotis, Ion Androutsopoulos", + "published": "2017-05-28", + "updated": "2017-07-17", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1804.03124v2", + "title": "Leveraging Intra-User and Inter-User Representation Learning for Automated Hate Speech Detection", + "abstract": "Hate speech detection is a critical, yet challenging problem in Natural\nLanguage Processing (NLP). Despite the existence of numerous studies dedicated\nto the development of NLP hate speech detection approaches, the accuracy is\nstill poor. The central problem is that social media posts are short and noisy,\nand most existing hate speech detection solutions take each post as an isolated\ninput instance, which is likely to yield high false positive and negative\nrates. In this paper, we radically improve automated hate speech detection by\npresenting a novel model that leverages intra-user and inter-user\nrepresentation learning for robust hate speech detection on Twitter. In\naddition to the target Tweet, we collect and analyze the user's historical\nposts to model intra-user Tweet representations. To suppress the noise in a\nsingle Tweet, we also model the similar Tweets posted by all other users with\nreinforced inter-user representation learning techniques. Experimentally, we\nshow that leveraging these two representations can significantly improve the\nf-score of a strong bidirectional LSTM baseline model by 10.1%.", + "authors": "Jing Qian, Mai ElSherief, Elizabeth M. 
Belding, William Yang Wang", + "published": "2018-04-09", + "updated": "2018-09-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1706.03762v7", + "title": "Attention Is All You Need", + "abstract": "The dominant sequence transduction models are based on complex recurrent or\nconvolutional neural networks in an encoder-decoder configuration. The best\nperforming models also connect the encoder and decoder through an attention\nmechanism. We propose a new simple network architecture, the Transformer, based\nsolely on attention mechanisms, dispensing with recurrence and convolutions\nentirely. Experiments on two machine translation tasks show these models to be\nsuperior in quality while being more parallelizable and requiring significantly\nless time to train. Our model achieves 28.4 BLEU on the WMT 2014\nEnglish-to-German translation task, improving over the existing best results,\nincluding ensembles by over 2 BLEU. On the WMT 2014 English-to-French\ntranslation task, our model establishes a new single-model state-of-the-art\nBLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction\nof the training costs of the best models from the literature. We show that the\nTransformer generalizes well to other tasks by applying it successfully to\nEnglish constituency parsing both with large and limited training data.", + "authors": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin", + "published": "2017-06-12", + "updated": "2023-08-02", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.11799v1", + "title": "DeepHate: Hate Speech Detection via Multi-Faceted Text Representations", + "abstract": "Online hate speech is an important issue that breaks the cohesiveness of\nonline social communities and even raises public safety concerns in our\nsocieties. Motivated by this rising issue, researchers have developed many\ntraditional machine learning and deep learning methods to detect hate speech in\nonline social platforms automatically. However, most of these methods have only\nconsidered single type textual feature, e.g., term frequency, or using word\nembeddings. Such approaches neglect the other rich textual information that\ncould be utilized to improve hate speech detection. In this paper, we propose\nDeepHate, a novel deep learning model that combines multi-faceted text\nrepresentations such as word embeddings, sentiments, and topical information,\nto detect hate speech in online social platforms. We conduct extensive\nexperiments and evaluate DeepHate on three large publicly available real-world\ndatasets. Our experiment results show that DeepHate outperforms the\nstate-of-the-art baselines on the hate speech detection task. 
We also perform\ncase studies to provide insights into the salient features that best aid in\ndetecting hate speech in online social platforms.", + "authors": "Rui Cao, Roy Ka-Wei Lee, Tuan-Anh Hoang", + "published": "2021-03-14", + "updated": "2021-03-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.SI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1809.04444v1", + "title": "Hate Speech Dataset from a White Supremacy Forum", + "abstract": "Hate speech is commonly defined as any communication that disparages a target\ngroup of people based on some characteristic such as race, colour, ethnicity,\ngender, sexual orientation, nationality, religion, or other characteristic. Due\nto the massive rise of user-generated web content on social media, the amount\nof hate speech is also steadily increasing. Over the past years, interest in\nonline hate speech detection and, particularly, the automation of this task has\ncontinuously grown, along with the societal impact of the phenomenon. This\npaper describes a hate speech dataset composed of thousands of sentences\nmanually labelled as containing hate speech or not. The sentences have been\nextracted from Stormfront, a white supremacist forum. A custom annotation tool\nhas been developed to carry out the manual labelling task which, among other\nthings, allows the annotators to choose whether to read the context of a\nsentence before labelling it. The paper also provides a thoughtful qualitative\nand quantitative study of the resulting dataset and several baseline\nexperiments with different classification models. The dataset is publicly\navailable.", + "authors": "Ona de Gibert, Naiara Perez, Aitor Garc\u00eda-Pablos, Montse Cuadros", + "published": "2018-09-12", + "updated": "2018-09-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2010.12472v2", + "title": "HateBERT: Retraining BERT for Abusive Language Detection in English", + "abstract": "In this paper, we introduce HateBERT, a re-trained BERT model for abusive\nlanguage detection in English. The model was trained on RAL-E, a large-scale\ndataset of Reddit comments in English from communities banned for being\noffensive, abusive, or hateful that we have collected and made available to the\npublic. We present the results of a detailed comparison between a general\npre-trained language model and the abuse-inclined version obtained by\nretraining with posts from the banned communities on three English datasets for\noffensive, abusive language and hate speech detection tasks. In all datasets,\nHateBERT outperforms the corresponding general BERT model. We also discuss a\nbattery of experiments comparing the portability of the generic pre-trained\nlanguage model and its corresponding abusive language-inclined counterpart\nacross the datasets, indicating that portability is affected by compatibility\nof the annotated phenomena.", + "authors": "Tommaso Caselli, Valerio Basile, Jelena Mitrovi\u0107, Michael Granitzer", + "published": "2020-10-23", + "updated": "2021-02-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1706.00188v1", + "title": "Deep Learning for Hate Speech Detection in Tweets", + "abstract": "Hate speech detection on Twitter is critical for applications like\ncontroversial event extraction, building AI chatterbots, content\nrecommendation, and sentiment analysis. 
We define this task as being able to\nclassify a tweet as racist, sexist or neither. The complexity of the natural\nlanguage constructs makes this task very challenging. We perform extensive\nexperiments with multiple deep learning architectures to learn semantic word\nembeddings to handle this complexity. Our experiments on a benchmark dataset of\n16K annotated tweets show that such deep learning methods outperform\nstate-of-the-art char/word n-gram methods by ~18 F1 points.", + "authors": "Pinkesh Badjatiya, Shashank Gupta, Manish Gupta, Vasudeva Varma", + "published": "2017-06-01", + "updated": "2017-06-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1703.04009v1", + "title": "Automated Hate Speech Detection and the Problem of Offensive Language", + "abstract": "A key challenge for automatic hate-speech detection on social media is the\nseparation of hate speech from other instances of offensive language. Lexical\ndetection methods tend to have low precision because they classify all messages\ncontaining particular terms as hate speech and previous work using supervised\nlearning has failed to distinguish between the two categories. We used a\ncrowd-sourced hate speech lexicon to collect tweets containing hate speech\nkeywords. We use crowd-sourcing to label a sample of these tweets into three\ncategories: those containing hate speech, only offensive language, and those\nwith neither. We train a multi-class classifier to distinguish between these\ndifferent categories. Close analysis of the predictions and the errors shows\nwhen we can reliably separate hate speech from other offensive language and\nwhen this differentiation is more difficult. We find that racist and homophobic\ntweets are more likely to be classified as hate speech but that sexist tweets\nare generally classified as offensive. Tweets without explicit hate keywords\nare also more difficult to classify.", + "authors": "Thomas Davidson, Dana Warmsley, Michael Macy, Ingmar Weber", + "published": "2017-03-11", + "updated": "2017-03-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1910.12574v1", + "title": "A BERT-Based Transfer Learning Approach for Hate Speech Detection in Online Social Media", + "abstract": "Generated hateful and toxic content by a portion of users in social media is\na rising phenomenon that motivated researchers to dedicate substantial efforts\nto the challenging direction of hateful content identification. We not only\nneed an efficient automatic hate speech detection model based on advanced\nmachine learning and natural language processing, but also a sufficiently large\namount of annotated data to train a model. The lack of a sufficient amount of\nlabelled hate speech data, along with the existing biases, has been the main\nissue in this domain of research. To address these needs, in this study we\nintroduce a novel transfer learning approach based on an existing pre-trained\nlanguage model called BERT (Bidirectional Encoder Representations from\nTransformers). More specifically, we investigate the ability of BERT at\ncapturing hateful context within social media content by using new fine-tuning\nmethods based on transfer learning. To evaluate our proposed approach, we use\ntwo publicly available datasets that have been annotated for racism, sexism,\nhate, or offensive content on Twitter. 
The results show that our solution\nobtains considerable performance on these datasets in terms of precision and\nrecall in comparison to existing approaches. Consequently, our model can\ncapture some biases in data annotation and collection process and can\npotentially lead us to a more accurate model.", + "authors": "Marzieh Mozafari, Reza Farahbakhsh, Noel Crespi", + "published": "2019-10-28", + "updated": "2019-10-28", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI", + "cs.CL", + "cs.IR", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.11800v1", + "title": "AngryBERT: Joint Learning Target and Emotion for Hate Speech Detection", + "abstract": "Automated hate speech detection in social media is a challenging task that\nhas recently gained significant traction in the data mining and Natural\nLanguage Processing community. However, most of the existing methods adopt a\nsupervised approach that depended heavily on the annotated hate speech\ndatasets, which are imbalanced and often lack training samples for hateful\ncontent. This paper addresses the research gaps by proposing a novel multitask\nlearning-based model, AngryBERT, which jointly learns hate speech detection\nwith sentiment classification and target identification as secondary relevant\ntasks. We conduct extensive experiments to augment three commonly-used hate\nspeech detection datasets. Our experiment results show that AngryBERT\noutperforms state-of-the-art single-task-learning and multitask learning\nbaselines. We conduct ablation studies and case studies to empirically examine\nthe strengths and characteristics of our AngryBERT model and show that the\nsecondary tasks are able to improve hate speech detection.", + "authors": "Md Rabiul Awal, Rui Cao, Roy Ka-Wei Lee, Sandra Mitrovic", + "published": "2021-03-14", + "updated": "2021-03-14", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.SI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.01111v2", + "title": "Large-Scale Hate Speech Detection with Cross-Domain Transfer", + "abstract": "The performance of hate speech detection models relies on the datasets on\nwhich the models are trained. Existing datasets are mostly prepared with a\nlimited number of instances or hate domains that define hate topics. This\nhinders large-scale analysis and transfer learning with respect to hate\ndomains. In this study, we construct large-scale tweet datasets for hate speech\ndetection in English and a low-resource language, Turkish, consisting of\nhuman-labeled 100k tweets per each. Our datasets are designed to have equal\nnumber of tweets distributed over five domains. The experimental results\nsupported by statistical tests show that Transformer-based language models\noutperform conventional bag-of-words and neural models by at least 5% in\nEnglish and 10% in Turkish for large-scale hate speech detection. The\nperformance is also scalable to different training sizes, such that 98% of\nperformance in English, and 97% in Turkish, are recovered when 20% of training\ninstances are used. We further examine the generalization ability of\ncross-domain transfer among hate domains. We show that 96% of the performance\nof a target domain in average is recovered by other domains for English, and\n92% for Turkish. 
Gender and religion are more successful to generalize to other\ndomains, while sports fail most.", + "authors": "Cagri Toraman, Furkan \u015eahinu\u00e7, Eyup Halit Yilmaz", + "published": "2022-03-02", + "updated": "2022-07-05", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.SI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2205.01848v2", + "title": "Optimizing Mixture of Experts using Dynamic Recompilations", + "abstract": "The Mixture of Experts architecture allows for outrageously large neural\nnetworks by scaling model parameter size independently from computational\ndemand (FLOPs). However, current DNN frameworks cannot effectively support the\ndynamic data flow in Mixture of Experts, and implementations on top of these\nframeworks need to use workarounds that introduce significant overheads. To\naddress the limitation of these frameworks, we present DynaMoE, a DNN library\nthat uses dynamic recompilations to optimize and adapt the use of computational\nresources to the dynamic needs of Mixture of Experts models. Our evaluation\nshows that DynaMoE achieves a 1.8x speedup and supports 2.3x larger model sizes\nwhen compared to existing MoE systems, even when not using recompilations. We\nthen present further optimizations enabled by dynamic recompilations that yield\nan additional 1.7x speedup while simultaneously reducing memory pressure and\nimproving model quality.", + "authors": "Ferdinand Kossmann, Zhihao Jia, Alex Aiken", + "published": "2022-05-04", + "updated": "2022-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.03994v1", + "title": "Video Relationship Detection Using Mixture of Experts", + "abstract": "Machine comprehension of visual information from images and videos by neural\nnetworks faces two primary challenges. Firstly, there exists a computational\nand inference gap in connecting vision and language, making it difficult to\naccurately determine which object a given agent acts on and represent it\nthrough language. Secondly, classifiers trained by a single, monolithic neural\nnetwork often lack stability and generalization. To overcome these challenges,\nwe introduce MoE-VRD, a novel approach to visual relationship detection\nutilizing a mixture of experts. MoE-VRD identifies language triplets in the\nform of < subject, predicate, object> tuples to extract relationships from\nvisual processing. Leveraging recent advancements in visual relationship\ndetection, MoE-VRD addresses the requirement for action recognition in\nestablishing relationships between subjects (acting) and objects (being acted\nupon). In contrast to single monolithic networks, MoE-VRD employs multiple\nsmall models as experts, whose outputs are aggregated. Each expert in MoE-VRD\nspecializes in visual relationship learning and object tagging. By utilizing a\nsparsely-gated mixture of experts, MoE-VRD enables conditional computation and\nsignificantly enhances neural network capacity without increasing computational\ncomplexity. 
Our experimental results demonstrate that the conditional\ncomputation capabilities and scalability of the mixture-of-experts approach\nlead to superior performance in visual relationship detection compared to\nstate-of-the-art methods.", + "authors": "Ala Shaabana, Zahra Gharaee, Paul Fieguth", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.14800v1", + "title": "Not All Experts are Equal: Efficient Expert Pruning and Skipping for Mixture-of-Experts Large Language Models", + "abstract": "A pivotal advancement in the progress of large language models (LLMs) is the\nemergence of the Mixture-of-Experts (MoE) LLMs. Compared to traditional LLMs,\nMoE LLMs can achieve higher performance with fewer parameters, but it is still\nhard to deploy them due to their immense parameter sizes. Different from\nprevious weight pruning methods that rely on specifically designed hardware,\nthis paper mainly aims to enhance the deployment efficiency of MoE LLMs by\nintroducing plug-and-play expert-level sparsification techniques. Specifically,\nwe propose, for the first time to our best knowledge, post-training approaches\nfor task-agnostic and task-specific expert pruning and skipping of MoE LLMs,\ntailored to improve deployment efficiency while maintaining model performance\nacross a wide range of tasks. Extensive experiments show that our proposed\nmethods can simultaneously reduce model sizes and increase the inference speed,\nwhile maintaining satisfactory performance. Data and code will be available at\nhttps://github.com/Lucky-Lance/Expert_Sparsity.", + "authors": "Xudong Lu, Qi Liu, Yuhui Xu, Aojun Zhou, Siyuan Huang, Bo Zhang, Junchi Yan, Hongsheng Li", + "published": "2024-02-22", + "updated": "2024-02-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.16610v1", + "title": "Efficient Deweather Mixture-of-Experts with Uncertainty-aware Feature-wise Linear Modulation", + "abstract": "The Mixture-of-Experts (MoE) approach has demonstrated outstanding\nscalability in multi-task learning including low-level upstream tasks such as\nconcurrent removal of multiple adverse weather effects. However, the\nconventional MoE architecture with parallel Feed Forward Network (FFN) experts\nleads to significant parameter and computational overheads that hinder its\nefficient deployment. In addition, the naive MoE linear router is suboptimal in\nassigning task-specific features to multiple experts which limits its further\nscalability. In this work, we propose an efficient MoE architecture with weight\nsharing across the experts. Inspired by the idea of linear feature modulation\n(FM), our architecture implicitly instantiates multiple experts via learnable\nactivation modulations on a single shared expert block. The proposed Feature\nModulated Expert (FME) serves as a building block for the novel\nMixture-of-Feature-Modulation-Experts (MoFME) architecture, which can scale up\nthe number of experts with low overhead. We further propose an\nUncertainty-aware Router (UaR) to assign task-specific features to different FM\nmodules with well-calibrated weights. This enables MoFME to effectively learn\ndiverse expert functions for multiple tasks. 
The conducted experiments on the\nmulti-deweather task show that our MoFME outperforms the baselines in the image\nrestoration quality by 0.1-0.2 dB and achieves SOTA-compatible performance\nwhile saving more than 72% of parameters and 39% inference time over the\nconventional MoE counterpart. Experiments on the downstream segmentation and\nclassification tasks further demonstrate the generalizability of MoFME to real\nopen-world applications.", + "authors": "Rongyu Zhang, Yulin Luo, Jiaming Liu, Huanrui Yang, Zhen Dong, Denis Gudovskiy, Tomoyuki Okuno, Yohei Nakata, Kurt Keutzer, Yuan Du, Shanghang Zhang", + "published": "2023-12-27", + "updated": "2023-12-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1811.10740v2", + "title": "Mixture of Regression Experts in fMRI Encoding", + "abstract": "fMRI semantic category understanding using linguistic encoding models attempt\nto learn a forward mapping that relates stimuli to the corresponding brain\nactivation. Classical encoding models use linear multi-variate methods to\npredict the brain activation (all voxels) given the stimulus. However, these\nmethods essentially assume multiple regions as one large uniform region or\nseveral independent regions, ignoring connections among them. In this paper, we\npresent a mixture of experts-based model where a group of experts captures\nbrain activity patterns related to particular regions of interest (ROI) and\nalso show the discrimination across different experts. The model is trained\nword stimuli encoded as 25-dimensional feature vectors as input and the\ncorresponding brain responses as output. Given a new word (25-dimensional\nfeature vector), it predicts the entire brain activation as the linear\ncombination of multiple experts brain activations. We argue that each expert\nlearns a certain region of brain activations corresponding to its category of\nwords, which solves the problem of identifying the regions with a simple\nencoding model. We showcase that proposed mixture of experts-based model indeed\nlearns region-based experts to predict the brain activations with high spatial\naccuracy.", + "authors": "Subba Reddy Oota, Adithya Avvaru, Naresh Manwani, Raju S. Bapi", + "published": "2018-11-26", + "updated": "2018-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.HC", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.11412v1", + "title": "Expert Composer Policy: Scalable Skill Repertoire for Quadruped Robots", + "abstract": "We propose the expert composer policy, a framework to reliably expand the\nskill repertoire of quadruped agents. The composer policy links pair of experts\nvia transitions to a sampled target state, allowing experts to be composed\nsequentially. Each expert specializes in a single skill, such as a locomotion\ngait or a jumping motion. Instead of a hierarchical or mixture-of-experts\narchitecture, we train a single composer policy in an independent process that\nis not conditioned on the other expert policies. By reusing the same composer\npolicy, our approach enables adding new experts without affecting existing\nones, enabling incremental repertoire expansion and preserving original motion\nquality. 
We measured the transition success rate of 72 transition pairs and\nachieved an average success rate of 99.99\\%, which is over 10\\% higher than the\nbaseline random approach, and outperforms other state-of-the-art methods. Using\ndomain randomization during training we ensure a successful transfer to the\nreal world, where we achieve an average transition success rate of 97.22\\%\n(N=360) in our experiments.", + "authors": "Guilherme Christmann, Ying-Sheng Luo, Wei-Chao Chen", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.RO", + "cats": [ + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1605.01652v1", + "title": "LSTM-based Mixture-of-Experts for Knowledge-Aware Dialogues", + "abstract": "We introduce an LSTM-based method for dynamically integrating several\nword-prediction experts to obtain a conditional language model which can be\ngood simultaneously at several subtasks. We illustrate this general approach\nwith an application to dialogue where we integrate a neural chat model, good at\nconversational aspects, with a neural question-answering model, good at\nretrieving precise information from a knowledge-base, and show how the\nintegration combines the strengths of the independent components. We hope that\nthis focused contribution will attract attention on the benefits of using such\nmixtures of experts in NLP.", + "authors": "Phong Le, Marc Dymetman, Jean-Michel Renders", + "published": "2016-05-05", + "updated": "2016-05-05", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12656v2", + "title": "HyperMoE: Paying Attention to Unselected Experts in Mixture of Experts via Dynamic Transfer", + "abstract": "The Mixture of Experts (MoE) for language models has been proven effective in\naugmenting the capacity of models by dynamically routing each input token to a\nspecific subset of experts for processing. Despite the success, most existing\nmethods face a challenge for balance between sparsity and the availability of\nexpert knowledge: enhancing performance through increased use of expert\nknowledge often results in diminishing sparsity during expert selection. To\nmitigate this contradiction, we propose HyperMoE, a novel MoE framework built\nupon Hypernetworks. This framework integrates the computational processes of\nMoE with the concept of knowledge transferring in multi-task learning. Specific\nmodules generated based on the information of unselected experts serve as\nsupplementary information, which allows the knowledge of experts not selected\nto be used while maintaining selection sparsity. Our comprehensive empirical\nevaluations across multiple datasets and backbones establish that HyperMoE\nsignificantly outperforms existing MoE methods under identical conditions\nconcerning the number of experts.", + "authors": "Hao Zhao, Zihan Qiu, Huijia Wu, Zili Wang, Zhaofeng He, Jie Fu", + "published": "2024-02-20", + "updated": "2024-02-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.07806v1", + "title": "Transformer Based Multi-Source Domain Adaptation", + "abstract": "In practical machine learning settings, the data on which a model must make\npredictions often come from a different distribution than the data it was\ntrained on. 
Here, we investigate the problem of unsupervised multi-source\ndomain adaptation, where a model is trained on labelled data from multiple\nsource domains and must make predictions on a domain for which no labelled data\nhas been seen. Prior work with CNNs and RNNs has demonstrated the benefit of\nmixture of experts, where the predictions of multiple domain expert classifiers\nare combined; as well as domain adversarial training, to induce a domain\nagnostic representation space. Inspired by this, we investigate how such\nmethods can be effectively applied to large pretrained transformer models. We\nfind that domain adversarial training has an effect on the learned\nrepresentations of these models while having little effect on their\nperformance, suggesting that large transformer-based models are already\nrelatively robust across domains. Additionally, we show that mixture of experts\nleads to significant performance improvements by comparing several variants of\nmixing functions, including one novel mixture based on attention. Finally, we\ndemonstrate that the predictions of large pretrained transformer based domain\nexperts are highly homogenous, making it challenging to learn effective\nfunctions for mixing their predictions.", + "authors": "Dustin Wright, Isabelle Augenstein", + "published": "2020-09-16", + "updated": "2020-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.07816v1", + "title": "Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", + "abstract": "We investigate efficient methods for training Large Language Models (LLMs) to\npossess capabilities in multiple specialized domains, such as coding, math\nreasoning and world knowledge. Our method, named Branch-Train-MiX (BTX), starts\nfrom a seed model, which is branched to train experts in embarrassingly\nparallel fashion with high throughput and reduced communication cost. After\nindividual experts are asynchronously trained, BTX brings together their\nfeedforward parameters as experts in Mixture-of-Expert (MoE) layers and\naverages the remaining parameters, followed by an MoE-finetuning stage to learn\ntoken-level routing. BTX generalizes two special cases, the Branch-Train-Merge\nmethod, which does not have the MoE finetuning stage to learn routing, and\nsparse upcycling, which omits the stage of training experts asynchronously.\nCompared to alternative approaches, BTX achieves the best accuracy-efficiency\ntradeoff.", + "authors": "Sainbayar Sukhbaatar, Olga Golovneva, Vasu Sharma, Hu Xu, Xi Victoria Lin, Baptiste Rozi\u00e8re, Jacob Kahn, Daniel Li, Wen-tau Yih, Jason Weston, Xian Li", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1903.07756v1", + "title": "Hierarchical Routing Mixture of Experts", + "abstract": "In regression tasks the distribution of the data is often too complex to be\nfitted by a single model. In contrast, partition-based models are developed\nwhere data is divided and fitted by local models. These models partition the\ninput space and do not leverage the input-output dependency of\nmultimodal-distributed data, and strong local models are needed to make good\npredictions. 
Addressing these problems, we propose a binary tree-structured\nhierarchical routing mixture of experts (HRME) model that has classifiers as\nnon-leaf node experts and simple regression models as leaf node experts. The\nclassifier nodes jointly soft-partition the input-output space based on the\nnatural separateness of multimodal data. This enables simple leaf experts to be\neffective for prediction. Further, we develop a probabilistic framework for the\nHRME model, and propose a recursive Expectation-Maximization (EM) based\nalgorithm to learn both the tree structure and the expert models. Experiments\non a collection of regression tasks validate the effectiveness of our method\ncompared to a variety of other regression models.", + "authors": "Wenbo Zhao, Yang Gao, Shahan Ali Memon, Bhiksha Raj, Rita Singh", + "published": "2019-03-18", + "updated": "2019-03-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08753v1", + "title": "Table-based Fact Verification with Self-adaptive Mixture of Experts", + "abstract": "The table-based fact verification task has recently gained widespread\nattention and yet remains to be a very challenging problem. It inherently\nrequires informative reasoning over natural language together with different\nnumerical and logical reasoning on tables (e.g., count, superlative,\ncomparative). Considering that, we exploit mixture-of-experts and present in\nthis paper a new method: Self-adaptive Mixture-of-Experts Network (SaMoE).\nSpecifically, we have developed a mixture-of-experts neural network to\nrecognize and execute different types of reasoning -- the network is composed\nof multiple experts, each handling a specific part of the semantics for\nreasoning, whereas a management module is applied to decide the contribution of\neach expert network to the verification result. A self-adaptive method is\ndeveloped to teach the management module combining results of different experts\nmore efficiently without external knowledge. The experimental results\nillustrate that our framework achieves 85.1% accuracy on the benchmark dataset\nTabFact, comparable with the previous state-of-the-art models. We hope our\nframework can serve as a new baseline for table-based verification. Our code is\navailable at https://github.com/THUMLP/SaMoE.", + "authors": "Yuxuan Zhou, Xien Liu, Kaiyin Zhou, Ji Wu", + "published": "2022-04-19", + "updated": "2022-04-19", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2011.01613v1", + "title": "Towards a Universal Gating Network for Mixtures of Experts", + "abstract": "The combination and aggregation of knowledge from multiple neural networks\ncan be commonly seen in the form of mixtures of experts. However, such\ncombinations are usually done using networks trained on the same tasks, with\nlittle mention of the combination of heterogeneous pre-trained networks,\nespecially in the data-free regime. This paper proposes multiple data-free\nmethods for the combination of heterogeneous neural networks, ranging from the\nutilization of simple output logit statistics, to training specialized gating\nnetworks. The gating networks decide whether specific inputs belong to specific\nnetworks based on the nature of the expert activations generated. 
The\nexperiments revealed that the gating networks, including the universal gating\napproach, constituted the most accurate approach, and therefore represent a\npragmatic step towards applications with heterogeneous mixtures of experts in a\ndata-free regime. The code for this project is hosted on github at\nhttps://github.com/cwkang1998/network-merging.", + "authors": "Chen Wen Kang, Chua Meng Hong, Tomas Maul", + "published": "2020-11-03", + "updated": "2020-11-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02629v2", + "title": "BA-MoE: Boundary-Aware Mixture-of-Experts Adapter for Code-Switching Speech Recognition", + "abstract": "Mixture-of-experts based models, which use language experts to extract\nlanguage-specific representations effectively, have been well applied in\ncode-switching automatic speech recognition. However, there is still\nsubstantial space to improve as similar pronunciation across languages may\nresult in ineffective multi-language modeling and inaccurate language boundary\nestimation. To eliminate these drawbacks, we propose a cross-layer language\nadapter and a boundary-aware training method, namely Boundary-Aware\nMixture-of-Experts (BA-MoE). Specifically, we introduce language-specific\nadapters to separate language-specific representations and a unified gating\nlayer to fuse representations within each encoder layer. Second, we compute\nlanguage adaptation loss of the mean output of each language-specific adapter\nto improve the adapter module's language-specific representation learning.\nBesides, we utilize a boundary-aware predictor to learn boundary\nrepresentations for dealing with language boundary confusion. Our approach\nachieves significant performance improvement, reducing the mixture error rate\nby 16.55\\% compared to the baseline on the ASRU 2019 Mandarin-English\ncode-switching challenge dataset.", + "authors": "Peikun Chen, Fan Yu, Yuhao Lian, Hongfei Xue, Xucheng Wan, Naijun Zheng, Huan Zhou, Lei Xie", + "published": "2023-10-04", + "updated": "2023-10-08", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2010.14260v2", + "title": "Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability", + "abstract": "In this paper, we consider mixtures of two Mallows models for top-$k$\nrankings, both with the same location parameter but with different scale\nparameters, i.e., a mixture of concentric Mallows models. This situation arises\nwhen we have a heterogeneous population of voters formed by two homogeneous\npopulations, one of which is a subpopulation of expert voters while the other\nincludes the non-expert voters. We propose efficient sampling algorithms for\nMallows top-$k$ rankings. We show the identifiability of both components, and\nthe learnability of their respective parameters in this setting by, first,\nbounding the sample complexity for the Borda algorithm with top-$k$ rankings\nand second, proposing polynomial time algorithm for the separation of the\nrankings in each component. 
Finally, since the rank aggregation will suffer\nfrom a large amount of noise introduced by the non-expert voters, we adapt the\nBorda algorithm to be able to recover the ground truth consensus ranking which\nis especially consistent with the expert rankings.", + "authors": "Collas Fabien, Irurozki Ekhine", + "published": "2020-10-27", + "updated": "2020-11-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09762v1", + "title": "Diversifying the Mixture-of-Experts Representation for Language Models with Orthogonal Optimizer", + "abstract": "The Mixture of Experts (MoE) has emerged as a highly successful technique in\ndeep learning, based on the principle of divide-and-conquer to maximize model\ncapacity without significant additional computational cost. Even in the era of\nlarge-scale language models (LLMs), MoE continues to play a crucial role, as\nsome researchers have indicated that GPT-4 adopts the MoE structure to ensure\ndiverse inference results. However, MoE is susceptible to performance\ndegeneracy, particularly evident in the issues of imbalance and homogeneous\nrepresentation among experts. While previous studies have extensively addressed\nthe problem of imbalance, the challenge of homogeneous representation remains\nunresolved. In this study, we shed light on the homogeneous representation\nproblem, wherein experts in the MoE fail to specialize and lack diversity,\nleading to frustratingly high similarities in their representations (up to 99%\nin a well-performed MoE model). This problem restricts the expressive power of\nthe MoE and, we argue, contradicts its original intention. To tackle this\nissue, we propose a straightforward yet highly effective solution: OMoE, an\northogonal expert optimizer. Additionally, we introduce an alternating training\nstrategy that encourages each expert to update in a direction orthogonal to the\nsubspace spanned by other experts. Our algorithm facilitates MoE training in\ntwo key ways: firstly, it explicitly enhances representation diversity, and\nsecondly, it implicitly fosters interaction between experts during orthogonal\nweights computation. Through extensive experiments, we demonstrate that our\nproposed optimization algorithm significantly improves the performance of\nfine-tuning the MoE model on the GLUE benchmark, SuperGLUE benchmark,\nquestion-answering task, and name entity recognition tasks.", + "authors": "Boan Liu, Liang Ding, Li Shen, Keqin Peng, Yu Cao, Dazhao Cheng, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-10-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1904.09948v1", + "title": "PLUME: Polyhedral Learning Using Mixture of Experts", + "abstract": "In this paper, we propose a novel mixture of expert architecture for learning\npolyhedral classifiers. We learn the parameters of the classifierusing an\nexpectation maximization algorithm. Wederive the generalization bounds of the\nproposedapproach. Through an extensive simulation study, we show that the\nproposed method performs comparably to other state-of-the-art approaches.", + "authors": "Kulin Shah, P. S. 
Sastry, Naresh Manwani", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.02813v1", + "title": "Towards Understanding Mixture of Experts in Deep Learning", + "abstract": "The Mixture-of-Experts (MoE) layer, a sparsely-activated model controlled by\na router, has achieved great success in deep learning. However, the\nunderstanding of such architecture remains elusive. In this paper, we formally\nstudy how the MoE layer improves the performance of neural network learning and\nwhy the mixture model will not collapse into a single model. Our empirical\nresults suggest that the cluster structure of the underlying problem and the\nnon-linearity of the expert are pivotal to the success of MoE. To further\nunderstand this, we consider a challenging classification problem with\nintrinsic cluster structures, which is hard to learn using a single expert. Yet\nwith the MoE layer, by choosing the experts as two-layer nonlinear\nconvolutional neural networks (CNNs), we show that the problem can be learned\nsuccessfully. Furthermore, our theory shows that the router can learn the\ncluster-center features, which helps divide the input complex problem into\nsimpler linear classification sub-problems that individual experts can conquer.\nTo our knowledge, this is the first result towards formally understanding the\nmechanism of the MoE layer for deep learning.", + "authors": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, Yuanzhi Li", + "published": "2022-08-04", + "updated": "2022-08-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1806.08200v1", + "title": "Mixtures of Experts Models", + "abstract": "Mixtures of experts models provide a framework in which covariates may be\nincluded in mixture models. This is achieved by modelling the parameters of the\nmixture model as functions of the concomitant covariates. Given their mixture\nmodel foundation, mixtures of experts models possess a diverse range of\nanalytic uses, from clustering observations to capturing parameter\nheterogeneity in cross-sectional data. This chapter focuses on delineating the\nmixture of experts modelling framework and demonstrates the utility and\nflexibility of mixtures of experts models as an analytic tool.", + "authors": "Isobel Claire Gormley, Sylvia Fr\u00fchwirth-Schnatter", + "published": "2018-06-21", + "updated": "2018-06-21", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2107.04694v1", + "title": "Lifelong Mixture of Variational Autoencoders", + "abstract": "In this paper, we propose an end-to-end lifelong learning mixture of experts.\nEach expert is implemented by a Variational Autoencoder (VAE). The experts in\nthe mixture system are jointly trained by maximizing a mixture of individual\ncomponent evidence lower bounds (MELBO) on the log-likelihood of the given\ntraining samples. The mixing coefficients in the mixture, control the\ncontributions of each expert in the goal representation. These are sampled from\na Dirichlet distribution whose parameters are determined through non-parametric\nestimation during lifelong learning. The model can learn new tasks fast when\nthese are similar to those previously learnt. 
The proposed Lifelong mixture of\nVAE (L-MVAE) expands its architecture with new components when learning a\ncompletely new task. After the training, our model can automatically determine\nthe relevant expert to be used when fed with new data samples. This mechanism\nbenefits both the memory efficiency and the required computational cost as only\none expert is used during the inference. The L-MVAE inference model is able to\nperform interpolation in the joint latent space across the data domains\nassociated with different tasks and is shown to be efficient for disentangled\nlearning representation.", + "authors": "Fei Ye, Adrian G. Bors", + "published": "2021-07-09", + "updated": "2021-07-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.16710v1", + "title": "Prediction Sets for High-Dimensional Mixture of Experts Models", + "abstract": "Large datasets make it possible to build predictive models that can capture\nheterogenous relationships between the response variable and features. The\nmixture of high-dimensional linear experts model posits that observations come\nfrom a mixture of high-dimensional linear regression models, where the mixture\nweights are themselves feature-dependent. In this paper, we show how to\nconstruct valid prediction sets for an $\\ell_1$-penalized mixture of experts\nmodel in the high-dimensional setting. We make use of a debiasing procedure to\naccount for the bias induced by the penalization and propose a novel strategy\nfor combining intervals to form a prediction set with coverage guarantees in\nthe mixture setting. Synthetic examples and an application to the prediction of\ncritical temperatures of superconducting materials show our method to have\nreliable practical performance.", + "authors": "Adel Javanmard, Simeng Shao, Jacob Bien", + "published": "2022-10-30", + "updated": "2022-10-30", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2110.04260v3", + "title": "Taming Sparsely Activated Transformer with Stochastic Experts", + "abstract": "Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can\neasily scale to have outrageously large amounts of parameters without\nsignificant increase in computational cost. However, SAMs are reported to be\nparameter inefficient such that larger models do not always lead to better\nperformance. While most on-going research focuses on improving SAMs models by\nexploring methods of routing inputs to experts, our analysis reveals that such\nresearch might not lead to the solution we expect, i.e., the commonly-used\nrouting methods based on gating mechanisms do not work better than randomly\nrouting inputs to experts. In this paper, we propose a new expert-based model,\nTHOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models,\nsuch as the Switch Transformer, experts in THOR are randomly activated for each\ninput during training and inference. THOR models are trained using a\nconsistency regularized loss, where experts learn not only from training data\nbut also from other experts as teachers, such that all the experts make\nconsistent predictions. We validate the effectiveness of THOR on machine\ntranslation tasks. 
Results show that THOR models are more parameter efficient\nin that they significantly outperform the Transformer and MoE models across\nvarious settings. For example, in multilingual translation, THOR outperforms\nthe Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as\nthat of a state-of-the-art MoE model that is 18 times larger. Our code is\npublicly available at:\nhttps://github.com/microsoft/Stochastic-Mixture-of-Experts.", + "authors": "Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Tuo Zhao, Jianfeng Gao", + "published": "2021-10-08", + "updated": "2022-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1405.7624v1", + "title": "Simultaneous Feature and Expert Selection within Mixture of Experts", + "abstract": "A useful strategy to deal with complex classification scenarios is the\n\"divide and conquer\" approach. The mixture of experts (MOE) technique makes use\nof this strategy by joinly training a set of classifiers, or experts, that are\nspecialized in different regions of the input space. A global model, or gate\nfunction, complements the experts by learning a function that weights their\nrelevance in different parts of the input space. Local feature selection\nappears as an attractive alternative to improve the specialization of experts\nand gate function, particularly, for the case of high dimensional data. Our\nmain intuition is that particular subsets of dimensions, or subspaces, are\nusually more appropriate to classify instances located in different regions of\nthe input space. Accordingly, this work contributes with a regularized variant\nof MoE that incorporates an embedded process for local feature selection using\n$L1$ regularization, with a simultaneous expert selection. The experiments are\nstill pending.", + "authors": "Billy Peralta", + "published": "2014-05-29", + "updated": "2014-05-29", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.04894v1", + "title": "DAMEX: Dataset-aware Mixture-of-Experts for visual understanding of mixture-of-datasets", + "abstract": "Construction of a universal detector poses a crucial question: How can we\nmost effectively train a model on a large mixture of datasets? The answer lies\nin learning dataset-specific features and ensembling their knowledge but do all\nthis in a single model. Previous methods achieve this by having separate\ndetection heads on a common backbone but that results in a significant increase\nin parameters. In this work, we present Mixture-of-Experts as a solution,\nhighlighting that MoEs are much more than a scalability tool. We propose\nDataset-Aware Mixture-of-Experts, DAMEX where we train the experts to become an\n`expert' of a dataset by learning to route each dataset tokens to its mapped\nexpert. Experiments on Universal Object-Detection Benchmark show that we\noutperform the existing state-of-the-art by average +10.2 AP score and improve\nover our non-MoE baseline by average +2.0 AP score. We also observe consistent\ngains while mixing datasets with (1) limited availability, (2) disparate\ndomains and (3) divergent label sets. 
Further, we qualitatively show that DAMEX\nis robust against expert representation collapse.", + "authors": "Yash Jain, Harkirat Behl, Zsolt Kira, Vibhav Vineet", + "published": "2023-11-08", + "updated": "2023-11-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1809.04853v2", + "title": "Bayesian shrinkage in mixture of experts models: Identifying robust determinants of class membership", + "abstract": "A method for implicit variable selection in mixture of experts frameworks is\nproposed. We introduce a prior structure where information is taken from a set\nof independent covariates. Robust class membership predictors are identified\nusing a normal gamma prior. The resulting model setup is used in a finite\nmixture of Bernoulli distributions to find homogenous clusters of women in\nMozambique based on their information sources on HIV. Fully Bayesian inference\nis carried out via the implementation of a Gibbs sampler.", + "authors": "Gregor Zens", + "published": "2018-09-13", + "updated": "2019-01-12", + "primary_cat": "econ.EM", + "cats": [ + "econ.EM", + "62F15, 62J07, 62H30, 90-08" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.05238v3", + "title": "Universal Simultaneous Machine Translation with Mixture-of-Experts Wait-k Policy", + "abstract": "Simultaneous machine translation (SiMT) generates translation before reading\nthe entire source sentence and hence it has to trade off between translation\nquality and latency. To fulfill the requirements of different translation\nquality and latency in practical applications, the previous methods usually\nneed to train multiple SiMT models for different latency levels, resulting in\nlarge computational costs. In this paper, we propose a universal SiMT model\nwith Mixture-of-Experts Wait-k Policy to achieve the best translation quality\nunder arbitrary latency with only one trained model. Specifically, our method\nemploys multi-head attention to accomplish the mixture of experts where each\nhead is treated as a wait-k expert with its own waiting words number, and given\na test latency and source inputs, the weights of the experts are accordingly\nadjusted to produce the best translation. Experiments on three datasets show\nthat our method outperforms all the strong baselines under different latency,\nincluding the state-of-the-art adaptive policy.", + "authors": "Shaolei Zhang, Yang Feng", + "published": "2021-09-11", + "updated": "2022-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2012.02130v4", + "title": "A similarity-based Bayesian mixture-of-experts model", + "abstract": "We present a new nonparametric mixture-of-experts model for multivariate\nregression problems, inspired by the probabilistic k-nearest neighbors\nalgorithm. Using a conditionally specified model, predictions for out-of-sample\ninputs are based on similarities to each observed data point, yielding\npredictive distributions represented by Gaussian mixtures. Posterior inference\nis performed on the parameters of the mixture components as well as the\ndistance metric using a mean-field variational Bayes algorithm accompanied with\na stochastic gradient-based optimization procedure. 
The proposed method is\nespecially advantageous in settings where inputs are of relatively high\ndimension in comparison to the data size, where input-output relationships are\ncomplex, and where predictive distributions may be skewed or multimodal.\nComputational studies on five datasets, of which two are synthetically\ngenerated, illustrate clear advantages of our mixture-of-experts method for\nhigh-dimensional inputs, outperforming competitor models both in terms of\nvalidation metrics and visual inspection.", + "authors": "Tianfang Zhang, Rasmus Bokrantz, Jimmy Olsson", + "published": "2020-12-03", + "updated": "2022-08-03", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.00372v1", + "title": "Visual Saliency Prediction Using a Mixture of Deep Neural Networks", + "abstract": "Visual saliency models have recently begun to incorporate deep learning to\nachieve predictive capacity much greater than previous unsupervised methods.\nHowever, most existing models predict saliency using local mechanisms limited\nto the receptive field of the network. We propose a model that incorporates\nglobal scene semantic information in addition to local information gathered by\na convolutional neural network. Our model is formulated as a mixture of\nexperts. Each expert network is trained to predict saliency for a set of\nclosely related images. The final saliency map is computed as a weighted\nmixture of the expert networks' output, with weights determined by a separate\ngating network. This gating network is guided by global scene information to\npredict weights. The expert networks and the gating network are trained\nsimultaneously in an end-to-end manner. We show that our mixture formulation\nleads to improvement in performance over an otherwise identical non-mixture\nmodel that does not incorporate global scene information.", + "authors": "Samuel Dodge, Lina Karam", + "published": "2017-02-01", + "updated": "2017-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.15961v1", + "title": "Mixture of Tokens: Efficient LLMs through Cross-Example Aggregation", + "abstract": "Despite the promise of Mixture of Experts (MoE) models in increasing\nparameter counts of Transformer models while maintaining training and inference\ncosts, their application carries notable drawbacks. The key strategy of these\nmodels is to, for each processed token, activate at most a few experts -\nsubsets of an extensive feed-forward layer. But this approach is not without\nits challenges. The operation of matching experts and tokens is discrete, which\nmakes MoE models prone to issues like training instability and uneven expert\nutilization. Existing techniques designed to address these concerns, such as\nauxiliary losses or balance-aware matching, result either in lower model\nperformance or are more difficult to train. In response to these issues, we\npropose Mixture of Tokens, a fully-differentiable model that retains the\nbenefits of MoE architectures while avoiding the aforementioned difficulties.\nRather than routing tokens to experts, this approach mixes tokens from\ndifferent examples prior to feeding them to experts, enabling the model to\nlearn from all token-expert combinations. Importantly, this mixing can be\ndisabled to avoid mixing of different sequences during inference. 
Crucially,\nthis method is fully compatible with both masked and causal Large Language\nModel training and inference.", + "authors": "Szymon Antoniak, Sebastian Jaszczur, Micha\u0142 Krutul, Maciej Pi\u00f3ro, Jakub Krajewski, Jan Ludziejewski, Tomasz Odrzyg\u00f3\u017ad\u017a, Marek Cygan", + "published": "2023-10-24", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05444v1", + "title": "Pushing Mixture of Experts to the Limit: Extremely Parameter Efficient MoE for Instruction Tuning", + "abstract": "The Mixture of Experts (MoE) is a widely known neural architecture where an\nensemble of specialized sub-models optimizes overall performance with a\nconstant computational cost. However, conventional MoEs pose challenges at\nscale due to the need to store all experts in memory. In this paper, we push\nMoE to the limit. We propose extremely parameter-efficient MoE by uniquely\ncombining MoE architecture with lightweight experts.Our MoE architecture\noutperforms standard parameter-efficient fine-tuning (PEFT) methods and is on\npar with full fine-tuning by only updating the lightweight experts -- less than\n1% of an 11B parameters model. Furthermore, our method generalizes to unseen\ntasks as it does not depend on any prior task knowledge. Our research\nunderscores the versatility of the mixture of experts architecture, showcasing\nits ability to deliver robust performance even when subjected to rigorous\nparameter constraints. Our code used in all the experiments is publicly\navailable here: https://github.com/for-ai/parameter-efficient-moe.", + "authors": "Ted Zadouri, Ahmet \u00dcst\u00fcn, Arash Ahmadian, Beyza Ermi\u015f, Acyr Locatelli, Sara Hooker", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.15969v2", + "title": "Routers in Vision Mixture of Experts: An Empirical Study", + "abstract": "Mixture-of-Experts (MoE) models are a promising way to scale up model\ncapacity without significantly increasing computational cost. A key component\nof MoEs is the router, which decides which subset of parameters (experts)\nprocess which feature embeddings (tokens). In this paper, we present a\ncomprehensive study of routers in MoEs for computer vision tasks. We introduce\na unified MoE formulation that subsumes different MoEs with two parametric\nrouting tensors. This formulation covers both sparse MoE, which uses a binary\nor hard assignment between experts and tokens, and soft MoE, which uses a soft\nassignment between experts and weighted combinations of tokens. Routers for\nsparse MoEs can be further grouped into two variants: Token Choice, which\nmatches experts to each token, and Expert Choice, which matches tokens to each\nexpert. We conduct head-to-head experiments with 6 different routers, including\nexisting routers from prior work and new ones we introduce. We show that (i)\nmany routers originally developed for language modeling can be adapted to\nperform strongly in vision tasks, (ii) in sparse MoE, Expert Choice routers\ngenerally outperform Token Choice routers, and (iii) soft MoEs generally\noutperform sparse MoEs with a fixed compute budget. 
These results provide new\ninsights regarding the crucial role of routers in vision MoE models.", + "authors": "Tianlin Liu, Mathieu Blondel, Carlos Riquelme, Joan Puigcerver", + "published": "2024-01-29", + "updated": "2024-04-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.11449v2", + "title": "Dynamic Mixture of Experts Models for Online Prediction", + "abstract": "A mixture of experts models the conditional density of a response variable\nusing a mixture of regression models with covariate-dependent mixture weights.\nWe extend the finite mixture of experts model by allowing the parameters in\nboth the mixture components and the weights to evolve in time by following\nrandom walk processes. Inference for time-varying parameters in richly\nparameterized mixture of experts models is challenging. We propose a sequential\nMonte Carlo algorithm for online inference and based on a tailored proposal\ndistribution built on ideas from linear Bayes methods and the EM algorithm. The\nmethod gives a unified treatment for mixtures with time-varying parameters,\nincluding the special case of static parameters. We assess the properties of\nthe method on simulated data and on industrial data where the aim is to predict\nsoftware faults in a continuously upgraded large-scale software project.", + "authors": "Parfait Munezero, Mattias Villani, Robert Kohn", + "published": "2021-09-23", + "updated": "2022-10-13", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO", + "stat.AP" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.05346v1", + "title": "A Modular Task-oriented Dialogue System Using a Neural Mixture-of-Experts", + "abstract": "End-to-end Task-oriented Dialogue Systems (TDSs) have attracted a lot of\nattention for their superiority (e.g., in terms of global optimization) over\npipeline modularized TDSs. Previous studies on end-to-end TDSs use a\nsingle-module model to generate responses for complex dialogue contexts.\nHowever, no model consistently outperforms the others in all cases. We propose\na neural Modular Task-oriented Dialogue System(MTDS) framework, in which a few\nexpert bots are combined to generate the response for a given dialogue context.\nMTDS consists of a chair bot and several expert bots. Each expert bot is\nspecialized for a particular situation, e.g., one domain, one type of action of\na system, etc. The chair bot coordinates multiple expert bots and adaptively\nselects an expert bot to generate the appropriate response. We further propose\na Token-level Mixture-of-Expert (TokenMoE) model to implement MTDS, where the\nexpert bots predict multiple tokens at each timestamp and the chair bot\ndetermines the final generated token by fully taking into consideration the\noutputs of all expert bots. Both the chair bot and the expert bots are jointly\ntrained in an end-to-end fashion. To verify the effectiveness of TokenMoE, we\ncarry out extensive experiments on a benchmark dataset. 
Compared with the\nbaseline using a single-module model, our TokenMoE improves the performance by\n8.1% of inform rate and 0.8% of success rate.", + "authors": "Jiahuan Pei, Pengjie Ren, Maarten de Rijke", + "published": "2019-07-10", + "updated": "2019-07-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.13750v1", + "title": "MoLE : Mixture of Language Experts for Multi-Lingual Automatic Speech Recognition", + "abstract": "Multi-lingual speech recognition aims to distinguish linguistic expressions\nin different languages and integrate acoustic processing simultaneously. In\ncontrast, current multi-lingual speech recognition research follows a\nlanguage-aware paradigm, mainly targeted to improve recognition performance\nrather than discriminate language characteristics. In this paper, we present a\nmulti-lingual speech recognition network named\nMixture-of-Language-Expert(MoLE), which digests speech in a variety of\nlanguages. Specifically, MoLE analyzes linguistic expression from input speech\nin arbitrary languages, activating a language-specific expert with a\nlightweight language tokenizer. The tokenizer not only activates experts, but\nalso estimates the reliability of the activation. Based on the reliability, the\nactivated expert and the language-agnostic expert are aggregated to represent\nlanguage-conditioned embedding for efficient speech recognition. Our proposed\nmodel is evaluated in 5 languages scenario, and the experimental results show\nthat our structure is advantageous on multi-lingual recognition, especially for\nspeech in low-resource language.", + "authors": "Yoohwan Kwon, Soo-Whan Chung", + "published": "2023-02-27", + "updated": "2023-02-27", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.CL", + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.06066v1", + "title": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", + "abstract": "In the era of large language models, Mixture-of-Experts (MoE) is a promising\narchitecture for managing computational costs when scaling up model parameters.\nHowever, conventional MoE architectures like GShard, which activate the top-$K$\nout of $N$ experts, face challenges in ensuring expert specialization, i.e.\neach expert acquires non-overlapping and focused knowledge. In response, we\npropose the DeepSeekMoE architecture towards ultimate expert specialization. It\ninvolves two principal strategies: (1) finely segmenting the experts into $mN$\nones and activating $mK$ from them, allowing for a more flexible combination of\nactivated experts; (2) isolating $K_s$ experts as shared ones, aiming at\ncapturing common knowledge and mitigating redundancy in routed experts.\nStarting from a modest scale with 2B parameters, we demonstrate that\nDeepSeekMoE 2B achieves comparable performance with GShard 2.9B, which has 1.5\ntimes the expert parameters and computation. In addition, DeepSeekMoE 2B nearly\napproaches the performance of its dense counterpart with the same number of\ntotal parameters, which set the upper bound of MoE models. Subsequently, we\nscale up DeepSeekMoE to 16B parameters and show that it achieves comparable\nperformance with LLaMA2 7B, with only about 40% of computations. 
Further, our\npreliminary efforts to scale up DeepSeekMoE to 145B parameters consistently\nvalidate its substantial advantages over the GShard architecture, and show its\nperformance comparable with DeepSeek 67B, using only 28.5% (maybe even 18.2%)\nof computations.", + "authors": "Damai Dai, Chengqi Deng, Chenggang Zhao, R. X. Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y. Wu, Zhenda Xie, Y. K. Li, Panpan Huang, Fuli Luo, Chong Ruan, Zhifang Sui, Wenfeng Liang", + "published": "2024-01-11", + "updated": "2024-01-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17749v1", + "title": "Multi-Task Dense Prediction via Mixture of Low-Rank Experts", + "abstract": "Previous multi-task dense prediction methods based on the Mixture of Experts\n(MoE) have received great performance but they neglect the importance of\nexplicitly modeling the global relations among all tasks. In this paper, we\npresent a novel decoder-focused method for multi-task dense prediction, called\nMixture-of-Low-Rank-Experts (MLoRE). To model the global task relationships,\nMLoRE adds a generic convolution path to the original MoE structure, where each\ntask feature can go through this path for explicit parameter sharing.\nFurthermore, to control the parameters and computational cost brought by the\nincrease in the number of experts, we take inspiration from LoRA and propose to\nleverage the low-rank format of a vanilla convolution in the expert network.\nSince the low-rank experts have fewer parameters and can be dynamically\nparameterized into the generic convolution, the parameters and computational\ncost do not change much with the increase of experts. Benefiting from this\ndesign, we increase the number of experts and its reception field to enlarge\nthe representation capacity, facilitating multiple dense tasks learning in a\nunified network. Extensive experiments on the PASCAL-Context and NYUD-v2\nbenchmarks show that our MLoRE achieves superior performance compared to\nprevious state-of-the-art methods on all metrics. Our code is available at\nhttps://github.com/YuqiYang213/MLoRE.", + "authors": "Yuqi Yang, Peng-Tao Jiang, Qibin Hou, Hao Zhang, Jinwei Chen, Bo Li", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.01750v1", + "title": "Modular Approach to Machine Reading Comprehension: Mixture of Task-Aware Experts", + "abstract": "In this work we present a Mixture of Task-Aware Experts Network for Machine\nReading Comprehension on a relatively small dataset. We particularly focus on\nthe issue of common-sense learning, enforcing the common ground knowledge by\nspecifically training different expert networks to capture different kinds of\nrelationships between each passage, question and choice triplet. Moreover, we\ntake inspi ration on the recent advancements of multitask and transfer learning\nby training each network a relevant focused task. 
By making the\nmixture-of-networks aware of a specific goal by enforcing a task and a\nrelationship, we achieve state-of-the-art results and reduce over-fitting.", + "authors": "Anirudha Rayasam, Anusha Kamath, Gabriel Bayomi Tinoco Kalejaiye", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.06327v1", + "title": "Double-Wing Mixture of Experts for Streaming Recommendations", + "abstract": "Streaming Recommender Systems (SRSs) commonly train recommendation models on\nnewly received data only to address user preference drift, i.e., the changing\nuser preferences towards items. However, this practice overlooks the long-term\nuser preferences embedded in historical data. More importantly, the common\nheterogeneity in data stream greatly reduces the accuracy of streaming\nrecommendations. The reason is that different preferences (or characteristics)\nof different types of users (or items) cannot be well learned by a unified\nmodel. To address these two issues, we propose a Variational and\nReservoir-enhanced Sampling based Double-Wing Mixture of Experts framework,\ncalled VRS-DWMoE, to improve the accuracy of streaming recommendations. In\nVRS-DWMoE, we first devise variational and reservoir-enhanced sampling to\nwisely complement new data with historical data, and thus address the user\npreference drift issue while capturing long-term user preferences. After that,\nwe propose a Double-Wing Mixture of Experts (DWMoE) model to first effectively\nlearn heterogeneous user preferences and item characteristics, and then make\nrecommendations based on them. Specifically, DWMoE contains two Mixture of\nExperts (MoE, an effective ensemble learning model) to learn user preferences\nand item characteristics, respectively. Moreover, the multiple experts in each\nMoE learn the preferences (or characteristics) of different types of users (or\nitems) where each expert specializes in one underlying type. Extensive\nexperiments demonstrate that VRS-DWMoE consistently outperforms the\nstate-of-the-art SRSs.", + "authors": "Yan Zhao, Shoujin Wang, Yan Wang, Hongwei Liu, Weizhe Zhang", + "published": "2020-09-14", + "updated": "2020-09-14", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.10598v3", + "title": "Sparsely-gated Mixture-of-Expert Layers for CNN Interpretability", + "abstract": "Sparsely-gated Mixture of Expert (MoE) layers have been recently successfully\napplied for scaling large transformers, especially for language modeling tasks.\nAn intriguing side effect of sparse MoE layers is that they convey inherent\ninterpretability to a model via natural expert specialization. In this work, we\napply sparse MoE layers to CNNs for computer vision tasks and analyze the\nresulting effect on model interpretability. To stabilize MoE training, we\npresent both soft and hard constraint-based approaches. With hard constraints,\nthe weights of certain experts are allowed to become zero, while soft\nconstraints balance the contribution of experts with an additional auxiliary\nloss. As a result, soft constraints handle expert utilization better and\nsupport the expert specialization process, while hard constraints maintain more\ngeneralized experts and increase overall model performance. 
Our findings\ndemonstrate that experts can implicitly focus on individual sub-domains of the\ninput space. For example, experts trained for CIFAR-100 image classification\nspecialize in recognizing different domains such as flowers or animals without\nprevious data clustering. Experiments with RetinaNet and the COCO dataset\nfurther indicate that object detection experts can also specialize in detecting\nobjects of distinct sizes.", + "authors": "Svetlana Pavlitska, Christian Hubschneider, Lukas Struppek, J. Marius Z\u00f6llner", + "published": "2022-04-22", + "updated": "2023-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1409.4698v1", + "title": "A Mixtures-of-Experts Framework for Multi-Label Classification", + "abstract": "We develop a novel probabilistic approach for multi-label classification that\nis based on the mixtures-of-experts architecture combined with recently\nintroduced conditional tree-structured Bayesian networks. Our approach captures\ndifferent input-output relations from multi-label data using the efficient\ntree-structured classifiers, while the mixtures-of-experts architecture aims to\ncompensate for the tree-structured restrictions and build a more accurate\nmodel. We develop and present algorithms for learning the model from data and\nfor performing multi-label predictions on future data instances. Experiments on\nmultiple benchmark datasets demonstrate that our approach achieves highly\ncompetitive results and outperforms the existing state-of-the-art multi-label\nclassification methods.", + "authors": "Charmgil Hong, Iyad Batal, Milos Hauskrecht", + "published": "2014-09-16", + "updated": "2014-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "I.2.6" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.01778v1", + "title": "Hierarchical mixture of discriminative Generalized Dirichlet classifiers", + "abstract": "This paper presents a discriminative classifier for compositional data. This\nclassifier is based on the posterior distribution of the Generalized Dirichlet\nwhich is the discriminative counterpart of Generalized Dirichlet mixture model.\nMoreover, following the mixture of experts paradigm, we proposed a hierarchical\nmixture of this classifier. In order to learn the models parameters, we use a\nvariational approximation by deriving an upper-bound for the Generalized\nDirichlet mixture. To the best of our knownledge, this is the first time this\nbound is proposed in the literature. Experimental results are presented for\nspam detection and color space identification.", + "authors": "Elvis Togban, Djemel Ziou", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.10768v1", + "title": "Memory Augmented Language Models through Mixture of Word Experts", + "abstract": "Scaling up the number of parameters of language models has proven to be an\neffective approach to improve performance. For dense models, increasing model\nsize proportionally increases the model's computation footprint. In this work,\nwe seek to aggressively decouple learning capacity and FLOPs through\nMixture-of-Experts (MoE) style models with large knowledge-rich vocabulary\nbased routing functions and experts. 
Our proposed approach, dubbed Mixture of\nWord Experts (MoWE), can be seen as a memory augmented model, where a large set\nof word-specific experts play the role of a sparse memory. We demonstrate that\nMoWE performs significantly better than the T5 family of models with similar\nnumber of FLOPs in a variety of NLP tasks. Additionally, MoWE outperforms\nregular MoE models on knowledge intensive tasks and has similar performance to\nmore complex memory augmented approaches that often require to invoke custom\nmechanisms to search the sparse memory.", + "authors": "Cicero Nogueira dos Santos, James Lee-Thorp, Isaac Noble, Chung-Ching Chang, David Uthus", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.02952v1", + "title": "On Least Squares Estimation in Softmax Gating Mixture of Experts", + "abstract": "Mixture of experts (MoE) model is a statistical machine learning design that\naggregates multiple expert networks using a softmax gating function in order to\nform a more intricate and expressive model. Despite being commonly used in\nseveral applications owing to their scalability, the mathematical and\nstatistical properties of MoE models are complex and difficult to analyze. As a\nresult, previous theoretical works have primarily focused on probabilistic MoE\nmodels by imposing the impractical assumption that the data are generated from\na Gaussian MoE model. In this work, we investigate the performance of the least\nsquares estimators (LSE) under a deterministic MoE model where the data are\nsampled according to a regression model, a setting that has remained largely\nunexplored. We establish a condition called strong identifiability to\ncharacterize the convergence behavior of various types of expert functions. We\ndemonstrate that the rates for estimating strongly identifiable experts, namely\nthe widely used feed forward networks with activation functions\n$\\mathrm{sigmoid}(\\cdot)$ and $\\tanh(\\cdot)$, are substantially faster than\nthose of polynomial experts, which we show to exhibit a surprising slow\nestimation rate. Our findings have important practical implications for expert\nselection.", + "authors": "Huy Nguyen, Nhat Ho, Alessandro Rinaldo", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.13934v1", + "title": "Functional mixture-of-experts for classification", + "abstract": "We develop a mixtures-of-experts (ME) approach to the multiclass\nclassification where the predictors are univariate functions. It consists of a\nME model in which both the gating network and the experts network are\nconstructed upon multinomial logistic activation functions with functional\ninputs. We perform a regularized maximum likelihood estimation in which the\ncoefficient functions enjoy interpretable sparsity constraints on targeted\nderivatives. 
We develop an EM-Lasso like algorithm to compute the regularized\nMLE and evaluate the proposed approach on simulated and real data.", + "authors": "Nhat Thien Pham, Faicel Chamroukhi", + "published": "2022-02-28", + "updated": "2022-02-28", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.06966v1", + "title": "Acquiring Diverse Skills using Curriculum Reinforcement Learning with Mixture of Experts", + "abstract": "Reinforcement learning (RL) is a powerful approach for acquiring a\ngood-performing policy. However, learning diverse skills is challenging in RL\ndue to the commonly used Gaussian policy parameterization. We propose\n\\textbf{Di}verse \\textbf{Skil}l \\textbf{L}earning (Di-SkilL), an RL method for\nlearning diverse skills using Mixture of Experts, where each expert formalizes\na skill as a contextual motion primitive. Di-SkilL optimizes each expert and\nits associate context distribution to a maximum entropy objective that\nincentivizes learning diverse skills in similar contexts. The per-expert\ncontext distribution enables automatic curricula learning, allowing each expert\nto focus on its best-performing sub-region of the context space. To overcome\nhard discontinuities and multi-modalities without any prior knowledge of the\nenvironment's unknown context probability space, we leverage energy-based\nmodels to represent the per-expert context distributions and demonstrate how we\ncan efficiently train them using the standard policy gradient objective. We\nshow on challenging robot simulation tasks that Di-SkilL can learn diverse and\nperformant skills.", + "authors": "Onur Celik, Aleksandar Taranovic, Gerhard Neumann", + "published": "2024-03-11", + "updated": "2024-03-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2102.06034v1", + "title": "Speech enhancement with mixture-of-deep-experts with clean clustering pre-training", + "abstract": "In this study we present a mixture of deep experts (MoDE) neural-network\narchitecture for single microphone speech enhancement. Our architecture\ncomprises a set of deep neural networks (DNNs), each of which is an 'expert' in\na different speech spectral pattern such as phoneme. A gating DNN is\nresponsible for the latent variables which are the weights assigned to each\nexpert's output given a speech segment. The experts estimate a mask from the\nnoisy input and the final mask is then obtained as a weighted average of the\nexperts' estimates, with the weights determined by the gating DNN. A soft\nspectral attenuation, based on the estimated mask, is then applied to enhance\nthe noisy speech signal. As a byproduct, we gain reduction at the complexity in\ntest time. We show that the experts specialization allows better robustness to\nunfamiliar noise types.", + "authors": "Shlomo E. 
Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2021-02-11", + "updated": "2021-02-11", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "cs.LG", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.01899v1", + "title": "MiCE: Mixture of Contrastive Experts for Unsupervised Image Clustering", + "abstract": "We present Mixture of Contrastive Experts (MiCE), a unified probabilistic\nclustering framework that simultaneously exploits the discriminative\nrepresentations learned by contrastive learning and the semantic structures\ncaptured by a latent mixture model. Motivated by the mixture of experts, MiCE\nemploys a gating function to partition an unlabeled dataset into subsets\naccording to the latent semantics and multiple experts to discriminate distinct\nsubsets of instances assigned to them in a contrastive learning manner. To\nsolve the nontrivial inference and learning problems caused by the latent\nvariables, we further develop a scalable variant of the\nExpectation-Maximization (EM) algorithm for MiCE and provide proof of the\nconvergence. Empirically, we evaluate the clustering performance of MiCE on\nfour widely adopted natural image datasets. MiCE achieves significantly better\nresults than various previous methods and a strong contrastive learning\nbaseline.", + "authors": "Tsung Wei Tsai, Chongxuan Li, Jun Zhu", + "published": "2021-05-05", + "updated": "2021-05-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.04832v1", + "title": "Dynamic Partition Models", + "abstract": "We present a new approach for learning compact and intuitive distributed\nrepresentations with binary encoding. Rather than summing up expert votes as in\nproducts of experts, we employ for each variable the opinion of the most\nreliable expert. Data points are hence explained through a partitioning of the\nvariables into expert supports. The partitions are dynamically adapted based on\nwhich experts are active. During the learning phase we adopt a smoothed version\nof this model that uses separate mixtures for each data dimension. In our\nexperiments we achieve accurate reconstructions of high-dimensional data points\nwith at most a dozen experts.", + "authors": "Marc Goessling, Yali Amit", + "published": "2017-02-16", + "updated": "2017-02-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.09179v1", + "title": "SiRA: Sparse Mixture of Low Rank Adaptation", + "abstract": "Parameter Efficient Tuning has been an prominent approach to adapt the Large\nLanguage Model to downstream tasks. Most previous works considers adding the\ndense trainable parameters, where all parameters are used to adapt certain\ntask. We found this less effective empirically using the example of LoRA that\nintroducing more trainable parameters does not help. Motivated by this we\ninvestigate the importance of leveraging \"sparse\" computation and propose SiRA:\nsparse mixture of low rank adaption. SiRA leverages the Sparse Mixture of\nExpert(SMoE) to boost the performance of LoRA. Specifically it enforces the top\n$k$ experts routing with a capacity limit restricting the maximum number of\ntokens each expert can process. We propose a novel and simple expert dropout on\ntop of gating network to reduce the over-fitting issue. 
Through extensive\nexperiments, we verify SiRA performs better than LoRA and other mixture of\nexpert approaches across different single tasks and multitask settings.", + "authors": "Yun Zhu, Nevan Wichers, Chu-Cheng Lin, Xinyi Wang, Tianlong Chen, Lei Shu, Han Lu, Canoee Liu, Liangchen Luo, Jindong Chen, Lei Meng", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.08245v1", + "title": "Scattered Mixture-of-Experts Implementation", + "abstract": "We present ScatterMoE, an implementation of Sparse Mixture-of-Experts (SMoE)\non GPUs. ScatterMoE builds upon existing implementations, and overcoming some\nof the limitations to improve inference and training speed, and memory\nfootprint. This implementation achieves this by avoiding padding and making\nexcessive copies of the input. We introduce ParallelLinear, the main component\nwe use to build our implementation and the various kernels used to speed up the\noperation. We benchmark our implementation against Megablocks, and show that it\nenables a higher throughput and lower memory footprint. We also show how\nParallelLinear enables extension of the Mixture-of-Experts concept by\ndemonstrating with an implementation of Mixture of Attention.", + "authors": "Shawn Tan, Yikang Shen, Rameswar Panda, Aaron Courville", + "published": "2024-03-13", + "updated": "2024-03-13", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2008.09662v1", + "title": "Biased Mixtures Of Experts: Enabling Computer Vision Inference Under Data Transfer Limitations", + "abstract": "We propose a novel mixture-of-experts class to optimize computer vision\nmodels in accordance with data transfer limitations at test time. Our approach\npostulates that the minimum acceptable amount of data allowing for\nhighly-accurate results can vary for different input space partitions.\nTherefore, we consider mixtures where experts require different amounts of\ndata, and train a sparse gating function to divide the input space for each\nexpert. By appropriate hyperparameter selection, our approach is able to bias\nmixtures of experts towards selecting specific experts over others. In this\nway, we show that the data transfer optimization between visual sensing and\nprocessing can be solved as a convex optimization problem.To demonstrate the\nrelation between data availability and performance, we evaluate biased mixtures\non a range of mainstream computer vision problems, namely: (i) single shot\ndetection, (ii) image super resolution, and (iii) realtime video action\nclassification. For all cases, and when experts constitute modified baselines\nto meet different limits on allowed data utility, biased mixtures significantly\noutperform previous work optimized to meet the same constraints on available\ndata.", + "authors": "Alhabib Abbas, Yiannis Andreopoulos", + "published": "2020-08-21", + "updated": "2020-08-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.IV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1704.00946v4", + "title": "Approximation results regarding the multiple-output mixture of linear experts model", + "abstract": "Mixture of experts (MoE) models are a class of artificial neural networks\nthat can be used for functional approximation and probabilistic modeling. 
An\nimportant class of MoE models is the class of mixture of linear experts (MoLE)\nmodels, where the expert functions map to real topological output spaces. There\nare a number of powerful approximation results regarding MoLE models, when the\noutput space is univariate. These results guarantee the ability of MoLE mean\nfunctions to approximate arbitrary continuous functions, and MoLE models\nthemselves to approximate arbitrary conditional probability density functions.\nWe utilize and extend upon the univariate approximation results in order to\nprove a pair of useful results for situations where the output spaces are\nmultivariate.", + "authors": "Hien D. Nguyen, Faicel Chamroukhi, Florence Forbes", + "published": "2017-04-04", + "updated": "2019-05-28", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1511.06072v1", + "title": "Mediated Experts for Deep Convolutional Networks", + "abstract": "We present a new supervised architecture termed Mediated Mixture-of-Experts\n(MMoE) that allows us to improve classification accuracy of Deep Convolutional\nNetworks (DCN). Our architecture achieves this with the help of expert\nnetworks: A network is trained on a disjoint subset of a given dataset and then\nrun in parallel to other experts during deployment. A mediator is employed if\nexperts contradict each other. This allows our framework to naturally support\nincremental learning, as adding new classes requires (re-)training of the new\nexpert only. We also propose two measures to control computational complexity:\nAn early-stopping mechanism halts experts that have low confidence in their\nprediction. The system allows to trade-off accuracy and complexity without\nfurther retraining. We also suggest to share low-level convolutional layers\nbetween experts in an effort to avoid computation of a near-duplicate feature\nset. We evaluate our system on a popular dataset and report improved accuracy\ncompared to a single model of same configuration.", + "authors": "Sebastian Agethen, Winston H. Hsu", + "published": "2015-11-19", + "updated": "2015-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.13833v2", + "title": "Mixtures of Gaussian process experts based on kernel stick-breaking processes", + "abstract": "Mixtures of Gaussian process experts is a class of models that can\nsimultaneously address two of the key limitations inherent in standard Gaussian\nprocesses: scalability and predictive performance. In particular, models that\nuse Dirichlet processes as gating functions permit straightforward\ninterpretation and automatic selection of the number of experts in a mixture.\nWhile the existing models are intuitive and capable of capturing\nnon-stationarity, multi-modality and heteroskedasticity, the simplicity of\ntheir gating functions may limit the predictive performance when applied to\ncomplex data-generating processes. Capitalising on the recent advancement in\nthe dependent Dirichlet processes literature, we propose a new mixture model of\nGaussian process experts based on kernel stick-breaking processes. Our model\nmaintains the intuitive appeal yet improve the performance of the existing\nmodels. To make it practical, we design a sampler for posterior computation\nbased on the slice sampling. 
The model behaviour and improved predictive\nperformance are demonstrated in experiments using six datasets.", + "authors": "Yuji Saikai, Khue-Dung Dang", + "published": "2023-04-26", + "updated": "2023-05-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.12830v1", + "title": "Mixtures of Gaussian Process Experts with SMC$^2$", + "abstract": "Gaussian processes are a key component of many flexible statistical and\nmachine learning models. However, they exhibit cubic computational complexity\nand high memory constraints due to the need of inverting and storing a full\ncovariance matrix. To circumvent this, mixtures of Gaussian process experts\nhave been considered where data points are assigned to independent experts,\nreducing the complexity by allowing inference based on smaller, local\ncovariance matrices. Moreover, mixtures of Gaussian process experts\nsubstantially enrich the model's flexibility, allowing for behaviors such as\nnon-stationarity, heteroscedasticity, and discontinuities. In this work, we\nconstruct a novel inference approach based on nested sequential Monte Carlo\nsamplers to simultaneously infer both the gating network and Gaussian process\nexpert parameters. This greatly improves inference compared to importance\nsampling, particularly in settings when a stationary Gaussian process is\ninappropriate, while still being thoroughly parallelizable.", + "authors": "Teemu H\u00e4rk\u00f6nen, Sara Wade, Kody Law, Lassi Roininen", + "published": "2022-08-26", + "updated": "2022-08-26", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.00893v1", + "title": "MoDE: A Mixture-of-Experts Model with Mutual Distillation among the Experts", + "abstract": "The application of mixture-of-experts (MoE) is gaining popularity due to its\nability to improve model's performance. In an MoE structure, the gate layer\nplays a significant role in distinguishing and routing input features to\ndifferent experts. This enables each expert to specialize in processing their\ncorresponding sub-tasks. However, the gate's routing mechanism also gives rise\nto narrow vision: the individual MoE's expert fails to use more samples in\nlearning the allocated sub-task, which in turn limits the MoE to further\nimprove its generalization ability. To effectively address this, we propose a\nmethod called Mixture-of-Distilled-Expert (MoDE), which applies moderate mutual\ndistillation among experts to enable each expert to pick up more features\nlearned by other experts and gain more accurate perceptions on their original\nallocated sub-tasks. 
We conduct plenty experiments including tabular, NLP and\nCV datasets, which shows MoDE's effectiveness, universality and robustness.\nFurthermore, we develop a parallel study through innovatively constructing\n\"expert probing\", to experimentally prove why MoDE works: moderate distilling\nknowledge can improve each individual expert's test performances on their\nassigned tasks, leading to MoE's overall performance improvement.", + "authors": "Zhitian Xie, Yinger Zhang, Chenyi Zhuang, Qitao Shi, Zhining Liu, Jinjie Gu, Guannan Zhang", + "published": "2024-01-31", + "updated": "2024-01-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2404.15045v1", + "title": "Multi-Head Mixture-of-Experts", + "abstract": "Sparse Mixtures of Experts (SMoE) scales model capacity without significant\nincreases in training and inference costs, but exhibits the following two\nissues: (1) Low expert activation, where only a small subset of experts are\nactivated for optimization. (2) Lacking fine-grained analytical capabilities\nfor multiple semantic concepts within individual tokens. We propose Multi-Head\nMixture-of-Experts (MH-MoE), which employs a multi-head mechanism to split each\ntoken into multiple sub-tokens. These sub-tokens are then assigned to and\nprocessed by a diverse set of experts in parallel, and seamlessly reintegrated\ninto the original token form. The multi-head mechanism enables the model to\ncollectively attend to information from various representation spaces within\ndifferent experts, while significantly enhances expert activation, thus deepens\ncontext understanding and alleviate overfitting. Moreover, our MH-MoE is\nstraightforward to implement and decouples from other SMoE optimization\nmethods, making it easy to integrate with other SMoE models for enhanced\nperformance. Extensive experimental results across three tasks: English-focused\nlanguage modeling, Multi-lingual language modeling and Masked multi-modality\nmodeling tasks, demonstrate the effectiveness of MH-MoE.", + "authors": "Xun Wu, Shaohan Huang, Wenhui Wang, Furu Wei", + "published": "2024-04-23", + "updated": "2024-04-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.13850v2", + "title": "Statistical Perspective of Top-K Sparse Softmax Gating Mixture of Experts", + "abstract": "Top-K sparse softmax gating mixture of experts has been widely used for\nscaling up massive deep-learning architectures without increasing the\ncomputational cost. Despite its popularity in real-world applications, the\ntheoretical understanding of that gating function has remained an open problem.\nThe main challenge comes from the structure of the top-K sparse softmax gating\nfunction, which partitions the input space into multiple regions with distinct\nbehaviors. By focusing on a Gaussian mixture of experts, we establish\ntheoretical results on the effects of the top-K sparse softmax gating function\non both density and parameter estimations. Our results hinge upon defining\nnovel loss functions among parameters to capture different behaviors of the\ninput regions. When the true number of experts $k_{\\ast}$ is known, we\ndemonstrate that the convergence rates of density and parameter estimations are\nboth parametric on the sample size. 
However, when $k_{\\ast}$ becomes unknown\nand the true model is over-specified by a Gaussian mixture of $k$ experts where\n$k > k_{\\ast}$, our findings suggest that the number of experts selected from\nthe top-K sparse softmax gating function must exceed the total cardinality of a\ncertain number of Voronoi cells associated with the true parameters to\nguarantee the convergence of the density estimation. Moreover, while the\ndensity estimation rate remains parametric under this setting, the parameter\nestimation rates become substantially slow due to an intrinsic interaction\nbetween the softmax gating and expert functions.", + "authors": "Huy Nguyen, Pedram Akbarian, Fanqi Yan, Nhat Ho", + "published": "2023-09-25", + "updated": "2024-02-23", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1612.06879v1", + "title": "Robust mixture of experts modeling using the skew $t$ distribution", + "abstract": "Mixture of Experts (MoE) is a popular framework in the fields of statistics\nand machine learning for modeling heterogeneity in data for regression,\nclassification and clustering. MoE for continuous data are usually based on the\nnormal distribution. However, it is known that for data with asymmetric\nbehavior, heavy tails and atypical observations, the use of the normal\ndistribution is unsuitable. We introduce a new robust non-normal mixture of\nexperts modeling using the skew $t$ distribution. The proposed skew $t$ mixture\nof experts, named STMoE, handles these issues of the normal mixtures experts\nregarding possibly skewed, heavy-tailed and noisy data. We develop a dedicated\nexpectation conditional maximization (ECM) algorithm to estimate the model\nparameters by monotonically maximizing the observed data log-likelihood. We\ndescribe how the presented model can be used in prediction and in model-based\nclustering of regression data. Numerical experiments carried out on simulated\ndata show the effectiveness and the robustness of the proposed model in fitting\nnon-linear regression functions as well as in model-based clustering. Then, the\nproposed model is applied to the real-world data of tone perception for musical\ndata analysis, and the one of temperature anomalies for the analysis of climate\nchange data. The obtained results confirm the usefulness of the model for\npractical data analysis applications.", + "authors": "Faicel Chamroukhi", + "published": "2016-12-09", + "updated": "2016-12-09", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG", + "stat.ML", + "62, 62F, 62H30, 62h", + "G.3; I.2.6; I.5.1" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.02806v2", + "title": "Graph Mixture of Experts: Learning on Large-Scale Graphs with Explicit Diversity Modeling", + "abstract": "Graph neural networks (GNNs) have found extensive applications in learning\nfrom graph data. However, real-world graphs often possess diverse structures\nand comprise nodes and edges of varying types. To bolster the generalization\ncapacity of GNNs, it has become customary to augment training graph structures\nthrough techniques like graph augmentations and large-scale pre-training on a\nwider array of graphs. 
Balancing this diversity while avoiding increased\ncomputational costs and the notorious trainability issues of GNNs is crucial.\nThis study introduces the concept of Mixture-of-Experts (MoE) to GNNs, with the\naim of augmenting their capacity to adapt to a diverse range of training graph\nstructures, without incurring explosive computational overhead. The proposed\nGraph Mixture of Experts (GMoE) model empowers individual nodes in the graph to\ndynamically and adaptively select more general information aggregation experts.\nThese experts are trained to capture distinct subgroups of graph structures and\nto incorporate information with varying hop sizes, where those with larger hop\nsizes specialize in gathering information over longer distances. The\neffectiveness of GMoE is validated through a series of experiments on a diverse\nset of tasks, including graph, node, and link prediction, using the OGB\nbenchmark. Notably, it enhances ROC-AUC by $1.81\\%$ in ogbg-molhiv and by\n$1.40\\%$ in ogbg-molbbbp, when compared to the non-MoE baselines. Our code is\npublicly available at https://github.com/VITA-Group/Graph-Mixture-of-Experts.", + "authors": "Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Rao Kompella, Zhangyang Wang", + "published": "2023-04-06", + "updated": "2023-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2308.00951v1", + "title": "From Sparse to Soft Mixtures of Experts", + "abstract": "Sparse mixture of expert architectures (MoEs) scale model capacity without\nlarge increases in training or inference costs. Despite their success, MoEs\nsuffer from a number of issues: training instability, token dropping, inability\nto scale the number of experts, or ineffective finetuning. In this work, we\nproposeSoft MoE, a fully-differentiable sparse Transformer that addresses these\nchallenges, while maintaining the benefits of MoEs. Soft MoE performs an\nimplicit soft assignment by passing different weighted combinations of all\ninput tokens to each expert. As in other MoE works, experts in Soft MoE only\nprocess a subset of the (combined) tokens, enabling larger model capacity at\nlower inference cost. In the context of visual recognition, Soft MoE greatly\noutperforms standard Transformers (ViTs) and popular MoE variants (Tokens\nChoice and Experts Choice). For example, Soft MoE-Base/16 requires 10.5x lower\ninference cost (5.7x lower wall-clock time) than ViT-Huge/14 while matching its\nperformance after similar training. Soft MoE also scales well: Soft MoE Huge/14\nwith 128 experts in 16 MoE layers has over 40x more parameters than ViT\nHuge/14, while inference time cost grows by only 2%, and it performs\nsubstantially better.", + "authors": "Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Neil Houlsby", + "published": "2023-08-02", + "updated": "2023-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.00968v2", + "title": "Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts", + "abstract": "Large multi-modal models (LMMs) exhibit remarkable performance across\nnumerous tasks. However, generalist LMMs often suffer from performance\ndegradation when tuned over a large collection of tasks. 
Recent research\nsuggests that Mixture of Experts (MoE) architectures are useful for instruction\ntuning, but for LMMs of parameter size around O(50-100B), the prohibitive cost\nof replicating and storing the expert models severely limits the number of\nexperts we can use. We propose Omni-SMoLA, an architecture that uses the Soft\nMoE approach to (softly) mix many multimodal low rank experts, and avoids\nintroducing a significant number of new parameters compared to conventional MoE\nmodels. The core intuition here is that the large model provides a foundational\nbackbone, while different lightweight experts residually learn specialized\nknowledge, either per-modality or multimodally. Extensive experiments\ndemonstrate that the SMoLA approach helps improve the generalist performance\nacross a broad range of generative vision-and-language tasks, achieving new\nSoTA generalist performance that often matches or outperforms single\nspecialized LMM baselines, as well as new SoTA specialist performance.", + "authors": "Jialin Wu, Xia Hu, Yaqing Wang, Bo Pang, Radu Soricut", + "published": "2023-12-01", + "updated": "2024-04-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.04693v2", + "title": "GraphMETRO: Mitigating Complex Graph Distribution Shifts via Mixture of Aligned Experts", + "abstract": "Graph data are inherently complex and heterogeneous, leading to a high\nnatural diversity of distributional shifts. However, it remains unclear how to\nbuild machine learning architectures that generalize to complex non-synthetic\ndistributional shifts naturally occurring in the real world. Here we develop\nGraphMETRO, a Graph Neural Network architecture, that reliably models natural\ndiversity and captures complex distributional shifts. GraphMETRO employs a\nMixture-of-Experts (MoE) architecture with a gating model and multiple expert\nmodels, where each expert model targets a specific distributional shift to\nproduce a shift-invariant representation, and the gating model identifies shift\ncomponents. Additionally, we design a novel objective that aligns the\nrepresentations from different expert models to ensure smooth optimization.\nGraphMETRO achieves state-of-the-art results on four datasets from GOOD\nbenchmark comprised of complex and natural real-world distribution shifts,\nimproving by 67% and 4.2% on WebKB and Twitch datasets.", + "authors": "Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, Jure Leskovec", + "published": "2023-12-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.11706v1", + "title": "Mixture of ELM based experts with trainable gating network", + "abstract": "Mixture of experts method is a neural network based ensemble learning that\nhas great ability to improve the overall classification accuracy. This method\nis based on the divide and conquer principle, in which the problem space is\ndivided between several experts by supervisition of gating network. In this\npaper, we propose an ensemble learning method based on mixture of experts which\nis named mixture of ELM based experts with trainable gating network (MEETG) to\nimprove the computing cost and to speed up the learning process of ME. 
The\nstructure of ME consists of multi layer perceptrons (MLPs) as base experts and\ngating network, in which gradient-based learning algorithm is applied for\ntraining the MLPs which is an iterative and time consuming process. In order to\novercome on these problems, we use the advantages of extreme learning machine\n(ELM) for designing the structure of ME. ELM as a learning algorithm for single\nhidden-layer feed forward neural networks provides much faster learning process\nand better generalization ability in comparision with some other traditional\nlearning algorithms. Also, in the proposed method a trainable gating network is\napplied to aggregate the outputs of the experts dynamically according to the\ninput sample. Our experimental results and statistical analysis on 11 benchmark\ndatasets confirm that MEETG has an acceptable performance in classification\nproblems. Furthermore, our experimental results show that the proposed approach\noutperforms the original ELM on prediction stability and classification\naccuracy.", + "authors": "Laleh Armi, Elham Abbasi, Jamal Zarepour-Ahmadabadi", + "published": "2021-05-25", + "updated": "2021-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2212.00471v1", + "title": "Implicit Mixture of Interpretable Experts for Global and Local Interpretability", + "abstract": "We investigate the feasibility of using mixtures of interpretable experts\n(MoIE) to build interpretable image classifiers on MNIST10. MoIE uses a\nblack-box router to assign each input to one of many inherently interpretable\nexperts, thereby providing insight into why a particular classification\ndecision was made. We find that a naively trained MoIE will learn to 'cheat',\nwhereby the black-box router will solve the classification problem by itself,\nwith each expert simply learning a constant function for one particular class.\nWe propose to solve this problem by introducing interpretable routers and\ntraining the black-box router's decisions to match the interpretable router. In\naddition, we propose a novel implicit parameterization scheme that allows us to\nbuild mixtures of arbitrary numbers of experts, allowing us to study how\nclassification performance, local and global interpretability vary as the\nnumber of experts is increased. Our new model, dubbed Implicit Mixture of\nInterpretable Experts (IMoIE) can match state-of-the-art classification\naccuracy on MNIST10 while providing local interpretability, and can provide\nglobal interpretability albeit at the cost of reduced classification accuracy.", + "authors": "Nathan Elazar, Kerry Taylor", + "published": "2022-12-01", + "updated": "2022-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08396v1", + "title": "StableMoE: Stable Routing Strategy for Mixture of Experts", + "abstract": "The Mixture-of-Experts (MoE) technique can scale up the model size of\nTransformers with an affordable computational overhead. We point out that\nexisting learning-to-route MoE methods suffer from the routing fluctuation\nissue, i.e., the target expert of the same input may change along with\ntraining, but only one expert will be activated for the input during inference.\nThe routing fluctuation tends to harm sample efficiency because the same input\nupdates different experts but only one is finally used. 
In this paper, we\npropose StableMoE with two training stages to address the routing fluctuation\nproblem. In the first training stage, we learn a balanced and cohesive routing\nstrategy and distill it into a lightweight router decoupled from the backbone\nmodel. In the second training stage, we utilize the distilled router to\ndetermine the token-to-expert assignment and freeze it for a stable routing\nstrategy. We validate our method on language modeling and multilingual machine\ntranslation. The results show that StableMoE outperforms existing MoE methods\nin terms of both convergence speed and performance.", + "authors": "Damai Dai, Li Dong, Shuming Ma, Bo Zheng, Zhifang Sui, Baobao Chang, Furu Wei", + "published": "2022-04-18", + "updated": "2022-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2112.14397v2", + "title": "EvoMoE: An Evolutional Mixture-of-Experts Training Framework via Dense-To-Sparse Gate", + "abstract": "Mixture-of-experts (MoE) is becoming popular due to its success in improving\nthe model quality, especially in Transformers. By routing tokens with a sparse\ngate to a few experts (i.e., a small pieces of the full model), MoE can easily\nincrease the model parameters to a very large scale while keeping the\ncomputation cost in a constant level. Most existing works just initialize some\nrandom experts, set a fixed gating strategy (e.g., Top-k), and train the model\nfrom scratch in an ad-hoc way. We identify that these MoE models are suffering\nfrom the immature experts and unstable sparse gate, which are harmful to the\nconvergence performance. In this paper, we propose an efficient end-to-end MoE\ntraining framework called EvoMoE. EvoMoE starts from training one single expert\nand gradually evolves into a large and sparse MoE structure. EvoMoE mainly\ncontains two phases: the expert-diversify phase to train the base expert for a\nwhile and spawn multiple diverse experts from it, and the gate-sparsify phase\nto learn an adaptive sparse gate and activate a dynamic number of experts.\nEvoMoE naturally decouples the joint learning of both the experts and the\nsparse gate and focuses on learning the basic knowledge with a single expert at\nthe early training stage. Then it diversifies the experts and continues to\ntrain the MoE with a novel Dense-to-Sparse gate (DTS-Gate). Specifically,\ninstead of using a permanent sparse gate, DTS-Gate begins as a dense gate that\nroutes tokens to all experts, then gradually and adaptively becomes sparser\nwhile routes to fewer experts. Evaluations are conducted on three popular\nmodels and tasks, including RoBERTa for masked language modeling task, GPT for\nlanguage modeling task and Transformer for machine translation task. 
The\nresults show that EvoMoE outperforms existing baselines, including Switch, BASE\nLayer, Hash Layer and StableMoE.", + "authors": "Xiaonan Nie, Xupeng Miao, Shijie Cao, Lingxiao Ma, Qibin Liu, Jilong Xue, Youshan Miao, Yi Liu, Zhi Yang, Bin Cui", + "published": "2021-12-29", + "updated": "2022-10-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.12379v4", + "title": "Mixture of Cluster-conditional LoRA Experts for Vision-language Instruction Tuning", + "abstract": "Instruction tuning of Large Vision-language Models (LVLMs) has revolutionized\nthe development of versatile models with zero-shot generalization across a wide\nrange of downstream vision-language tasks. However, the diversity of training\ntasks of different sources and formats would lead to inevitable task conflicts,\nwhere different tasks conflict for the same set of model parameters, resulting\nin sub-optimal instructionfollowing abilities. To address that, we propose the\nMixture of Clusterconditional LoRA Experts (MoCLE), a novel Mixture of Experts\n(MoE) architecture designed to activate the task-customized model parameters\nbased on the instruction clusters. A separate universal expert is further\nincorporated to improve generalization capabilities of MoCLE for novel\ninstructions. Extensive experiments on 11 zero-shot tasks demonstrate the\neffectiveness of MoCLE.", + "authors": "Yunhao Gou, Zhili Liu, Kai Chen, Lanqing Hong, Hang Xu, Aoxue Li, Dit-Yan Yeung, James T. Kwok, Yu Zhang", + "published": "2023-12-19", + "updated": "2024-03-22", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05220v1", + "title": "On Parameter Estimation in Deviated Gaussian Mixture of Experts", + "abstract": "We consider the parameter estimation problem in the deviated Gaussian mixture\nof experts in which the data are generated from $(1 - \\lambda^{\\ast}) g_0(Y|\nX)+ \\lambda^{\\ast} \\sum_{i = 1}^{k_{\\ast}} p_{i}^{\\ast}\nf(Y|(a_{i}^{\\ast})^{\\top}X+b_i^{\\ast},\\sigma_{i}^{\\ast})$, where $X, Y$ are\nrespectively a covariate vector and a response variable, $g_{0}(Y|X)$ is a\nknown function, $\\lambda^{\\ast} \\in [0, 1]$ is true but unknown mixing\nproportion, and $(p_{i}^{\\ast}, a_{i}^{\\ast}, b_{i}^{\\ast}, \\sigma_{i}^{\\ast})$\nfor $1 \\leq i \\leq k^{\\ast}$ are unknown parameters of the Gaussian mixture of\nexperts. This problem arises from the goodness-of-fit test when we would like\nto test whether the data are generated from $g_{0}(Y|X)$ (null hypothesis) or\nthey are generated from the whole mixture (alternative hypothesis). Based on\nthe algebraic structure of the expert functions and the distinguishability\nbetween $g_0$ and the mixture part, we construct novel Voronoi-based loss\nfunctions to capture the convergence rates of maximum likelihood estimation\n(MLE) for our models. 
We further demonstrate that our proposed loss functions\ncharacterize the local convergence rates of parameter estimation more\naccurately than the generalized Wasserstein, a loss function being commonly\nused for estimating parameters in the Gaussian mixture of experts.", + "authors": "Huy Nguyen, Khai Nguyen, Nhat Ho", + "published": "2024-02-07", + "updated": "2024-02-07", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1312.4314v3", + "title": "Learning Factored Representations in a Deep Mixture of Experts", + "abstract": "Mixtures of Experts combine the outputs of several \"expert\" networks, each of\nwhich specializes in a different part of the input space. This is achieved by\ntraining a \"gating\" network that maps each input to a distribution over the\nexperts. Such models show promise for building larger networks that are still\ncheap to compute at test time, and more parallelizable at training time. In\nthis this work, we extend the Mixture of Experts to a stacked model, the Deep\nMixture of Experts, with multiple sets of gating and experts. This\nexponentially increases the number of effective experts by associating each\ninput with a combination of experts at each layer, yet maintains a modest model\nsize. On a randomly translated version of the MNIST dataset, we find that the\nDeep Mixture of Experts automatically learns to develop location-dependent\n(\"where\") experts at the first layer, and class-specific (\"what\") experts at\nthe second layer. In addition, we see that the different combinations are in\nuse when the model is applied to a dataset of speech monophones. These\ndemonstrate effective use of all expert combinations.", + "authors": "David Eigen, Marc'Aurelio Ranzato, Ilya Sutskever", + "published": "2013-12-16", + "updated": "2014-03-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2305.03288v2", + "title": "Demystifying Softmax Gating Function in Gaussian Mixture of Experts", + "abstract": "Understanding the parameter estimation of softmax gating Gaussian mixture of\nexperts has remained a long-standing open problem in the literature. It is\nmainly due to three fundamental theoretical challenges associated with the\nsoftmax gating function: (i) the identifiability only up to the translation of\nparameters; (ii) the intrinsic interaction via partial differential equations\nbetween the softmax gating and the expert functions in the Gaussian density;\n(iii) the complex dependence between the numerator and denominator of the\nconditional density of softmax gating Gaussian mixture of experts. We resolve\nthese challenges by proposing novel Voronoi loss functions among parameters and\nestablishing the convergence rates of maximum likelihood estimator (MLE) for\nsolving parameter estimation in these models. 
When the true number of experts\nis unknown and over-specified, our findings show a connection between the\nconvergence rate of the MLE and a solvability problem of a system of polynomial\nequations.", + "authors": "Huy Nguyen, TrungTin Nguyen, Nhat Ho", + "published": "2023-05-05", + "updated": "2023-10-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "math.ST", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05838v1", + "title": "Liu-type Shrinkage Estimators for Mixture of Poisson Regressions with Experts: A Heart Disease Study", + "abstract": "Count data play a critical role in medical research, such as heart disease.\nThe Poisson regression model is a common technique for evaluating the impact of\na set of covariates on the count responses. The mixture of Poisson regression\nmodels with experts is a practical tool to exploit the covariates, not only to\nhandle the heterogeneity in the Poisson regressions but also to learn the\nmixing structure of the population. Multicollinearity is one of the most common\nchallenges with regression models, leading to ill-conditioned design matrices\nof Poisson regression components and expert classes. The maximum likelihood\nmethod produces unreliable and misleading estimates for the effects of the\ncovariates in multicollinearity. In this research, we develop Ridge and\nLiu-type methods as two shrinkage approaches to cope with the ill-conditioned\ndesign matrices of the mixture of Poisson regression models with experts.\nThrough various numerical studies, we demonstrate that the shrinkage methods\noffer more reliable estimates for the coefficients of the mixture model in\nmulticollinearity while maintaining the classification performance of the ML\nmethod. The shrinkage methods are finally applied to a heart study to analyze\nthe heart disease rate stages.", + "authors": "Elsayed Ghanem, Moein Yoosefi, Armin Hatefi", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "stat.CO", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.09368v2", + "title": "Mixture-of-Experts with Expert Choice Routing", + "abstract": "Sparsely-activated Mixture-of-experts (MoE) models allow the number of\nparameters to greatly increase while keeping the amount of computation for a\ngiven token or a given sample unchanged. However, a poor expert routing\nstrategy (e.g. one resulting in load imbalance) can cause certain experts to be\nunder-trained, leading to an expert being under or over-specialized. Prior work\nallocates a fixed number of experts to each token using a top-k function\nregardless of the relative importance of different tokens. To address this, we\npropose a heterogeneous mixture-of-experts employing an expert choice method.\nInstead of letting tokens select the top-k experts, we have experts selecting\nthe top-k tokens. As a result, each token can be routed to a variable number of\nexperts and each expert can have a fixed bucket size. We systematically study\npre-training speedups using the same computational resources of the Switch\nTransformer top-1 and GShard top-2 gating of prior work and find that our\nmethod improves training convergence time by more than 2x. For the same\ncomputational cost, our method demonstrates higher performance in fine-tuning\n11 selected tasks in the GLUE and SuperGLUE benchmarks. 
For a smaller\nactivation cost, our method outperforms the T5 dense model in 7 out of the 11\ntasks.", + "authors": "Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew Dai, Zhifeng Chen, Quoc Le, James Laudon", + "published": "2022-02-18", + "updated": "2022-10-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2206.00277v2", + "title": "Task-Specific Expert Pruning for Sparse Mixture-of-Experts", + "abstract": "The sparse Mixture-of-Experts (MoE) model is powerful for large-scale\npre-training and has achieved promising results due to its model capacity.\nHowever, with trillions of parameters, MoE is hard to be deployed on cloud or\nmobile environment. The inference of MoE requires expert parallelism, which is\nnot hardware-friendly and communication expensive. Especially for\nresource-limited downstream tasks, such sparse structure has to sacrifice a lot\nof computing efficiency for limited performance gains. In this work, we observe\nmost experts contribute scarcely little to the MoE fine-tuning and inference.\nWe further propose a general method to progressively drop the non-professional\nexperts for the target downstream task, which preserves the benefits of MoE\nwhile reducing the MoE model into one single-expert dense model. Our\nexperiments reveal that the fine-tuned single-expert model could preserve 99.3%\nbenefits from MoE across six different types of tasks while enjoying 2x\ninference speed with free communication cost.", + "authors": "Tianyu Chen, Shaohan Huang, Yuan Xie, Binxing Jiao, Daxin Jiang, Haoyi Zhou, Jianxin Li, Furu Wei", + "published": "2022-06-01", + "updated": "2022-06-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02410v1", + "title": "Mixture of Quantized Experts (MoQE): Complementary Effect of Low-bit Quantization and Robustness", + "abstract": "Large Mixture of Experts (MoE) models could achieve state-of-the-art quality\non various language tasks, including machine translation task, thanks to the\nefficient model scaling capability with expert parallelism. However, it has\nbrought a fundamental issue of larger memory consumption and increased memory\nbandwidth bottleneck at deployment time. In this paper, we propose Mixture of\nQuantized Experts (MoQE) which is a simple weight-only quantization method\napplying ultra low-bit down to 2-bit quantizations only to expert weights for\nmitigating the increased memory and latency issues of MoE models. We show that\nlow-bit quantization together with the MoE architecture delivers a reliable\nmodel performance while reducing the memory size significantly even without any\nadditional training in most cases. In particular, expert layers in MoE models\nare much more robust to the quantization than conventional feedforward networks\n(FFN) layers. In our comprehensive analysis, we show that MoE models with 2-bit\nexpert weights can deliver better model performance than the dense model\ntrained on the same dataset. As a result of low-bit quantization, we show the\nmodel size can be reduced by 79.6% of the original half precision floating\npoint (fp16) MoE model. 
Combined with an optimized GPU runtime implementation,\nit also achieves 1.24X speed-up on A100 GPUs.", + "authors": "Young Jin Kim, Raffy Fahim, Hany Hassan Awadalla", + "published": "2023-10-03", + "updated": "2023-10-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1911.08151v2", + "title": "Retrospective and Prospective Mixture-of-Generators for Task-oriented Dialogue Response Generation", + "abstract": "Dialogue response generation (DRG) is a critical component of task-oriented\ndialogue systems (TDSs). Its purpose is to generate proper natural language\nresponses given some context, e.g., historical utterances, system states, etc.\nState-of-the-art work focuses on how to better tackle DRG in an end-to-end way.\nTypically, such studies assume that each token is drawn from a single\ndistribution over the output vocabulary, which may not always be optimal.\nResponses vary greatly with different intents, e.g., domains, system actions.\n We propose a novel mixture-of-generators network (MoGNet) for DRG, where we\nassume that each token of a response is drawn from a mixture of distributions.\nMoGNet consists of a chair generator and several expert generators. Each expert\nis specialized for DRG w.r.t. a particular intent. The chair coordinates\nmultiple experts and combines the output they have generated to produce more\nappropriate responses. We propose two strategies to help the chair make better\ndecisions, namely, a retrospective mixture-of-generators (RMoG) and prospective\nmixture-of-generators (PMoG). The former only considers the historical\nexpert-generated responses until the current time step while the latter also\nconsiders possible expert-generated responses in the future by encouraging\nexploration. In order to differentiate experts, we also devise a\nglobal-and-local (GL) learning scheme that forces each expert to be specialized\ntowards a particular intent using a local loss and trains the chair and all\nexperts to coordinate using a global loss.\n We carry out extensive experiments on the MultiWOZ benchmark dataset. MoGNet\nsignificantly outperforms state-of-the-art methods in terms of both automatic\nand human evaluations, demonstrating its effectiveness for DRG.", + "authors": "Jiahuan Pei, Pengjie Ren, Christof Monz, Maarten de Rijke", + "published": "2019-11-19", + "updated": "2020-02-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05526v1", + "title": "Buffer Overflow in Mixture of Experts", + "abstract": "Mixture of Experts (MoE) has become a key ingredient for scaling large\nfoundation models while keeping inference costs steady. We show that expert\nrouting strategies that have cross-batch dependencies are vulnerable to\nattacks. Malicious queries can be sent to a model and can affect a model's\noutput on other benign queries if they are grouped in the same batch. 
We\ndemonstrate this via a proof-of-concept attack in a toy experimental setting.", + "authors": "Jamie Hayes, Ilia Shumailov, Itay Yona", + "published": "2024-02-08", + "updated": "2024-02-08", + "primary_cat": "cs.CR", + "cats": [ + "cs.CR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1901.10668v2", + "title": "Doubly Sparse: Sparse Mixture of Sparse Experts for Efficient Softmax Inference", + "abstract": "Computations for the softmax function are significantly expensive when the\nnumber of output classes is large. In this paper, we present a novel softmax\ninference speedup method, Doubly Sparse Softmax (DS-Softmax), that leverages\nsparse mixture of sparse experts to efficiently retrieve top-k classes.\nDifferent from most existing methods that require and approximate a fixed\nsoftmax, our method is learning-based and can adapt softmax weights for a\nbetter inference speedup. In particular, our method learns a two-level\nhierarchy which divides entire output class space into several partially\noverlapping experts. Each expert is sparse and only contains a subset of output\nclasses. To find top-k classes, a sparse mixture enables us to find the most\nprobable expert quickly, and the sparse expert enables us to search within a\nsmall-scale softmax. We empirically conduct evaluation on several real-world\ntasks, including neural machine translation, language modeling and image\nclassification, and demonstrate that significant computation reductions can be\nachieved at no performance loss.", + "authors": "Shun Liao, Ting Chen, Tian Lin, Denny Zhou, Chong Wang", + "published": "2019-01-30", + "updated": "2019-07-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2207.09094v1", + "title": "MoEC: Mixture of Expert Clusters", + "abstract": "Sparsely Mixture of Experts (MoE) has received great interest due to its\npromising scaling capability with affordable computational overhead. MoE\nconverts dense layers into sparse experts, and utilizes a gated routing network\nto make experts conditionally activated. However, as the number of experts\ngrows, MoE with outrageous parameters suffers from overfitting and sparse data\nallocation. Such problems are especially severe on tasks with limited data,\nthus hindering the progress for MoE models to improve performance by scaling\nup. In this work, we propose Mixture of Expert Clusters - a general approach to\nenable expert layers to learn more diverse and appropriate knowledge by\nimposing variance-based constraints on the routing stage. We further propose a\ncluster-level expert dropout strategy specifically designed for the expert\ncluster structure. Our experiments reveal that MoEC could improve performance\non machine translation and natural language understanding tasks, and raise the\nperformance upper bound for scaling up experts under limited data. 
We also\nverify that MoEC plays a positive role in mitigating overfitting and sparse\ndata allocation.", + "authors": "Yuan Xie, Shaohan Huang, Tianyu Chen, Furu Wei", + "published": "2022-07-19", + "updated": "2022-07-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1703.09302v1", + "title": "Speech Enhancement using a Deep Mixture of Experts", + "abstract": "In this study we present a Deep Mixture of Experts (DMoE) neural-network\narchitecture for single microphone speech enhancement. By contrast to most\nspeech enhancement algorithms that overlook the speech variability mainly\ncaused by phoneme structure, our framework comprises a set of deep neural\nnetworks (DNNs), each one of which is an 'expert' in enhancing a given speech\ntype corresponding to a phoneme. A gating DNN determines which expert is\nassigned to a given speech segment. A speech presence probability (SPP) is then\nobtained as a weighted average of the expert SPP decisions, with the weights\ndetermined by the gating DNN. A soft spectral attenuation, based on the SPP, is\nthen applied to enhance the noisy speech signal. The experts and the gating\ncomponents of the DMoE network are trained jointly. As part of the training,\nspeech clustering into different subsets is performed in an unsupervised\nmanner. Therefore, unlike previous methods, a phoneme-labeled database is not\nrequired for the training procedure. A series of experiments with different\nnoise types verified the applicability of the new algorithm to the task of\nspeech enhancement. The proposed scheme outperforms other schemes that either\ndo not consider phoneme structure or use a simpler training methodology.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2017-03-27", + "updated": "2017-03-27", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17404v1", + "title": "Generalization Error Analysis for Sparse Mixture-of-Experts: A Preliminary Study", + "abstract": "Mixture-of-Experts (MoE) represents an ensemble methodology that amalgamates\npredictions from several specialized sub-models (referred to as experts). This\nfusion is accomplished through a router mechanism, dynamically assigning\nweights to each expert's contribution based on the input data. Conventional MoE\nmechanisms select all available experts, incurring substantial computational\ncosts. In contrast, Sparse Mixture-of-Experts (Sparse MoE) selectively engages\nonly a limited number, or even just one expert, significantly reducing\ncomputation overhead while empirically preserving, and sometimes even\nenhancing, performance. Despite its wide-ranging applications and these\nadvantageous characteristics, MoE's theoretical underpinnings have remained\nelusive. In this paper, we embark on an exploration of Sparse MoE's\ngeneralization error concerning various critical factors. Specifically, we\ninvestigate the impact of the number of data samples, the total number of\nexperts, the sparsity in expert selection, the complexity of the routing\nmechanism, and the complexity of individual experts. 
Our analysis sheds light\non \\textit{how \\textbf{sparsity} contributes to the MoE's generalization},\noffering insights from the perspective of classical learning theory.", + "authors": "Jinze Zhao, Peihao Wang, Zhangyang Wang", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2303.06318v2", + "title": "A Hybrid Tensor-Expert-Data Parallelism Approach to Optimize Mixture-of-Experts Training", + "abstract": "Mixture-of-Experts (MoE) is a neural network architecture that adds sparsely\nactivated expert blocks to a base model, increasing the number of parameters\nwithout impacting computational costs. However, current distributed deep\nlearning frameworks are limited in their ability to train high-quality MoE\nmodels with large base models. In this work, we present DeepSpeed-TED, a novel,\nthree-dimensional, hybrid parallel algorithm that combines data, tensor, and\nexpert parallelism to enable the training of MoE models with 4 to 8x larger\nbase models than the current state-of-the-art. We also describe memory\noptimizations in the optimizer step, and communication optimizations that\neliminate unnecessary data movement. We implement our approach in DeepSpeed and\nachieve speedups of 26% over a baseline (i.e. without our communication\noptimizations) when training a 40 billion parameter MoE model (6.7 billion base\nmodel with 16 experts) on 128 V100 GPUs.", + "authors": "Siddharth Singh, Olatunji Ruwase, Ammar Ahmad Awan, Samyam Rajbhandari, Yuxiong He, Abhinav Bhatele", + "published": "2023-03-11", + "updated": "2023-05-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.DC", + "cs.PF" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.14703v1", + "title": "Improving Expert Specialization in Mixture of Experts", + "abstract": "Mixture of experts (MoE), introduced over 20 years ago, is the simplest gated\nmodular neural network architecture. There is renewed interest in MoE because\nthe conditional computation allows only parts of the network to be used during\neach inference, as was recently demonstrated in large scale natural language\nprocessing models. MoE is also of potential interest for continual learning, as\nexperts may be reused for new tasks, and new experts introduced. The gate in\nthe MoE architecture learns task decompositions and individual experts learn\nsimpler functions appropriate to the gate's decomposition. In this paper: (1)\nwe show that the original MoE architecture and its training method do not\nguarantee intuitive task decompositions and good expert utilization, indeed\nthey can fail spectacularly even for simple data such as MNIST and\nFashionMNIST; (2) we introduce a novel gating architecture, similar to\nattention, that improves performance and results in a lower entropy task\ndecomposition; and (3) we introduce a novel data-driven regularization that\nimproves expert specialization. 
We empirically validate our methods on MNIST,\nFashionMNIST and CIFAR-100 datasets.", + "authors": "Yamuna Krishnamurthy, Chris Watkins, Thomas Gaertner", + "published": "2023-02-28", + "updated": "2023-02-28", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2209.13071v1", + "title": "Diversified Dynamic Routing for Vision Tasks", + "abstract": "Deep learning models for vision tasks are trained on large datasets under the\nassumption that there exists a universal representation that can be used to\nmake predictions for all samples. Whereas high complexity models are proven to\nbe capable of learning such representations, a mixture of experts trained on\nspecific subsets of the data can infer the labels more efficiently. However\nusing mixture of experts poses two new problems, namely (i) assigning the\ncorrect expert at inference time when a new unseen sample is presented. (ii)\nFinding the optimal partitioning of the training data, such that the experts\nrely the least on common features. In Dynamic Routing (DR) a novel architecture\nis proposed where each layer is composed of a set of experts, however without\naddressing the two challenges we demonstrate that the model reverts to using\nthe same subset of experts.\n In our method, Diversified Dynamic Routing (DivDR) the model is explicitly\ntrained to solve the challenge of finding relevant partitioning of the data and\nassigning the correct experts in an unsupervised approach. We conduct several\nexperiments on semantic segmentation on Cityscapes and object detection and\ninstance segmentation on MS-COCO showing improved performance over several\nbaselines.", + "authors": "Botos Csaba, Adel Bibi, Yanwei Li, Philip Torr, Ser-Nam Lim", + "published": "2022-09-26", + "updated": "2022-09-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2307.05956v2", + "title": "Language-Routing Mixture of Experts for Multilingual and Code-Switching Speech Recognition", + "abstract": "Multilingual speech recognition for both monolingual and code-switching\nspeech is a challenging task. Recently, based on the Mixture of Experts (MoE),\nmany works have made good progress in multilingual and code-switching ASR, but\npresent huge computational complexity with the increase of supported languages.\nIn this work, we propose a computation-efficient network named Language-Routing\nMixture of Experts (LR-MoE) for multilingual and code-switching ASR. LR-MoE\nextracts language-specific representations through the Mixture of Language\nExperts (MLE), which is guided to learn by a frame-wise language routing\nmechanism. The weight-shared frame-level language identification (LID) network\nis jointly trained as the shared pre-router of each MoE layer. 
Experiments show\nthat the proposed method significantly improves multilingual and code-switching\nspeech recognition performances over baseline with comparable computational\nefficiency.", + "authors": "Wenxuan Wang, Guodong Ma, Yuke Li, Binbin Du", + "published": "2023-07-12", + "updated": "2023-07-14", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + } + ], + [ + { + "url": "http://arxiv.org/abs/2401.14361v1", + "title": "MoE-Infinity: Activation-Aware Expert Offloading for Efficient MoE Serving", + "abstract": "This paper presents MoE-Infinity, a cost-efficient mixture-of-expert (MoE)\nserving system that realizes activation-aware expert offloading. MoE-Infinity\nfeatures sequence-level expert activation tracing, a new approach adept at\nidentifying sparse activations and capturing the temporal locality of MoE\ninference. By analyzing these traces, MoE-Infinity performs novel\nactivation-aware expert prefetching and caching, substantially reducing the\nlatency overheads usually associated with offloading experts for improved cost\nperformance. Extensive experiments in a cluster show that MoE-Infinity\noutperforms numerous existing systems and approaches, reducing latency by 4 -\n20X and decreasing deployment costs by over 8X for various MoEs. MoE-Infinity's\nsource code is publicly available at https://github.com/TorchMoE/MoE-Infinity", + "authors": "Leyang Xue, Yao Fu, Zhan Lu, Luo Mai, Mahesh Marina", + "published": "2024-01-25", + "updated": "2024-01-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.PF" + ], + "label": "Original Paper", + "paper_cat": "Mixture AND of AND Experts", + "gt": "Offloading-supported ML systems. In addition to the offloading systems evaluated above, we have also identified other related offloading-supported ML systems. vLLM [19] supports KV-cache offloading, which is complementary to expert offloading in MOE-INFINITY. The majority of offloading systems are designed for dense models (e.g., FlexGen [34], DeepPlan [14], and SwapAdvisor [12]), and they cannot support MoE inference without substantial extension. Tensor migration (e.g., DeepUM [16], Sentinel [30]) optimizes page migration overhead in unified memory, and MOE-INFINITY is complementary to these approaches. Model compression. Model compression is another popular approach to reduce model deployment costs. Typical compression techniques include quantization (e.g., MoQE [17], QMoE [8]) and expert pruning [4]. MOE-INFINITY is complementary with these techniques. Expert parallelism. MoE systems often adopt expert parallelism (EP) for distributed execution. Static EP planning often specifies expert location on initialization (e.g., DeepSpeedMoE [28], GShard [20], Lina [21]). Dynamic EP planning computes the location of experts at runtime (e.g., Janus [23], SmartMoE [40]). We leave the joint optimization of expert offloading and parallelism for future work. Workload-aware ML systems. ML systems can often benefit from being aware of workloads [24], thereby improving their performance. For instance, KungFu [25] explores adaptive deep learning model training, and Quiver [37] investigates how to leverage workload awareness in graph neural network serving. MOE-INFINITY is designed for MoE models, leading to different tracing, prefetching, and caching methods.", + "pre_questions": [], + "main_content": "Introduction Mixture-of-Experts (MoE) models are increasingly used in AI services, such as chatbots [2,15] and machine translation [5]. 
An MoE model often takes sequences (collections of tokens) as inputs and processes them using MoE layers, which contain routers that distribute tokens among several small neural networks, named 'experts'. Each expert handles a portion of the tokens. The token processing typically involves a generative inference procedure. A major challenge in deploying MoE models is their high memory costs. MoEs typically have far more parameters than dense transformer models. For instance, Google's Switch Transformers [7] have up to 1.5 trillion parameters distributed across 61,440 experts. Recent reports suggest that OpenAI's GPT-4 might implement MoE architectures with trillions of parameters. To accommodate this, an MoE system needs a significant number of GPUs to provide the required memory. This demand results in high deployment costs, as reported by leading MoE users such as OpenAI [1]. A promising approach to reducing the memory usage of MoE serving is parameter offloading [29,31]. In this approach, a serving system can store the parameters of an MoE on SSDs or host memory. These parameters are transferred to the GPU via PCIe only when necessary, thus alleviating memory constraints on the GPU. However, despite its potential, systems with offloading tend to have significant latency issues. This presents a challenge for interactive AI services, where meeting per-token-latency requirements within a very short time frame is critical, such as 100 milliseconds in recommender systems [35] and 1 second in chatbot applications [19,22]. Our objective is to explore a serving system design that can reduce the latency overhead of offloading MoE parameters. Our design is inspired by an important observation in MoE deployment: to maintain low latency in a resource-constrained environment, serving systems usually generate small batches for input sequences. For instance, on a server with 8 GPUs, serving systems such as AlpaServe [22] and Orca [39] typically have batch sizes under 32, which is much smaller than the large batches used in training [3, 18]. When processing small batches, we observe that MoE tends to exhibit sparse activation and temporal locality, where only a limited number of experts are repetitively activated in a generative inference process. Leveraging this observation, we propose a new design termed "activation-aware expert offloading". This design involves tracing expert activation during inference. Through trace analysis, we can predict which experts will be repetitively activated, allowing for effective prefetching and caching of experts in an offloading system, improving its latency performance. To make the proposed design feasible, we need to address several open concerns. In existing offloading-supported systems, tracing methods are tailored for dense transformer models (e.g., ZeRO-Offload [31]) or models with dynamic control flows (e.g., BrainStorm [6]). However, when applied to MoEs, these methods accumulate expert statistics over extended serving periods and fail to capture the necessary sparse activation and temporal locality, which are only visible at the sequence level during MoE inference. Additionally, existing prefetching strategies, such as those in ZeRO-Infinity [29] and BrainStorm [6], do not account for the sparse activation of experts, resulting in indiscriminate prefetching of all experts and leading to unnecessary and excessive prefetching traffic.
Finally, existing caching approaches (e.g., LFU [6,35] and LRU [16,29,30]) are not designed for leveraging sparse activation and temporal locality in MoEs, which results in low cache performance. In this paper, we introduce MOE-INFINITY, a new MoE serving system which realizes activation-aware expert offloading. Our contributions in designing MOE-INFINITY include: (1) Sequence-level expert activation tracing. MOE-INFINITY features a new approach, termed sequence-level expert activation tracing, for recording the sparse activation of experts during inference. The insight behind this approach is that, for a given sequence, MoE systems tend to activate experts in a pattern that exhibits temporal locality (often termed the temporal locality in MoE designs [15]). However, these patterns become less discernible when the tracing data is aggregated across multiple sequences. To address this, we have developed the Expert Activation Matrix (EAM), which logs which experts are activated during the generative inference process for individual sequences. For handling multiple sequences, we record separate EAMs. Additionally, we design an EAM collection construction algorithm which optimizes the selection of EAMs to represent distinct expert activation patterns. (2) Activation-aware expert prefetching. MOE-INFINITY further features a new approach for prefetching offloaded parameters, termed activation-aware expert prefetching. The unique aspect of this approach lies in its ability to enable an MoE serving system to progressively refine its prefetching priorities for experts during a generative inference process. The priority computation reflects two factors inherent to MoE: (i) it prioritizes prefetching for experts that are predicted to have higher activation ratios, and (ii) it focuses on prefetching experts that are nearer to the layer currently being executed. With such a priority design, MOE-INFINITY can significantly reduce prefetching traffic and start prefetching in a timely manner. (3) Activation-aware expert caching. MOE-INFINITY also features an approach for caching experts, termed activation-aware expert caching. This approach stands out from traditional caching methods in two key ways. First, it gives priority to experts that have shown higher activation ratios in processing the current sequence. Second, it favours experts located in the initial layers of the MoE, as these are less likely to benefit from prefetching. With this MoE-specific cache design, MOE-INFINITY can substantially increase the cache hit ratio, effectively improving the expert cache's performance. We evaluated the performance of MOE-INFINITY in serving MoEs in a GPU cluster (up to 24 GPUs). We include MoE models such as Switch Transformers [7] and NLLB [5] and inference workloads modelled after the Azure Trace [27]. The inference requests came from a range of datasets, including FLAN [38], BIGBench [36] and MMLU [11]. (Figure 1: Example of MoE model serving.) In comparison to state-of-the-art (SOTA) serving systems (including Zero-Infinity, Zero-Offload, and PyTorch with CUDA Unified Memory), MOE-INFINITY outperforms them by 20× and 4× in latency, and it further shows an 8× reduction in deployment cost.
Additionally, we conducted detailed micro-benchmarks to assess various components of MOE-INFINITY, focusing on expert prefetching, caching, and tracing. In these tests, MOE-INFINITY outperformed the existing prefetching methods used in leading systems including DeepSpeed and BrainStorm, as well as the caching methods used in SOTA MoE deployment systems. 2 Background. 2.1 Serving MoE Models. We illustrate the MoE model using an example shown in Figure 1. The serving system receives multiple user prompts, expressed as sequences. These sequences can be grouped as a batch for execution. Each sequence consists of multiple tokens. The MoE model takes batched input tokens to generate output tokens iteratively. For each sequence, a forward iteration predicts one new token until an EOS token is generated. Due to the presence of the KV cache, from the second forward iteration, the MoE model only takes the last predicted token as input. In each forward iteration, the input tokens are processed by multiple MoE layers, where each layer consists of a router and multiple experts. For each input token, the router decides which experts will process it. An MoE serving system must optimize its latency performance, measured through 'per-token latency'. This latency metric is the duration an MoE model takes to complete a forward iteration and generate an output token. The output token is promptly delivered to a system user, directly impacting the user experience. To achieve low latency, MoE serving systems often require substantial memory resources. For example, Switch Transformers can require up to 6TB of memory to store all parameters, necessitating 72 A100-80GB GPUs. Typically, the majority of the memory is assigned to host the experts' parameters. For instance, in a Switch Transformer, the dense part contributes less than 1% of total parameters and over 99% of the parameters are attributed to experts. 2.2 Parameter Offloading. A promising method to reduce the cost of deploying MoE models is through parameter offloading. Typically, this involves offloading experts onto an SSD. The serving system then performs generative inference for input sequences. The experts are only fetched into the GPU when needed and are later removed to make space for incoming experts. This strategy helps overcome the limitations of GPU memory capacity. Unlike other cost-saving techniques such as quantization and distillation, parameter offloading retains exact model accuracy while reducing the reliance on expensive GPU memory. However, its practical application in serving scenarios is often restricted due to significant latency overheads. For example, state-of-the-art offloading-supported systems like Zero-Infinity and Zero-Offload can introduce substantial latency. As our evaluation (Section 8) will show, after enabling offloading in serving Switch Transformers, these systems can increase latency to over 20s. Several factors contribute to this high latency overhead: (1) Slow on-demand fetching and excessive prefetching. Existing offloading-supported systems depend on slow on-demand fetching of parameters into GPU memory. Although prefetching is an option, it is designed for dense transformer models (ZeRO-Infinity [29] and FlexGen [34]) or those with dynamic control flows (BrainStorm [6]). As a result, they do not account for the activation of experts when processing a specific sequence. Consequently, they end up prefetching all parameters, causing excessive prefetching traffic and suffering from a bandwidth bottleneck on PCIe.
(2) Low cache performance. Existing systems might cache MoE parameters in GPU and host memory. However, the caching strategies they employ are based on those for operating systems (e.g., CUDA Unified Memory) or dense transformer models (e.g., BrainStorm [6] and Zero-Offload [31]). As a result, their caching strategies, such as Least-Recently-Used (LRU) and Least-Frequently-Used (LFU), are not aware of the activation of experts, leading to low cache hit ratios. While newer MoE-optimized systems [2] consider the position of experts while caching, they cannot analyze expert activation for a currently processed sequence. Our evaluations demonstrate that this leads to subpar cache performance. 3 Activation-Aware Expert Offloading. To address the latency issues with offloading, we propose a new design approach termed "activation-aware expert offloading" and realize it in MOE-INFINITY. In the following, we present its design intuition and overview. (Figure 2: Design overview.) Intuition. Our approach is grounded in the observation that serving systems often handle small batches of input sequences. In such scenarios, an MoE system tends to show "sparse activation" and "temporal locality", meaning only a few experts are repetitively activated for processing a specific sequence. We substantiated the concepts of sparse activation and temporal locality through a study on real-world traces, the details of which will be discussed in Section 8. The trace shows that serving systems typically process batches ranging from 1 to 32 inputs under a one-second latency constraint. With these batches, we typically observe 3%-20% of experts activated and 30%-46% of experts used more than once in the generative inference process. This trend persists even when batch sizes increase to 64: we observe 26% of experts being activated and 56% of these experts being reused at least once. Based on these observations, our approach involves tracking the activation patterns of experts for different sequences. Tracking each individual sequence is crucial, as an aggregation over all sequences would lead to uniform activation patterns, losing the detailed view of the experts' sparse activation and temporal locality. We use this tracking data to predict which experts are likely to be activated when progressing in an inference procedure. This allows for timely prefetching of the necessary experts. Additionally, when managing the cache for experts, we prioritize experts that are either likely to be reused or unlikely to be effectively prefetched, improving the cache's performance. Overview. Figure 2 illustrates the components developed for realizing activation-aware expert offloading in MOE-INFINITY. The process begins with providing an MoE checkpoint and a relevant dataset to MOE-INFINITY. A key feature of MOE-INFINITY is the sequence-level expert activation tracing component (1). This component analyzes which experts are activated with respect to each individual sequence during the inference process. It generates a collection of Expert Activation Matrices (EAMs), referred to as the EAMC (Expert Activation Matrix Collection). The EAMC is optimized to represent various expert activation patterns observed in the trace.
Upon preparing the EAMC, MOE-INFINITY leverages the MoE checkpoint to set up a generative inference runtime. The runtime handles ingested inference requests, and it collaborates with the sequence-level tracing component to maintain an EAM reflecting the ongoing generative inference process. MOE-INFINITY incorporates an activation-aware expert prefetching component (2) that assesses the current EAM alongside the EAMC. This assessment guides the prefetching decisions of experts from SSD to host memory and then to the GPU. Additionally, MOE-INFINITY employs an activation-aware expert caching component (3). This component evaluates the current EAM to constantly refine the cache decisions for experts in both the GPU and host memory. 4 Sequence-Level Expert Activation Tracing. This section presents the details of designing and implementing sequence-level expert activation tracing. 4.1 Motivating Examples. We present motivating examples to elucidate the design rationale behind the sequence-level expert activation tracing. Consider tracing the activation of experts while processing the sequences depicted in Figure 1. We employ EAMs for this purpose, resulting in EAM1, EAM2, and EAM3 as illustrated in Figure 3. In an EAM, each cell represents the count of expert activations for a specific sequence, calculated as the sum of (i) the number of tokens in the prompt and (ii) the number of output tokens generated during the inference process. A key aspect demonstrated through these examples is the sparse activation and temporal locality. For instance, in EAM2, sparse activation is evident as only one expert per layer is activated for the sequence. Furthermore, in EAM3, the temporal locality is evident since the same expert in each layer is reused four times for processing the sequence. Considering multiple sequences, existing tracing methods like LFU tend to aggregate counts across EAMs. For instance, they might sum EAM2 and EAM3 into an aggregated matrix. However, this approach obscures the nuances of temporal locality and sparse activation. Thus, maintaining separate EAMs in the trace is crucial for preserving these characteristics. Additionally, we observe a significant similarity among EAMs. For example, EAM1 and EAM2 exhibit high resemblance, as both activate the same experts on certain layers (expert 2 on layer 2 and expert 1 on layer 3). This similarity implies that representing different activation patterns does not require recording every possible pattern. Instead, selecting a representative set of EAMs suffices. 4.2 Expert Activation Matrix Collection. We formally define an EAM as follows: for a model with L MoE layers and E experts per layer, given a sequence of n tokens, an EAM M is an L × E matrix where M[i][j] is the number of tokens routed to expert e_{i,j}, i.e., the expert with index j at layer i. A row of the EAM, i.e., M[i], represents the expert activation of layer i, and we have \sum_j M[i][j] = n for all i. (Figure 3: Motivating examples — the EAMs EAM1, EAM2, and EAM3 recorded for the three example sequences.) We further define the EAMC as follows: given N sequences, the EAMC is a set of EAMs with fixed capacity P, where P ≪ N. The objective of the EAMC is to find P EAMs out of N such that each kept EAM represents a set of similar EAMs. For computing similarity, we need to compute the distance between EAMs.
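To make the EAM definition above concrete, the following Python sketch records a per-sequence EAM under top-1 routing (as in Switch-style MoEs); the helper names new_eam and record_routing are illustrative assumptions rather than MOE-INFINITY's actual API.

import numpy as np

def new_eam(n_layers: int, n_experts: int) -> np.ndarray:
    # An EAM is an L x E matrix of counts; M[i][j] counts the tokens routed
    # to expert j at layer i for a single sequence.
    return np.zeros((n_layers, n_experts), dtype=np.int64)

def record_routing(eam: np.ndarray, layer_idx: int, expert_ids) -> None:
    # expert_ids lists, for each token seen at this layer, the expert index
    # chosen by the router (top-1 routing assumed).
    for e in expert_ids:
        eam[layer_idx][e] += 1

# Example: a 3-layer, 2-expert model processing a sequence of 4 tokens
# (prompt plus generated tokens); every row then sums to n = 4.
eam = new_eam(3, 2)
for layer_idx, routed in enumerate([[0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]]):
    record_routing(eam, layer_idx, routed)
assert eam.sum(axis=1).tolist() == [4, 4, 4]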
The distance definition needs to meet two requirements: (i) capturing the position differences that reflect per-expert activation, and (ii) being independent of the number of tokens in the sequence. To meet this, we define the distance between any two EAMs $M_1$ and $M_2$ in Equation (1): $d(M_1, M_2) = 1 - \frac{1}{L} \sum_{l \in [0, L)} \mathrm{CosineSimilarity}\left(\frac{M_1[l]}{\sum M_1[l]}, \frac{M_2[l]}{\sum M_2[l]}\right)$ (1), where $M_1[l]$ is the l-th row of matrix $M_1$ and the summation computes the average cosine similarity across all layers. EAMC construction. To initialize the EAMC, we have the following process: given a dataset with N sequences and a capacity P, we aim to find the P EAMs to form an EAMC where the kept EAMs are closest to the centroids of the P clusters. The construction process uses K-Means clustering, with its key steps shown below: (i) EAM initialization: we find a relevant dataset that follows the same distribution as the inference requests. Often, we choose the validation dataset or the fine-tuning dataset, as they tend to show the same distribution as the inference requests. For each sequence in the dataset, we record its EAM. (ii) Clustering EAMs: when applying the K-Means algorithm, we specify the number of clusters as the capacity of the EAMC, with the distance defined by Equation (1). For each cluster, the EAM that is closest to the centroid is stored in the EAMC. 4.3 Practical Concerns. Theoretical evidence for sequence-level tracing. In addition to the empirical evidence that supports the need for sequence-level tracing, we also identify multiple pieces of theoretical evidence. MoEs, by design, are trained to exhibit sparse activation when processing sequences; otherwise, they cannot exhibit savings in computational cost compared to dense alternatives [33]. Additionally, state-of-the-art MoE models such as Mixtral [15] reveal that temporal locality is pervasive in inference, determined by how the MoE is designed and trained. Finally, leading practitioners with MoEs also report that, when processing a large number of sequences, expert activation exhibits a uniform distribution (otherwise unactivated experts become useless over an extended serving period) [7], revealing the need to conduct sequence-level tracing of expert activation. EAMC size. We were concerned that potential capacity limitations of the EAMC could prohibit its use in practical environments. However, we discovered that for all the datasets we are processing, we require only hundreds of EAMs in the EAMC, enough to demonstrate satisfactory performance (with more details reported in Section 8). Additionally, there is theoretical evidence suggesting that the size of the EAMC is often constrained, stemming from the way routers are trained to create groups of experts for processing different types of inputs [7]. Since the types of inputs are limited, the number of possible combinations of experts is also finite. Handling distribution shift. As we discussed above, the EAMC is constructed using a dataset provided by the user offline. In the online serving scenario, the distribution of EAMs stored in the EAMC might differ from the requests processed online (termed a distribution shift). In such cases, we have implemented mechanisms in MOE-INFINITY to update the EAMC: recording input sequences with insufficient performance, and adding these sequences to the dataset used for reconstructing the EAMC later. The reconstruction can be achieved online or in an offline fashion.
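As a minimal illustration of Equation (1) and the EAMC construction described in Section 4.2, the sketch below implements the layer-averaged cosine distance and a simple K-Means-style loop over traced EAMs; the function names, the random initialization, and the fixed iteration count are expository assumptions rather than MOE-INFINITY's exact implementation.

import numpy as np

def _normalize_rows(m: np.ndarray) -> np.ndarray:
    sums = m.sum(axis=1, keepdims=True)
    sums[sums == 0] = 1  # guard against layers that routed no tokens
    return m / sums

def eam_distance(m1: np.ndarray, m2: np.ndarray) -> float:
    # Equation (1): 1 minus the average, over layers, of the cosine similarity
    # between the row-normalized expert activations of the two EAMs.
    a = _normalize_rows(m1.astype(float))
    b = _normalize_rows(m2.astype(float))
    cos = (a * b).sum(axis=1) / (np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1) + 1e-12)
    return 1.0 - cos.mean()

def build_eamc(eams, capacity: int, n_iters: int = 20, seed: int = 0):
    # eams: list of L x E numpy count matrices traced from the dataset.
    # Cluster them into `capacity` groups under eam_distance, then keep the
    # traced EAM closest to each centroid (steps (i)-(ii) above).
    rng = np.random.default_rng(seed)
    centroids = [eams[i].astype(float) for i in rng.choice(len(eams), capacity, replace=False)]
    for _ in range(n_iters):
        assign = [int(np.argmin([eam_distance(e, c) for c in centroids])) for e in eams]
        for k in range(capacity):
            members = [eams[i] for i, a in enumerate(assign) if a == k]
            if members:
                centroids[k] = np.mean([m.astype(float) for m in members], axis=0)
    return [min(eams, key=lambda e: eam_distance(e, c)) for c in centroids]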
5 Activation-Aware Expert Prefetching This section presents the details of designing and implementing activation-aware expert prefetching. 5.1 Expert prefetching in generative inference We aim to enable expert prefetching in an MoE's generative inference process. Our aims include: (i) To prioritize prefetching requests by tracking expert activation during the inference procedure and continuously adjusting the priority based on the tracking information combined with collected activation traces. (ii) To effectively manage situations where prefetching requests have not yet been completed when the experts are needed for execution. To achieve these goals, we designed "MoE Inference with Expert Prefetching" within MOE-INFINITY, with its key steps outlined in Algorithm 1. To respond to an inference request, MOE-INFINITY uses an MoE model, a buffer for input and output tokens, the EAMC, and a priority queue for submitting expert prefetching requests. For experts in the priority queue, MOE-INFINITY fetches the experts from the external memory into the GPU memory in a sequential order, avoiding concurrent expert fetching which incurs bandwidth contention.

Algorithm 1 Generative Inference with Expert Prefetching
In: MoE m, Token Buffer b, EAM[] eamc, Priority Queue q
1: procedure INFERENCE(m, b, eamc, q)
2:   cur_eam = NEWEAM(m.n_layers, m.n_experts, 0)
3:   while b.last() != EOS do
4:     for l in m.layers do
5:       expert_token_map = l.route(b)
6:       for e, t in expert_token_map do
7:         cur_eam[l.idx][e.idx] += t.size()
8:       PREFETCH(m, cur_eam, eamc, l, q)
9:       for e, t in expert_token_map do
10:        if !e.ready() then
11:          q.submit(e, MAX_PRIORITY)
12:        e.wait()
13:        e.forward(t, b)  ▷ Update buffer
14: end procedure
15: function PREFETCH(m, cur_eam, eamc, cur_l, q)
16:   min_d = MAX_DISTANCE
17:   p_eam = NULL  ▷ Predicted EAM
18:   for eam in eamc do
19:     d = EAMDISTANCE(eam, cur_eam)
20:     if min_d > d then
21:       min_d, p_eam = d, eam
22:   for fl in m.layers[(cur_l.idx + 1) : m.n_layers] do
23:     n_token = SUM(p_eam[fl.idx])
24:     for e in fl.experts do
25:       p = p_eam[fl.idx][e.idx] / n_token
26:       p = (p + EPSILON) × (1 - fl.idx / m.n_layers)
27:       q.submit(e, p)
28: end function

To track expert activation, MOE-INFINITY initializes an EAM (denoted as cur_eam, short for the current EAM) at the start of an inference procedure, setting all expert counts to 0 (step 2). The cur_eam is then carried through each generation iteration (step 3). In every iteration, MOE-INFINITY iterates through MoE layers. Each MoE layer routes its input tokens to different experts (step 5). After this, the cur_eam is updated immediately (steps 6-7). With the updated cur_eam, the optimal priorities for expert prefetching requests can change. To reflect this change, MOE-INFINITY adjusts the priorities by resubmitting the prefetching requests (step 8). We will discuss the details of the priority adjustment in Section 5.2. For each activated expert, MOE-INFINITY needs to execute it by first checking if it is already available in the GPU (steps 9-10). If not available yet, MOE-INFINITY submits a request for fetching this expert with the highest possible priority (step 11), ensuring this request jumps over all prefetching requests in the queue. Once the expert is ready, the expert takes the routed tokens as the inputs and writes its output to the token buffer (step 13).
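The queue-jumping behavior of steps 9-11 can be illustrated with a small self-contained sketch; the ExpertPrefetchQueue class and its method names below are illustrative assumptions rather than MOE-INFINITY's actual API, but they show how re-enqueueing with lazy deletion keeps priorities consistent and how an on-demand request submitted at the maximum priority overtakes all pending prefetches.

import heapq
import itertools

MAX_PRIORITY = float("inf")

class ExpertPrefetchQueue:
    def __init__(self):
        self._heap = []                  # entries: [-priority, tie_breaker, expert_id]
        self._live = {}                  # expert_id -> its current (live) heap entry
        self._counter = itertools.count()

    def submit(self, expert_id, priority):
        # Re-submitting invalidates the old entry (lazy deletion), so the
        # queue always reflects the latest priority for each expert.
        if expert_id in self._live:
            self._live[expert_id][-1] = None   # mark the old entry as stale
        entry = [-priority, next(self._counter), expert_id]
        self._live[expert_id] = entry
        heapq.heappush(self._heap, entry)

    def pop(self):
        # Return the highest-priority expert, skipping stale entries.
        while self._heap:
            _, _, expert_id = heapq.heappop(self._heap)
            if expert_id is not None:
                del self._live[expert_id]
                return expert_id
        return None

q = ExpertPrefetchQueue()
q.submit("expert_L3_E7", priority=0.4)           # predicted activations
q.submit("expert_L4_E1", priority=0.2)
q.submit("expert_L2_E5", priority=MAX_PRIORITY)  # on-demand fetch (step 11)
assert q.pop() == "expert_L2_E5"                 # jumps over all prefetch requests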
5.2 Expert Prefetching Priority When submitting experts to the priority queue for prefetching, we want the experts likely to be activated sooner to be assigned a higher priority, preventing GPUs from being blocked waiting for the arrival of these experts. In realizing this, we exploit the following aspects inherent to MoE: (1) Prefetching experts with higher predicted activation ratios. To determine which experts are more likely to be activated, we leverage historical traces stored in the EAMC. We find the prior EAM that exhibits the highest similarity compared to the current EAM, which records the experts activated so far in processing the current input sequence. We implement this by iterating through all prior EAMs in the EAMC, computing their distance to the current EAM (using Equation (1)), and finding the one with the minimal distance (steps 16-21). For the chosen prior EAM, we consider the experts in the layers that are next to execute (step 22). For these experts, we predict their likelihood of being activated based on their activation ratios and use these activation ratios to determine their initial priority (step 25). (2) Prefetching experts closer to the currently executed layer. We further adjust the prefetching priorities based on the expert's layer index relative to the currently executed layer. Reasons include: (i) Experts closer to the currently executed layer should have higher priorities, as they will be needed sooner by the GPU than those further away; and (ii) Experts closer to the currently executed layer provide more confident predictions for their future execution, hence the confidence in the predicted activation ratios should be discounted over layers. This design intuition is implemented in step 26. Here, we use a layer decay factor to prioritize the experts closer to the current layer. The factor has a constant decay rate that is inversely proportional to the number of MoE layers. We also distinguish the priority for experts with a zero activation ratio. By adding a small EPSILON to those experts, the priority score of the experts with zero activation ratios can also be distinguished by layer decay. 5.3 Prefetching Implementation Concerns Prefetching priority queue. The prefetching of experts is implemented by a priority queue. For this queue, MOE-INFINITY has a thread dedicated to pulling the head expert and transferring it to the destination device (which can be either the GPU or the host memory). In the following, we discuss the details of managing the queue and the thread. When enqueuing an expert, we first try to remove it from the queue if it's already present, then re-enqueue it with an updated priority score. This approach ensures that the priority order is consistently maintained during task submissions. Additionally, we maintain a set of experts currently undergoing memory copy operations. If an expert is part of this set, it is skipped during the prefetch enqueue process to avoid duplication. For the dequeue process, we utilize a dedicated I/O thread on each PCIe link. This thread handles one expert at a time, effectively preventing contention among experts. Given that PCIe links do not strongly enforce priority, this method ensures a first-come, first-served handling of expert transfers. Experts in our system typically have large sizes, which allows for full utilization of the bandwidth. Before initiating a memory copy, we check the allocation status of the expert on the target device, avoiding unnecessary I/O operations.
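The priority computation of steps 22-27 (predicted activation ratio, an EPSILON floor, and linear layer decay) can be sketched as follows; the function name and the EPSILON value are illustrative assumptions.

EPSILON = 1e-4

def prefetch_priorities(predicted_eam, cur_layer_idx, n_layers):
    # predicted_eam: the EAMC entry closest to the current EAM,
    # given as per-layer lists of token counts.
    scores = {}  # (layer_idx, expert_idx) -> prefetching priority
    for layer_idx in range(cur_layer_idx + 1, n_layers):
        row = predicted_eam[layer_idx]
        n_tokens = sum(row)
        layer_decay = 1.0 - layer_idx / n_layers
        for expert_idx, count in enumerate(row):
            ratio = count / n_tokens if n_tokens > 0 else 0.0
            # EPSILON keeps zero-ratio experts distinguishable by layer decay.
            scores[(layer_idx, expert_idx)] = (ratio + EPSILON) * layer_decay
    return scores

# Example: 4 MoE layers, 3 experts per layer, currently executing layer 1.
predicted = [[4, 0, 0], [0, 4, 0], [3, 1, 0], [0, 0, 4]]
scores = prefetch_priorities(predicted, cur_layer_idx=1, n_layers=4)
assert scores[(2, 0)] > scores[(3, 2)]  # the closer layer wins despite a lower ratio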
Prefetching with multi-tier memory. We have implemented prefetching to cover multi-tier memory (including SSD, DRAM, and GPU HBM), a memory architecture commonly used in a serving-purposed GPU server. To support the multi-tier memory architecture, experts are first allocated to fill up the GPU memory logically. Once the GPU memory is fully utilized, subsequent experts are assigned to DRAM. Our prefetching process is designed to optimize memory transfers between different tiers. When an expert needs to be prefetched from SSD to GPU, it is initially dequeued for an SSD-to-DRAM transfer, then re-enqueued for subsequent DRAM-to-GPU prefetching. This strategy allows SSD-to-DRAM and DRAM-to-GPU prefetching to occur simultaneously, maximizing the use of aggregated PCIe bandwidth. Prefetching priority sensitivity. Our priority computation involves two factors: (i) the expert activation ratios and (ii) the layer decay. When designing this priority, we considered different ways to combine the two factors (e.g., weighted sum) and concluded with the method presented above as it shows the best performance in our deployment. We also considered layer decay methods, including exponential decay and inverse decay. We discovered that linear decay offers similar performance benefits and chose it for its simple nature. 6 Activation-Aware Expert Caching In this section, we discuss the details of designing and implementing activation-aware expert caching. 6.1 Expert Cache We observe several key reasons why an expert cache is effective in enhancing the performance of an offloading-enabled MoE serving system. Firstly, the system frequently reuses experts during multiple iterations of a generative inference procedure. Consequently, caching these reused experts in memory locations closer to the GPU, such as GPU HBM and host memory, can circumvent the need for costly retrievals from SSDs. Secondly, at the outset of executing MoE layers, the system lacks adequate tracking information for expert activation, making it challenging to prefetch these experts effectively. Therefore, caching these initial layers can significantly improve their locality.

Algorithm 2 Expert Cache Replacement
In: Expert e_put, Expert[] cache, EAM cur_eam
1: function PUT(e_put, cache, cur_eam)
2:   min_p = MAX_PRIORITY
3:   idx_put = 0
4:   n_layers = cur_eam[:].size()
5:   for idx, e in cache do
6:     n_token = SUM(cur_eam[e.layer_idx])
7:     p = cur_eam[e.layer_idx][e.idx] / n_token
8:     p = (p + EPSILON) * (1 - e.layer_idx / n_layers)
9:     if p < min_p then
10:      min_p, idx_put = p, idx
11:   cache[idx_put] = e_put
12: end function

Driven by these reasons, we have developed a multi-tier expert cache for MOE-INFINITY. This cache comprises two levels: a GPU cache and a host memory cache. The GPU cache is initialized with experts in topological order, where experts are filled into the GPU layer by layer, and similarly, the host memory cache is set up with the remaining experts that cannot fit into the GPU, following the same procedure. When executing an expert, the GPU first searches its local cache. If the expert is not found there, the system will then fetch it from the next level up, which is the host memory cache. Should the expert be missing from both caches, the system will finally retrieve it from the SSD. To enhance the performance of the expert cache, we have delved into integrating expert activation awareness.
This approach is manifested in our expert cache replacement algorithm, as detailed in Algorithm 2. The algorithm is activated when there is a need to fetch an expert into a memory device, be it GPU HBM or host memory, from a higher tier (e.g., SSD). It operates by considering the incoming expert, alongside an array of currently cached experts (representing the cache), and the current EAM (which records the expert activation belonging to the ongoing generative inference). In the process of selecting a cached expert for replacement, we identified two aspects of MoE activation awareness that can improve cache performance. (1) Caching experts with higher observed activation ratios. For the experts in the cache, we identify that their likelihood of being reused is proportional to the activation ratios recorded in the current EAM. The intuition is that experts experience temporal locality during each iteration of generation, meaning an expert is more likely to be reused if it has been used frequently during the past iterations. To realize this, Algorithm 2 iterates through all experts in the cache (step 5), and for each expert, it calculates the activation ratio and uses the ratio as the initial caching priority (steps 6-7). (2) Caching experts closer to the starting layer. Algorithm 2 further adjusts the expert's caching priority according to its location relative to the first MoE layer (step 8), ensuring the experts closer to the first layer are prioritized in the cache. 6.2 Cache Implementation Concerns Deciding cache capacity. We have implemented various strategies for determining the cache capacity on GPUs and host memory. For the cache on the GPU, our initial step involves assigning the dense part of the MoE model to the GPU, as this part is used in every forward pass of the model. Its constant presence in GPU memory helps reduce latency. To decide the remaining cache capacity, we first calculate the memory needed for intermediate results produced during inference, taking into account the maximum batch size and output length. The leftover GPU memory, after this allocation, is then designated as the cache for storing experts. Regarding the cache on host memory, we initially allocate memory to the inference runtime (e.g., PyTorch) and then assign the remaining memory to the cache. Integrating caching into prefetching pipelines. We have also integrated our cache designs into the expert prefetching pipeline. This integration involves decisions about when a head expert is selected from the prefetching priority queue, whether the prefetching thread should replace an expert in the cache, and in which tier of cache (GPU cache or the host memory cache) this replacement operation should be performed. To jointly optimize the performance of prefetching and caching, we need to give priority to prefetched experts over those already cached. The prefetched experts are intended to first fill up the GPU memory and then the host memory. When an expert is dequeued from the prefetching queue, the cache replacement algorithm is applied to the target device before initiating the actual memory copy. Caching priority sensitivity. In managing the cache, we apply a weight decay strategy similar to the one used in determining prefetching priorities. This approach ensures that the priority of cached data aligns with its likelihood of being required in upcoming tasks. By closely aligning the caching strategy with the prefetching priorities, we observe that this method provides sufficient performance (with more details reported in Section 8).
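The replacement policy of Algorithm 2 can be rendered as a short Python sketch; the CachedExpert type and the EPSILON value are illustrative assumptions, and the priority formula mirrors steps 6-8.

from dataclasses import dataclass

EPSILON = 1e-4

@dataclass
class CachedExpert:
    layer_idx: int
    expert_idx: int

def put(e_put, cache, cur_eam):
    # Replace the cached expert with the lowest caching priority.
    # cur_eam: per-layer lists of token counts for the ongoing inference.
    n_layers = len(cur_eam)
    min_p, idx_put = float("inf"), 0
    for idx, e in enumerate(cache):
        n_tokens = sum(cur_eam[e.layer_idx])
        ratio = cur_eam[e.layer_idx][e.expert_idx] / n_tokens if n_tokens else 0.0
        # A higher observed activation ratio and proximity to the first layer
        # both raise an expert's priority to stay cached.
        p = (ratio + EPSILON) * (1.0 - e.layer_idx / n_layers)
        if p < min_p:
            min_p, idx_put = p, idx
    cache[idx_put] = e_put

# Example: a 2-slot cache; the layer-1 expert with no observed activation is evicted.
cur_eam = [[3, 1], [0, 4]]
cache = [CachedExpert(0, 0), CachedExpert(1, 0)]
put(CachedExpert(1, 1), cache, cur_eam)
assert cache[1] == CachedExpert(1, 1)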
7 System Implementation and Optimizations We have implemented MOE-INFINITY in over 5,000 LoC of C++ and 6,500 LoC of Python. Currently, MOE-INFINITY supports MoE checkpoints in the HuggingFace and PyTorch formats. In addition to implementing the tracing, prefetching, and caching discussed above, our implementation has further tackled the following aspects: Optimizing multi-GPU server performance. Until now, our discussion has primarily focused on a single-GPU deployment. However, MOE-INFINITY's implementation has been significantly optimized in multi-GPU server environments. To enable efficient access to experts in host memory by multiple GPUs, we have implemented extensive NUMA (Non-Uniform Memory Access) optimizations in our expert cache. This includes a memory pool that reduces the latency overhead of freeing large memory blocks, pinned memory that prevents redundant copying with the CUDA driver, and an individual memory pool for each NUMA node. Additionally, considering that each expert is associated with multiple tensors, which need to be readily available simultaneously for inference, MOE-INFINITY's prefetching thread fuses the copy requests for all tensors linked to a single expert, improving I/O efficiency. Finally, MOE-INFINITY allocates a prefetching thread to each GPU, enhancing the utilization of the parallel PCIe links connected to these GPUs. Supporting cluster deployment via expert parallelism. MOE-INFINITY can support MoE checkpoints requiring a multi-server deployment. This is achieved by integrating expert parallelism into MOE-INFINITY, as is commonly implemented in MoE systems for distributed execution. Users of MOE-INFINITY specify the number of available servers and the memory size of the MoE models. MOE-INFINITY then adopts expert parallelism to optimize communication costs among the servers by strategically assigning experts to different servers. Since offloading is enabled, the server's memory capacity can be set to a value larger than the physical memory, thereby overcoming the memory wall limitation. For experts that cannot be accommodated by the GPUs, MOE-INFINITY places these experts on local SSDs, creates expert caches in both host and GPU memory, and initiates prefetching threads. After launching the MoE serving service, MOE-INFINITY engages in expert prefetching and caching, working in tandem with expert parallelism.
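One possible way to realize the per-expert copy fusion mentioned above is sketched below with PyTorch; the real system implements this logic in C++, so the function name and the single-buffer staging strategy here are assumptions for illustration only.

import torch

def fuse_and_upload(expert_tensors, device="cuda:0"):
    # Stage all tensors of one expert in a single pinned host buffer and
    # issue one host-to-device copy instead of many small ones.
    flat = [t.detach().contiguous().view(-1) for t in expert_tensors]
    fused = torch.cat(flat).pin_memory()               # pinned staging buffer
    fused_gpu = fused.to(device, non_blocking=True)    # single PCIe transfer
    views, offset = [], 0
    for t in expert_tensors:
        n = t.numel()
        views.append(fused_gpu[offset:offset + n].view(t.shape))
        offset += n
    return views  # GPU views become valid once the copy completes on the current stream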
8 Evaluation Our evaluation has the following goals:
• Assess various performance aspects of MOE-INFINITY using real-world serving workloads and models.
• Examine the effectiveness of activation-aware expert prefetching and caching in diverse scenarios.
• Analyze the effectiveness of sequence-level tracing and evaluate MOE-INFINITY's efficiency in adapting to changes in data distribution.
• Investigate the effects of various implementation enhancements in MOE-INFINITY.
In pursuit of these goals, we compare MOE-INFINITY with numerous baseline systems and existing approaches for parameter prefetching, caching, and tracing. 8.1 Experiments Setup Testbeds. We conducted our experiments on two different testbeds as follows: (1) 8-GPU server: an 8-GPU server configured with eight NVIDIA RTX A5000 GPUs, 1TB of DDR4 memory, and two 4TB NVMe SSDs set up in RAID0, and (2) 6-node GPU cluster: a 6-node 24-GPU cluster with each node having 4 NVIDIA V100 GPUs and 385GB CPU memory. Datasets. We generate inference requests using a wide range of datasets, including BIGBench [36], FLAN [38], and MMLU [11]. These datasets cover various tasks, including reasoning, contextual question answering, free response, and translation. If the dataset is not specified, we mix all three datasets to create greater variety in the input sequences, emulating a real-world chatbot service. Models. We have tested a wide range of model checkpoints obtained from HuggingFace [13], including various sizes of Switch Transformers [7] and NLLB-MoE [5]. We detail the configurations of these models in the corresponding experiments. 8.2 MoE Serving Performance Our first experiments assess the performance of MOE-INFINITY when serving MoEs for various inference workloads. Similar to existing serving system studies such as Clockwork [9] and AlpaServe [22], our workloads are modelled after the Azure workload trace [32]. Each sequence forms an inference request. Multiple sequences are batched until they either reach a maximum batch size of 16 or a maximum waiting time of one second, both parameters referenced from AlpaServe. We replayed the workload for 30 minutes and reported the average latency, post system warm-up. We consider several baselines: (i) ZERO-INFINITY: a commonly used DeepSpeed version [29] which is configured to offload experts to the SSD; (ii) ZERO-OFFLOAD: an optimized version of DeepSpeed [26] which offloads experts to the DRAM, achieving faster speeds in accessing offloaded experts; and (iii) PYTORCH-UM: an MoE serving system in which we extend PyTorch with CUDA's Unified Memory [10]. This system delegates the expert prefetching and caching to the CUDA driver, and CUDA's unified memory is widely adopted in today's offloading-supported systems, such as Sentinel [30] and DeepUM [16]. Impact of requests-per-second. Requests-Per-Second (RPS) determines the load on a serving system. We choose the models SWITCH-BASE-128, SWITCH-BASE-256, SWITCH-LARGE-128, and NLLB-MOE-128, covering different model configurations. We report the results on one GPU on the 8-GPU server (results for 8 GPUs and the 6-node cluster are reported later). As shown in Figure 4, for SWITCH-BASE-128, when the load is relatively low (at RPS=0.5), MOE-INFINITY achieves a latency of 99ms. With the same RPS, the latency exceeds 20s for both ZERO-INFINITY and ZERO-OFFLOAD.
Figure 4: Impacts of RPS.
PYTORCH-UM achieves better latency (366ms) than the other two baselines because it fetches only the activated experts into GPUs, avoiding unnecessary copies. However, its performance is still not as good as MOE-INFINITY. Compared to PYTORCH-UM, we observed that MOE-INFINITY can reduce prefetching traffic by over 7GB out of a total of 13GB.
When considering the one-second latency constraint, MOE-INFINITY meets the constraint while increasing the RPS to 7.9, which is 2.4× higher than PYTORCH-UM and 14.5× higher than ZERO-INFINITY and ZERO-OFFLOAD. These results demonstrate MOE-INFINITY's ability to achieve high-throughput, low-latency inference with limited resources. Figure 4 also shows the results of SWITCH-BASE-256, allowing us to examine the performance of MOE-INFINITY with an increasing number of experts in each MoE layer. MOE-INFINITY achieves 126ms latency when RPS is 3.8. PYTORCH-UM achieves 716ms latency when RPS is 0.6. These results show that MOE-INFINITY's sequence-level tracing can scale with the number of experts and maintain high prediction accuracy. In contrast, PYTORCH-UM and the other two baselines all exhibit degraded prefetching and caching performance when the number of experts is increased. Figure 4 additionally shows the results of serving SWITCH-LARGE-128, a model with significantly more parameters and layers than SWITCH-BASE-128. MOE-INFINITY achieves 255ms latency when RPS is 1.6. In contrast, PYTORCH-UM achieves 978ms latency when RPS is 0.5. These results benefit from MOE-INFINITY's ability to continuously refine its decisions for prefetching and caching within an inference procedure, allowing it to achieve robust performance with larger MoEs. Figure 4 finally shows the results of serving NLLB-MOE-128, a model fine-tuned for a different task, machine translation. MOE-INFINITY achieves 518ms latency at an RPS of 0.6, improving latency and RPS by 3.8× and 12× over PYTORCH-UM. A key reason is that MOE-INFINITY constructs the EAMC for different relevant datasets and maintains an EAM for the ongoing inference, showing its adaptiveness to various workloads.
Figure 5: Latency CDF.
Latency CDF. We provide a detailed view of the latency across all requests through CDF plots. In these plots, we include only MOE-INFINITY and the best-performing baseline, PYTORCH-UM. Figure 5 shows the CDFs of both SWITCH-LARGE-128 and NLLB-MOE-128. Here, we show the systems in both low load and high load, where the low load indicates a low queuing time and the latency is dominated by the inference time, while the high load indicates a high queuing time and the latency starts to be dominated by the queuing time. With a low load, we can observe MOE-INFINITY achieves stable low latency performance for all requests with both models. This is because activation-aware prefetching and caching have largely eliminated the need for slow on-demand fetching of experts, minimizing the GPU's blocking time. With the same load, PYTORCH-UM suffers from slow on-demand expert fetching, particularly evident with the model NLLB-MOE-128, where the tail latency is 22.1× worse than MOE-INFINITY. This performance improvement is also verified with deeper tracing in the system: we observe that MOE-INFINITY leads to 67% less on-demand fetching than PYTORCH-UM. With a high load, MOE-INFINITY can still maintain consistently low latency across different requests. In contrast, the latency of PYTORCH-UM shifts predominantly to around 6.5s in the case of serving NLLB-MOE-128.
In this case, the GPU utilization of PYTORCH-UM is below 10% since its GPU is blocked by excessive on-demand fetching and page faults in the operating system, both of which are effectively mitigated in MOE-INFINITY. Impact of batch size. Batch sizes can determine the ratio of activated experts when processing an input batch. Hence, we measure the performance of MOE-INFINITY and the baselines with batch sizes ranging from 1 to 64. For batch sizes over 64, both MOE-INFINITY and the baselines start to suffer from out-of-memory issues due to the exploding size of the KV-cache used in generative inference.
Figure 6: Impacts of batch size.
Figure 7: Cost efficiency.
As shown in Figure 6, with SWITCH-LARGE-128, we can observe MOE-INFINITY constantly achieves better latency performance than the baselines with batch sizes up to 64. This is because the sparse activation and the temporal locality remain evident even with these relatively large batch sizes. In practice, typical batch sizes in a serving system are in the range of 1 to 32 (2 to 16 reported in AlpaServe and 8 to 32 reported in Orca), indicating that activation-aware expert offloading is useful for typical serving scenarios. For NLLB-MOE-128, MOE-INFINITY achieves a latency of 326ms at batch size 1 and 1.81s at batch size 64. Considering PYTORCH-UM, its latency jumps to 15.38s at batch size 64, indicating that the larger batch size complicates the activation patterns of experts. As a result, maintaining aggregated metrics for their activation (the case for LFU and LRU) fails to capture the sparse activation and temporal locality in MoE. Cost efficiency. We also measure how much cost can be saved by using MOE-INFINITY. Here, we consider the number of GPUs as the indicator of the cost of a long-lasting serving system. We have to exclude the results of PYTORCH-UM since CUDA Unified Memory does not support the sharing of PyTorch tensors in host memory among multiple GPUs. According to Figure 7, with SWITCH-LARGE-128, ZERO-OFFLOAD requires 4× more GPUs than MOE-INFINITY to meet the one-second latency requirement. Considering NLLB-MOE-128, ZERO-OFFLOAD cannot meet the one-second latency requirement even with 8 GPUs, and it reaches 1.9s latency. In contrast, MOE-INFINITY can reach 122ms even with a single GPU. With these results, MOE-INFINITY can achieve over 8× better cost efficiency than the SOTA multi-GPU serving system. In the case of NLLB-MOE-128, 8 GPUs are not sufficient for ZeRO to hold all parameters, so it still suffers from redundant prefetching traffic. Meanwhile, MOE-INFINITY already achieves performance as if all needed parameters were in GPU memory.
Figure 8: Impacts of datasets.
Figure 9: Impacts of numbers of experts.
In a multi-GPU environment, we observe that MOE-INFINITY not only benefits from better prefetching and caching, but it also benefits from our multi-GPU optimizations, with details reported later in this section. Impacts of datasets. The sparse activation and temporal locality can also vary across different datasets. We evaluate this and report results in Figure 8. Across all datasets (FLAN, BIGBench, and MMLU), we can observe MOE-INFINITY consistently achieves much lower latency than all baselines. These results show that the EAMC and the EAM used in MOE-INFINITY allow it to adapt to different activation patterns. In MOE-INFINITY, the latency varies by 96ms for NLLB-MOE-128, while in ZeRO it is as large as 2s. 8.3 Activation-Aware Expert Prefetching We then show a micro-benchmark of activation-aware expert prefetching. For a fair comparison, we implement several prefetching approaches in MOE-INFINITY: (i) TOPK: an approach adopted by ZERO-INFINITY, which prefetches the top-K experts by expert IDs in the next layer. The top-K is optimized using the automatic performance tuning toolkit from the same library so that it exhibits the best performance. (ii) TRACED-TOPK: an approach adopted by BRAINSTORM, which traces the usage frequency of experts and prefetches the top-K popular experts in the next layer; the top-K parameter is empirically optimized and we only report its best performance. Impacts of numbers of experts. The number of experts decides the difficulty of predicting expert activation. To study this, we use SwitchTransformer with the number of experts per layer ranging over 8, 16, 32, 64, 128, and 256. For all these models, we measure the prediction accuracy achieved by different prefetching methods. From Figure 9, with only 8 experts per layer, MOE-INFINITY and the baselines all achieve high prediction accuracy. With more experts, MOE-INFINITY starts to show noticeable gains in prediction accuracy, and this gain is evident with 32 experts per layer. With 256 experts per layer, MOE-INFINITY achieves 55% accuracy, while TRACED-TOPK achieves 34% and TOPK achieves 7%. These results show that to accurately capture the activation patterns, we need not only tracing (done in TRACED-TOPK), but also more fine-grained sequence-level tracing and continuous prediction refinement. Impacts of prefetching bandwidth. Prefetching performance is also heavily decided by the bandwidth between different memory tiers (e.g., the bandwidth between GPU and host memory). To study this, we evaluate MOE-INFINITY and the baseline prefetching methods with simulated prefetching bandwidth from 8GB/s to 128GB/s, representing different generations of PCIe technology. We measure the prefetch accuracy as the recall of activated experts that are covered by prefetching. We expect that a larger bandwidth can accommodate more mispredictions.
Figure 10: Impacts of prefetching bandwidth.
From Figure 10, with SWITCH-LARGE-128, we observe that the accuracy of MOE-INFINITY increases linearly and faster than the baselines, achieving 2.6x higher accuracy (from 38% to 98%) under 128GB/s bandwidth. MOE-INFINITY prefetches not only experts in the next layer but also experts deeper in the model if bandwidth is available, significantly improving the effectiveness of prefetching. In contrast, the baselines only look at the next layer, limiting their capability.
As for NLLB-MOE-128, MOE-INFINITY achieves 1.5x higher accuracy (from 63% to 99%) under 128GB/s bandwidth. This is because NLLB-MOE-128 predominantly handles sequences for English translation, and its expert activation exhibits a high degree of similarity, which is easy for the baselines to capture. Effects of continuous priority refinement. We assess the effects of continuous priority refinement. After disabling the continuous refinement, MOE-INFINITY makes a one-shot prediction for prefetching after the first MoE layer's router outputs. With the refinement enabled, the prediction is updated at every MoE layer. In the case of a server with PCIe 4.0 (32GB/s prefetch bandwidth), disabling refinement degrades accuracy by 10% for SWITCH-BASE-128 and 23% for NLLB-MOE-128. Effects of activation-aware priority. We assess the effectiveness of using an activation-aware priority when prefetching experts. Here, we measure the expert-ready latency on the GPU (i.e., the time spent blocking while waiting for expert parameters). When disabling the priority, we only allow on-demand fetches to jump over queued prefetching requests. Our measurement shows that the tail latency is reduced by 4× when MOE-INFINITY's activation-aware priority is enabled for SWITCH-LARGE-128. 8.4 Activation-Aware Expert Caching We also show micro-benchmarks for activation-aware expert caching. For a fair comparison, we implement several baseline caching approaches in MOE-INFINITY: (i) LEAST-FREQUENT-USED (LFU): an approach adopted by BrainStorm for its cache management; (ii) LEAST-RECENT-USED (LRU): an approach adopted by the CUDA driver in managing the system cache for objects in unified memory; (iii) NEIGHBOR-AWARE: an approach adopted by ZERO-INFINITY which enforces the neighbors of experts to be cached together, improving cache hit ratios compared to LRU; and (iv) ORACLE: representing the theoretical best caching results obtained through a trace analysis of the cache. Impacts of cache size. We want to study how the cache size affects the performance of MOE-INFINITY. We vary cache sizes from 4GB to 40GB and measure the cache hit ratios of MOE-INFINITY and the baseline approaches.
Figure 11: Impacts of cache size.
Figure 11 shows the result for SWITCH-LARGE-128. In our single-GPU setting, the available cache size for experts in SWITCH-LARGE-128 is 15GB (caching at most 535 experts among 3072 experts). At this point, the hit ratio is 46%, which is 10% lower than ORACLE, while the best baseline reaches 32%. The higher hit ratios in MOE-INFINITY mainly benefit from its priority score design, which prioritizes (i) experts reused in multiple iterations (on average, 38% of experts are reused), as well as (ii) experts in the initial layers (30% of cache hits fall into this category). Figure 11 also shows the result for NLLB-MOE-128. With a small cache size of 8GB, MOE-INFINITY can only cache at most 60 experts out of 1536 experts. In this case, the hit ratio is 34%, 9% lower than ORACLE, while the best baseline reaches 21%.
Figure 12: Impacts of EAMC capacity.
LRU and NEIGHBOR-AWARE perform worse in such cases, as the reuse of an expert may cross multiple iterations, and this reuse opportunity cannot be captured since the cache size is limited. LFU only counts the access frequency of the experts in the cache. When an expert is evicted, its counter is reset, failing to account for the reuse across iterations. When the cache size is sufficient to cover all experts used in the iterations (40GB), the EAM essentially behaves like LFU, counting expert activation frequency without counter resets. Caching priority breakdown. Here we explain in further detail how each component in the caching priority score contributes to the overall improvement in the real deployment setting. Using only the layer decay priority, MOE-INFINITY improves by 6% (44% of the total improvement) on SWITCH-LARGE-128 and by 7.5% (57% of the total improvement) on NLLB-MOE-128 over LFU. This is because NLLB-MOE-128 experiences more uniform activation on the first layer compared to SWITCH-LARGE-128. The cross-iteration priority covers the remaining 7% and 5.5% for SWITCH-LARGE-128 and NLLB-MOE-128. 8.5 Sequence-Level Expert Activation Tracing We further study several practical concerns when conducting sequence-level expert activation tracing. Impacts of EAMC capacity. We want to know how the capacity of the EAMC affects MOE-INFINITY's performance. We conducted 3000 inference requests from the mixed dataset against both models, increasing the capacity of the EAMC to observe changes in inference latency and prediction accuracy for prefetching. Figure 12 shows the results of our experiment. 'Switch' stands for SWITCH-LARGE-128, and 'NLLB' stands for NLLB-MOE-128. As the EAMC capacity increases, the latency of both models gradually decreases until it reaches a stable point. Similarly, the accuracy of predictions increases and eventually converges to a steady state. Increasing the EAMC capacity further has a marginal effect. When the EAMC capacity of SWITCH-LARGE-128 reaches 100, and the capacity of NLLB-MOE-128 reaches 110, further capacity increases no longer improve latency or prediction accuracy. This indicates that when the EAMC capacity reaches a certain level, the system is capable of accommodating a sufficient number of requests such that these requests can represent the majority of possible activation patterns.
Figure 13: Cluster scalability.
Memory and computation cost of EAMC. Since the EAMC is used online for activation prediction, we want to study its performance overheads. Here, we set the memory limit to 1.8MB, which allows the EAMC to accommodate up to 300 EAMs. Searching for the most similar EAM in the EAMC costs 21us. Both memory and computation overhead are less than 1% of the model inference memory and latency. Impacts of distribution drift. We are concerned about how MOE-INFINITY performs if there is a drift in the data distribution in an online serving scenario. To study this, we conducted an experiment where MOE-INFINITY was initially deployed on MMLU for 100 inference requests. Subsequently, we transitioned the dataset to BIGBench to observe the number of input sequences required for MOE-INFINITY to re-adapt to the new data distribution. We rerun the construction algorithm for the EAMC after 100 input sequences from the new dataset.
For all the models we considered, MOE-INFINITY uses approximately 10-13 input sequences to wait for the reconstruction to finish in the background. Prediction accuracy recovers immediately after that. In practice, we have implemented the online reconstruction of EAMC in MOE-INFINITY. 8.6 Effects of Implementation Optimizations Finally, we report the effects of the various implementation optimizations in MOE-INFINITY. Cluster scalability. With the support of expert parallelism, MOE-INFINITY can use multiple nodes to host MoEs that cannot be fitted into the SSD of a GPU server. For this, we want to study the cluster scalability of MOE-INFINITY. Figure 13 (top) demonstrates the latency. We observe that the latency scales down with the number of nodes. SWITCH-LARGE-128 has 200ms latency on one node and decreases to 97ms on 6 nodes. MOE-INFINITY preserves the parameter placement returned by the expert parallelism planner (the same as the one by DeepSpeed). Figure 13 (bottom) demonstrates the inference throughput (TP). We observe that the TP scales with the number of nodes. NLLB-MOE-128 has TP=0.6K tokens/s on one node and scales to TP=2.4K tokens/s on 6 nodes. Effects of multi-GPU server optimization. MOE-INFINITY adopts many optimizations for its performance on a multiGPU server. In the case of serving SWITCH-LARGE-128, after enabling atomic memory copy, MOE-INFINITY experiences 12 2.2\u00d7 speedup (7.2 ms down to 3.3 ms) on copying experts from DRAM to GPU and 1.3\u00d7 speedup on SSD to DRAM (4 ms down to 3 ms). The bandwidth on PCIe links can be fully utilized even though experts contain multiple small tensors. Then, applying the NUMA memory pool results in an additional 1.4\u00d7 improvement by avoiding cross-NUMA data movement, bringing latency down to 2 ms per expert. This paper describes the design and evaluation of MOEINFINITY. By employing various methods for activationaware expert offloading, MOE-INFINITY significantly outperforms SOTA systems when serving various MoEs. We expect that the effectiveness of MOE-INFINITY will be further evident on the upcoming serving servers (e.g., NVIDIA Grace-Hopper), where the bandwidth between host memory and GPUs is anticipated to increase substantially, enhancing the efficiency of the expert offloading approach. We plan to open-source MOE-INFINITY upon acceptance for publication, and we foresee MOE-INFINITY becoming a platform for exploring MoE-optimized deployment systems." + }, + { + "url": "http://arxiv.org/abs/2309.06180v1", + "title": "Efficient Memory Management for Large Language Model Serving with PagedAttention", + "abstract": "High throughput serving of large language models (LLMs) requires batching\nsufficiently many requests at a time. However, existing systems struggle\nbecause the key-value cache (KV cache) memory for each request is huge and\ngrows and shrinks dynamically. When managed inefficiently, this memory can be\nsignificantly wasted by fragmentation and redundant duplication, limiting the\nbatch size. To address this problem, we propose PagedAttention, an attention\nalgorithm inspired by the classical virtual memory and paging techniques in\noperating systems. On top of it, we build vLLM, an LLM serving system that\nachieves (1) near-zero waste in KV cache memory and (2) flexible sharing of KV\ncache within and across requests to further reduce memory usage. 
Our\nevaluations show that vLLM improves the throughput of popular LLMs by\n2-4$\\times$ with the same level of latency compared to the state-of-the-art\nsystems, such as FasterTransformer and Orca. The improvement is more pronounced\nwith longer sequences, larger models, and more complex decoding algorithms.\nvLLM's source code is publicly available at\nhttps://github.com/vllm-project/vllm", + "authors": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, Ion Stoica", + "published": "2023-09-12", + "updated": "2023-09-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2006.16668v1", + "title": "GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding", + "abstract": "Neural network scaling has been critical for improving the model quality in\nmany real-world machine learning applications with vast amounts of training\ndata and compute. Although this trend of scaling is affirmed to be a sure-fire\napproach for better model quality, there are challenges on the path such as the\ncomputation cost, ease of programming, and efficient implementation on parallel\ndevices. GShard is a module composed of a set of lightweight annotation APIs\nand an extension to the XLA compiler. It provides an elegant way to express a\nwide range of parallel computation patterns with minimal changes to the\nexisting model code. GShard enabled us to scale up multilingual neural machine\ntranslation Transformer model with Sparsely-Gated Mixture-of-Experts beyond 600\nbillion parameters using automatic sharding. We demonstrate that such a giant\nmodel can efficiently be trained on 2048 TPU v3 accelerators in 4 days to\nachieve far superior quality for translation from 100 languages to English\ncompared to the prior art.", + "authors": "Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, Zhifeng Chen", + "published": "2020-06-30", + "updated": "2020-06-30", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2210.17223v2", + "title": "Accelerating Distributed MoE Training and Inference with Lina", + "abstract": "Scaling model parameters improves model quality at the price of high\ncomputation overhead. Sparsely activated models, usually in the form of Mixture\nof Experts (MoE) architecture, have sub-linear scaling of computation cost with\nmodel size, thus providing opportunities to train and serve a larger model at\nlower cost than their dense counterparts. However, distributed MoE training and\ninference is inefficient, mainly due to the interleaved all-to-all\ncommunication during model computation. This paper makes two main\ncontributions. First, we systematically analyze all-to-all overhead in\ndistributed MoE and present the main causes for it to be the bottleneck in\ntraining and inference, respectively. Second, we design and build Lina to\naddress the all-to-all bottleneck head-on. Lina opportunistically prioritizes\nall-to-all over the concurrent allreduce whenever feasible using tensor\npartitioning, so all-to-all and training step time is improved. 
Lina further\nexploits the inherent pattern of expert selection to dynamically schedule\nresources during inference, so that the transfer size and bandwidth of\nall-to-all across devices are balanced amid the highly skewed expert popularity\nin practice. Experiments on an A100 GPU testbed show that Lina reduces the\ntraining step time by up to 1.73x and reduces the 95%ile inference time by an\naverage of 1.63x over the state-of-the-art systems.", + "authors": "Jiamin Li, Yimin Jiang, Yibo Zhu, Cong Wang, Hong Xu", + "published": "2022-10-31", + "updated": "2024-04-28", + "primary_cat": "cs.DC", + "cats": [ + "cs.DC" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2201.05596v2", + "title": "DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale", + "abstract": "As the training of giant dense models hits the boundary on the availability\nand capability of the hardware resources today, Mixture-of-Experts (MoE) models\nbecome one of the most promising model architectures due to their significant\ntraining cost reduction compared to a quality-equivalent dense model. Its\ntraining cost saving is demonstrated from encoder-decoder models (prior works)\nto a 5x saving for auto-aggressive language models (this work along with\nparallel explorations). However, due to the much larger model size and unique\narchitecture, how to provide fast MoE model inference remains challenging and\nunsolved, limiting its practical usage. To tackle this, we present\nDeepSpeed-MoE, an end-to-end MoE training and inference solution as part of the\nDeepSpeed library, including novel MoE architecture designs and model\ncompression techniques that reduce MoE model size by up to 3.7x, and a highly\noptimized inference system that provides 7.3x better latency and cost compared\nto existing MoE inference solutions. DeepSpeed-MoE offers an unprecedented\nscale and efficiency to serve massive MoE models with up to 4.5x faster and 9x\ncheaper inference compared to quality-equivalent dense models. We hope our\ninnovations and systems help open a promising path to new directions in the\nlarge model landscape, a shift from dense to sparse MoE models, where training\nand deploying higher-quality models with fewer resources becomes more widely\npossible.", + "authors": "Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, Yuxiong He", + "published": "2022-01-14", + "updated": "2022-07-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.DC" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2310.02410v1", + "title": "Mixture of Quantized Experts (MoQE): Complementary Effect of Low-bit Quantization and Robustness", + "abstract": "Large Mixture of Experts (MoE) models could achieve state-of-the-art quality\non various language tasks, including machine translation task, thanks to the\nefficient model scaling capability with expert parallelism. However, it has\nbrought a fundamental issue of larger memory consumption and increased memory\nbandwidth bottleneck at deployment time. In this paper, we propose Mixture of\nQuantized Experts (MoQE) which is a simple weight-only quantization method\napplying ultra low-bit down to 2-bit quantizations only to expert weights for\nmitigating the increased memory and latency issues of MoE models. 
We show that\nlow-bit quantization together with the MoE architecture delivers a reliable\nmodel performance while reducing the memory size significantly even without any\nadditional training in most cases. In particular, expert layers in MoE models\nare much more robust to the quantization than conventional feedforward networks\n(FFN) layers. In our comprehensive analysis, we show that MoE models with 2-bit\nexpert weights can deliver better model performance than the dense model\ntrained on the same dataset. As a result of low-bit quantization, we show the\nmodel size can be reduced by 79.6% of the original half precision floating\npoint (fp16) MoE model. Combined with an optimized GPU runtime implementation,\nit also achieves 1.24X speed-up on A100 GPUs.", + "authors": "Young Jin Kim, Raffy Fahim, Hany Hassan Awadalla", + "published": "2023-10-03", + "updated": "2023-10-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2206.00277v2", + "title": "Task-Specific Expert Pruning for Sparse Mixture-of-Experts", + "abstract": "The sparse Mixture-of-Experts (MoE) model is powerful for large-scale\npre-training and has achieved promising results due to its model capacity.\nHowever, with trillions of parameters, MoE is hard to be deployed on cloud or\nmobile environment. The inference of MoE requires expert parallelism, which is\nnot hardware-friendly and communication expensive. Especially for\nresource-limited downstream tasks, such sparse structure has to sacrifice a lot\nof computing efficiency for limited performance gains. In this work, we observe\nmost experts contribute scarcely little to the MoE fine-tuning and inference.\nWe further propose a general method to progressively drop the non-professional\nexperts for the target downstream task, which preserves the benefits of MoE\nwhile reducing the MoE model into one single-expert dense model. Our\nexperiments reveal that the fine-tuned single-expert model could preserve 99.3%\nbenefits from MoE across six different types of tasks while enjoying 2x\ninference speed with free communication cost.", + "authors": "Tianyu Chen, Shaohan Huang, Yuan Xie, Binxing Jiao, Daxin Jiang, Haoyi Zhou, Jianxin Li, Furu Wei", + "published": "2022-06-01", + "updated": "2022-06-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2305.10863v1", + "title": "Quiver: Supporting GPUs for Low-Latency, High-Throughput GNN Serving with Workload Awareness", + "abstract": "Systems for serving inference requests on graph neural networks (GNN) must\ncombine low latency with high throughout, but they face irregular computation\ndue to skew in the number of sampled graph nodes and aggregated GNN features.\nThis makes it challenging to exploit GPUs effectively: using GPUs to sample\nonly a few graph nodes yields lower performance than CPU-based sampling; and\naggregating many features exhibits high data movement costs between GPUs and\nCPUs. Therefore, current GNN serving systems use CPUs for graph sampling and\nfeature aggregation, limiting throughput.\n We describe Quiver, a distributed GPU-based GNN serving system with\nlow-latency and high-throughput. 
Quiver's key idea is to exploit workload\nmetrics for predicting the irregular computation of GNN requests, and governing\nthe use of GPUs for graph sampling and feature aggregation: (1) for graph\nsampling, Quiver calculates the probabilistic sampled graph size, a metric that\npredicts the degree of parallelism in graph sampling. Quiver uses this metric\nto assign sampling tasks to GPUs only when the performance gains surpass\nCPU-based sampling; and (2) for feature aggregation, Quiver relies on the\nfeature access probability to decide which features to partition and replicate\nacross a distributed GPU NUMA topology. We show that Quiver achieves up to 35\ntimes lower latency with an 8 times higher throughput compared to\nstate-of-the-art GNN approaches (DGL and PyG).", + "authors": "Zeyuan Tan, Xiulong Yuan, Congjie He, Man-Kit Sit, Guo Li, Xiaoze Liu, Baole Ai, Kai Zeng, Peter Pietzuch, Luo Mai", + "published": "2023-05-18", + "updated": "2023-05-18", + "primary_cat": "cs.DC", + "cats": [ + "cs.DC", + "cs.AI", + "cs.LG", + "cs.OS" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2310.16795v1", + "title": "QMoE: Practical Sub-1-Bit Compression of Trillion-Parameter Models", + "abstract": "Mixture-of-Experts (MoE) architectures offer a general solution to the high\ninference costs of large language models (LLMs) via sparse routing, bringing\nfaster and more accurate models, at the cost of massive parameter counts. For\nexample, the SwitchTransformer-c2048 model has 1.6 trillion parameters,\nrequiring 3.2TB of accelerator memory to run efficiently, which makes practical\ndeployment challenging and expensive. In this paper, we present a solution to\nthis memory problem, in form of a new compression and execution framework\ncalled QMoE. Specifically, QMoE consists of a scalable algorithm which\naccurately compresses trillion-parameter MoEs to less than 1 bit per parameter,\nin a custom format co-designed with bespoke GPU decoding kernels to facilitate\nefficient end-to-end compressed inference, with minor runtime overheads\nrelative to uncompressed execution. Concretely, QMoE can compress the 1.6\ntrillion parameter SwitchTransformer-c2048 model to less than 160GB (20x\ncompression, 0.8 bits per parameter) at only minor accuracy loss, in less than\na day on a single GPU. This enables, for the first time, the execution of a\ntrillion-parameter model on affordable commodity hardware, like a single server\nwith 4x NVIDIA A6000 or 8x NVIDIA 3090 GPUs, at less than 5% runtime overhead\nrelative to ideal uncompressed inference. The source code and compressed models\nare available at github.com/IST-DASLab/qmoe.", + "authors": "Elias Frantar, Dan Alistarh", + "published": "2023-10-25", + "updated": "2023-10-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2401.06066v1", + "title": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", + "abstract": "In the era of large language models, Mixture-of-Experts (MoE) is a promising\narchitecture for managing computational costs when scaling up model parameters.\nHowever, conventional MoE architectures like GShard, which activate the top-$K$\nout of $N$ experts, face challenges in ensuring expert specialization, i.e.\neach expert acquires non-overlapping and focused knowledge. In response, we\npropose the DeepSeekMoE architecture towards ultimate expert specialization. 
It\ninvolves two principal strategies: (1) finely segmenting the experts into $mN$\nones and activating $mK$ from them, allowing for a more flexible combination of\nactivated experts; (2) isolating $K_s$ experts as shared ones, aiming at\ncapturing common knowledge and mitigating redundancy in routed experts.\nStarting from a modest scale with 2B parameters, we demonstrate that\nDeepSeekMoE 2B achieves comparable performance with GShard 2.9B, which has 1.5\ntimes the expert parameters and computation. In addition, DeepSeekMoE 2B nearly\napproaches the performance of its dense counterpart with the same number of\ntotal parameters, which set the upper bound of MoE models. Subsequently, we\nscale up DeepSeekMoE to 16B parameters and show that it achieves comparable\nperformance with LLaMA2 7B, with only about 40% of computations. Further, our\npreliminary efforts to scale up DeepSeekMoE to 145B parameters consistently\nvalidate its substantial advantages over the GShard architecture, and show its\nperformance comparable with DeepSeek 67B, using only 28.5% (maybe even 18.2%)\nof computations.", + "authors": "Damai Dai, Chengqi Deng, Chenggang Zhao, R. X. Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y. Wu, Zhenda Xie, Y. K. Li, Panpan Huang, Fuli Luo, Chong Ruan, Zhifang Sui, Wenfeng Liang", + "published": "2024-01-11", + "updated": "2024-01-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.15969v2", + "title": "Routers in Vision Mixture of Experts: An Empirical Study", + "abstract": "Mixture-of-Experts (MoE) models are a promising way to scale up model\ncapacity without significantly increasing computational cost. A key component\nof MoEs is the router, which decides which subset of parameters (experts)\nprocess which feature embeddings (tokens). In this paper, we present a\ncomprehensive study of routers in MoEs for computer vision tasks. We introduce\na unified MoE formulation that subsumes different MoEs with two parametric\nrouting tensors. This formulation covers both sparse MoE, which uses a binary\nor hard assignment between experts and tokens, and soft MoE, which uses a soft\nassignment between experts and weighted combinations of tokens. Routers for\nsparse MoEs can be further grouped into two variants: Token Choice, which\nmatches experts to each token, and Expert Choice, which matches tokens to each\nexpert. We conduct head-to-head experiments with 6 different routers, including\nexisting routers from prior work and new ones we introduce. We show that (i)\nmany routers originally developed for language modeling can be adapted to\nperform strongly in vision tasks, (ii) in sparse MoE, Expert Choice routers\ngenerally outperform Token Choice routers, and (iii) soft MoEs generally\noutperform sparse MoEs with a fixed compute budget. 
These results provide new\ninsights regarding the crucial role of routers in vision MoE models.", + "authors": "Tianlin Liu, Mathieu Blondel, Carlos Riquelme, Joan Puigcerver", + "published": "2024-01-29", + "updated": "2024-04-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2404.15045v1", + "title": "Multi-Head Mixture-of-Experts", + "abstract": "Sparse Mixtures of Experts (SMoE) scales model capacity without significant\nincreases in training and inference costs, but exhibits the following two\nissues: (1) Low expert activation, where only a small subset of experts are\nactivated for optimization. (2) Lacking fine-grained analytical capabilities\nfor multiple semantic concepts within individual tokens. We propose Multi-Head\nMixture-of-Experts (MH-MoE), which employs a multi-head mechanism to split each\ntoken into multiple sub-tokens. These sub-tokens are then assigned to and\nprocessed by a diverse set of experts in parallel, and seamlessly reintegrated\ninto the original token form. The multi-head mechanism enables the model to\ncollectively attend to information from various representation spaces within\ndifferent experts, while significantly enhances expert activation, thus deepens\ncontext understanding and alleviate overfitting. Moreover, our MH-MoE is\nstraightforward to implement and decouples from other SMoE optimization\nmethods, making it easy to integrate with other SMoE models for enhanced\nperformance. Extensive experimental results across three tasks: English-focused\nlanguage modeling, Multi-lingual language modeling and Masked multi-modality\nmodeling tasks, demonstrate the effectiveness of MH-MoE.", + "authors": "Xun Wu, Shaohan Huang, Wenhui Wang, Furu Wei", + "published": "2024-04-23", + "updated": "2024-04-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2212.00471v1", + "title": "Implicit Mixture of Interpretable Experts for Global and Local Interpretability", + "abstract": "We investigate the feasibility of using mixtures of interpretable experts\n(MoIE) to build interpretable image classifiers on MNIST10. MoIE uses a\nblack-box router to assign each input to one of many inherently interpretable\nexperts, thereby providing insight into why a particular classification\ndecision was made. We find that a naively trained MoIE will learn to 'cheat',\nwhereby the black-box router will solve the classification problem by itself,\nwith each expert simply learning a constant function for one particular class.\nWe propose to solve this problem by introducing interpretable routers and\ntraining the black-box router's decisions to match the interpretable router. In\naddition, we propose a novel implicit parameterization scheme that allows us to\nbuild mixtures of arbitrary numbers of experts, allowing us to study how\nclassification performance, local and global interpretability vary as the\nnumber of experts is increased. 
Our new model, dubbed Implicit Mixture of\nInterpretable Experts (IMoIE) can match state-of-the-art classification\naccuracy on MNIST10 while providing local interpretability, and can provide\nglobal interpretability albeit at the cost of reduced classification accuracy.", + "authors": "Nathan Elazar, Kerry Taylor", + "published": "2022-12-01", + "updated": "2022-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2307.05956v2", + "title": "Language-Routing Mixture of Experts for Multilingual and Code-Switching Speech Recognition", + "abstract": "Multilingual speech recognition for both monolingual and code-switching\nspeech is a challenging task. Recently, based on the Mixture of Experts (MoE),\nmany works have made good progress in multilingual and code-switching ASR, but\npresent huge computational complexity with the increase of supported languages.\nIn this work, we propose a computation-efficient network named Language-Routing\nMixture of Experts (LR-MoE) for multilingual and code-switching ASR. LR-MoE\nextracts language-specific representations through the Mixture of Language\nExperts (MLE), which is guided to learn by a frame-wise language routing\nmechanism. The weight-shared frame-level language identification (LID) network\nis jointly trained as the shared pre-router of each MoE layer. Experiments show\nthat the proposed method significantly improves multilingual and code-switching\nspeech recognition performances over baseline with comparable computational\nefficiency.", + "authors": "Wenxuan Wang, Guodong Ma, Yuke Li, Binbin Du", + "published": "2023-07-12", + "updated": "2023-07-14", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.00361v1", + "title": "AdaMoLE: Fine-Tuning Large Language Models with Adaptive Mixture of Low-Rank Adaptation Experts", + "abstract": "We introduce AdaMoLE, a novel method for fine-tuning large language models\n(LLMs) through an Adaptive Mixture of Low-Rank Adaptation (LoRA) Experts.\nMoving beyond conventional methods that employ a static top-k strategy for\nactivating experts, AdaMoLE dynamically adjusts the activation threshold using\na dedicated threshold network, adaptively responding to the varying\ncomplexities of different tasks. By replacing a single LoRA in a layer with\nmultiple LoRA experts and integrating a gating function with the threshold\nmechanism, AdaMoLE effectively selects and activates the most appropriate\nexperts based on the input context. Our extensive evaluations across a variety\nof commonsense reasoning and natural language processing tasks show that\nAdaMoLE exceeds baseline performance. This enhancement highlights the\nadvantages of AdaMoLE's adaptive selection of LoRA experts, improving model\neffectiveness without a corresponding increase in the expert count. 
The\nexperimental validation not only confirms AdaMoLE as a robust approach for\nenhancing LLMs but also suggests valuable directions for future research in\nadaptive expert selection mechanisms, potentially broadening the scope for\noptimizing model performance across diverse language processing tasks.", + "authors": "Zefang Liu, Jiahua Luo", + "published": "2024-05-01", + "updated": "2024-05-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.09368v2", + "title": "Mixture-of-Experts with Expert Choice Routing", + "abstract": "Sparsely-activated Mixture-of-experts (MoE) models allow the number of\nparameters to greatly increase while keeping the amount of computation for a\ngiven token or a given sample unchanged. However, a poor expert routing\nstrategy (e.g. one resulting in load imbalance) can cause certain experts to be\nunder-trained, leading to an expert being under or over-specialized. Prior work\nallocates a fixed number of experts to each token using a top-k function\nregardless of the relative importance of different tokens. To address this, we\npropose a heterogeneous mixture-of-experts employing an expert choice method.\nInstead of letting tokens select the top-k experts, we have experts selecting\nthe top-k tokens. As a result, each token can be routed to a variable number of\nexperts and each expert can have a fixed bucket size. We systematically study\npre-training speedups using the same computational resources of the Switch\nTransformer top-1 and GShard top-2 gating of prior work and find that our\nmethod improves training convergence time by more than 2x. For the same\ncomputational cost, our method demonstrates higher performance in fine-tuning\n11 selected tasks in the GLUE and SuperGLUE benchmarks. For a smaller\nactivation cost, our method outperforms the T5 dense model in 7 out of the 11\ntasks.", + "authors": "Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew Dai, Zhifeng Chen, Quoc Le, James Laudon", + "published": "2022-02-18", + "updated": "2022-10-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1904.09948v1", + "title": "PLUME: Polyhedral Learning Using Mixture of Experts", + "abstract": "In this paper, we propose a novel mixture of expert architecture for learning\npolyhedral classifiers. We learn the parameters of the classifierusing an\nexpectation maximization algorithm. Wederive the generalization bounds of the\nproposedapproach. Through an extensive simulation study, we show that the\nproposed method performs comparably to other state-of-the-art approaches.", + "authors": "Kulin Shah, P. S. Sastry, Naresh Manwani", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.01899v1", + "title": "MiCE: Mixture of Contrastive Experts for Unsupervised Image Clustering", + "abstract": "We present Mixture of Contrastive Experts (MiCE), a unified probabilistic\nclustering framework that simultaneously exploits the discriminative\nrepresentations learned by contrastive learning and the semantic structures\ncaptured by a latent mixture model. 
Motivated by the mixture of experts, MiCE\nemploys a gating function to partition an unlabeled dataset into subsets\naccording to the latent semantics and multiple experts to discriminate distinct\nsubsets of instances assigned to them in a contrastive learning manner. To\nsolve the nontrivial inference and learning problems caused by the latent\nvariables, we further develop a scalable variant of the\nExpectation-Maximization (EM) algorithm for MiCE and provide proof of the\nconvergence. Empirically, we evaluate the clustering performance of MiCE on\nfour widely adopted natural image datasets. MiCE achieves significantly better\nresults than various previous methods and a strong contrastive learning\nbaseline.", + "authors": "Tsung Wei Tsai, Chongxuan Li, Jun Zhu", + "published": "2021-05-05", + "updated": "2021-05-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1911.08151v2", + "title": "Retrospective and Prospective Mixture-of-Generators for Task-oriented Dialogue Response Generation", + "abstract": "Dialogue response generation (DRG) is a critical component of task-oriented\ndialogue systems (TDSs). Its purpose is to generate proper natural language\nresponses given some context, e.g., historical utterances, system states, etc.\nState-of-the-art work focuses on how to better tackle DRG in an end-to-end way.\nTypically, such studies assume that each token is drawn from a single\ndistribution over the output vocabulary, which may not always be optimal.\nResponses vary greatly with different intents, e.g., domains, system actions.\n We propose a novel mixture-of-generators network (MoGNet) for DRG, where we\nassume that each token of a response is drawn from a mixture of distributions.\nMoGNet consists of a chair generator and several expert generators. Each expert\nis specialized for DRG w.r.t. a particular intent. The chair coordinates\nmultiple experts and combines the output they have generated to produce more\nappropriate responses. We propose two strategies to help the chair make better\ndecisions, namely, a retrospective mixture-of-generators (RMoG) and prospective\nmixture-of-generators (PMoG). The former only considers the historical\nexpert-generated responses until the current time step while the latter also\nconsiders possible expert-generated responses in the future by encouraging\nexploration. In order to differentiate experts, we also devise a\nglobal-and-local (GL) learning scheme that forces each expert to be specialized\ntowards a particular intent using a local loss and trains the chair and all\nexperts to coordinate using a global loss.\n We carry out extensive experiments on the MultiWOZ benchmark dataset. MoGNet\nsignificantly outperforms state-of-the-art methods in terms of both automatic\nand human evaluations, demonstrating its effectiveness for DRG.", + "authors": "Jiahuan Pei, Pengjie Ren, Christof Monz, Maarten de Rijke", + "published": "2019-11-19", + "updated": "2020-02-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1312.4314v3", + "title": "Learning Factored Representations in a Deep Mixture of Experts", + "abstract": "Mixtures of Experts combine the outputs of several \"expert\" networks, each of\nwhich specializes in a different part of the input space. 
This is achieved by\ntraining a \"gating\" network that maps each input to a distribution over the\nexperts. Such models show promise for building larger networks that are still\ncheap to compute at test time, and more parallelizable at training time. In\nthis this work, we extend the Mixture of Experts to a stacked model, the Deep\nMixture of Experts, with multiple sets of gating and experts. This\nexponentially increases the number of effective experts by associating each\ninput with a combination of experts at each layer, yet maintains a modest model\nsize. On a randomly translated version of the MNIST dataset, we find that the\nDeep Mixture of Experts automatically learns to develop location-dependent\n(\"where\") experts at the first layer, and class-specific (\"what\") experts at\nthe second layer. In addition, we see that the different combinations are in\nuse when the model is applied to a dataset of speech monophones. These\ndemonstrate effective use of all expert combinations.", + "authors": "David Eigen, Marc'Aurelio Ranzato, Ilya Sutskever", + "published": "2013-12-16", + "updated": "2014-03-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08753v1", + "title": "Table-based Fact Verification with Self-adaptive Mixture of Experts", + "abstract": "The table-based fact verification task has recently gained widespread\nattention and yet remains to be a very challenging problem. It inherently\nrequires informative reasoning over natural language together with different\nnumerical and logical reasoning on tables (e.g., count, superlative,\ncomparative). Considering that, we exploit mixture-of-experts and present in\nthis paper a new method: Self-adaptive Mixture-of-Experts Network (SaMoE).\nSpecifically, we have developed a mixture-of-experts neural network to\nrecognize and execute different types of reasoning -- the network is composed\nof multiple experts, each handling a specific part of the semantics for\nreasoning, whereas a management module is applied to decide the contribution of\neach expert network to the verification result. A self-adaptive method is\ndeveloped to teach the management module combining results of different experts\nmore efficiently without external knowledge. The experimental results\nillustrate that our framework achieves 85.1% accuracy on the benchmark dataset\nTabFact, comparable with the previous state-of-the-art models. We hope our\nframework can serve as a new baseline for table-based verification. Our code is\navailable at https://github.com/THUMLP/SaMoE.", + "authors": "Yuxuan Zhou, Xien Liu, Kaiyin Zhou, Ji Wu", + "published": "2022-04-19", + "updated": "2022-04-19", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.01778v1", + "title": "Hierarchical mixture of discriminative Generalized Dirichlet classifiers", + "abstract": "This paper presents a discriminative classifier for compositional data. This\nclassifier is based on the posterior distribution of the Generalized Dirichlet\nwhich is the discriminative counterpart of Generalized Dirichlet mixture model.\nMoreover, following the mixture of experts paradigm, we proposed a hierarchical\nmixture of this classifier. In order to learn the models parameters, we use a\nvariational approximation by deriving an upper-bound for the Generalized\nDirichlet mixture. 
To the best of our knownledge, this is the first time this\nbound is proposed in the literature. Experimental results are presented for\nspam detection and color space identification.", + "authors": "Elvis Togban, Djemel Ziou", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08396v1", + "title": "StableMoE: Stable Routing Strategy for Mixture of Experts", + "abstract": "The Mixture-of-Experts (MoE) technique can scale up the model size of\nTransformers with an affordable computational overhead. We point out that\nexisting learning-to-route MoE methods suffer from the routing fluctuation\nissue, i.e., the target expert of the same input may change along with\ntraining, but only one expert will be activated for the input during inference.\nThe routing fluctuation tends to harm sample efficiency because the same input\nupdates different experts but only one is finally used. In this paper, we\npropose StableMoE with two training stages to address the routing fluctuation\nproblem. In the first training stage, we learn a balanced and cohesive routing\nstrategy and distill it into a lightweight router decoupled from the backbone\nmodel. In the second training stage, we utilize the distilled router to\ndetermine the token-to-expert assignment and freeze it for a stable routing\nstrategy. We validate our method on language modeling and multilingual machine\ntranslation. The results show that StableMoE outperforms existing MoE methods\nin terms of both convergence speed and performance.", + "authors": "Damai Dai, Li Dong, Shuming Ma, Bo Zheng, Zhifang Sui, Baobao Chang, Furu Wei", + "published": "2022-04-18", + "updated": "2022-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05220v1", + "title": "On Parameter Estimation in Deviated Gaussian Mixture of Experts", + "abstract": "We consider the parameter estimation problem in the deviated Gaussian mixture\nof experts in which the data are generated from $(1 - \\lambda^{\\ast}) g_0(Y|\nX)+ \\lambda^{\\ast} \\sum_{i = 1}^{k_{\\ast}} p_{i}^{\\ast}\nf(Y|(a_{i}^{\\ast})^{\\top}X+b_i^{\\ast},\\sigma_{i}^{\\ast})$, where $X, Y$ are\nrespectively a covariate vector and a response variable, $g_{0}(Y|X)$ is a\nknown function, $\\lambda^{\\ast} \\in [0, 1]$ is true but unknown mixing\nproportion, and $(p_{i}^{\\ast}, a_{i}^{\\ast}, b_{i}^{\\ast}, \\sigma_{i}^{\\ast})$\nfor $1 \\leq i \\leq k^{\\ast}$ are unknown parameters of the Gaussian mixture of\nexperts. This problem arises from the goodness-of-fit test when we would like\nto test whether the data are generated from $g_{0}(Y|X)$ (null hypothesis) or\nthey are generated from the whole mixture (alternative hypothesis). Based on\nthe algebraic structure of the expert functions and the distinguishability\nbetween $g_0$ and the mixture part, we construct novel Voronoi-based loss\nfunctions to capture the convergence rates of maximum likelihood estimation\n(MLE) for our models. 
We further demonstrate that our proposed loss functions\ncharacterize the local convergence rates of parameter estimation more\naccurately than the generalized Wasserstein, a loss function being commonly\nused for estimating parameters in the Gaussian mixture of experts.", + "authors": "Huy Nguyen, Khai Nguyen, Nhat Ho", + "published": "2024-02-07", + "updated": "2024-02-07", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2110.04260v3", + "title": "Taming Sparsely Activated Transformer with Stochastic Experts", + "abstract": "Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can\neasily scale to have outrageously large amounts of parameters without\nsignificant increase in computational cost. However, SAMs are reported to be\nparameter inefficient such that larger models do not always lead to better\nperformance. While most on-going research focuses on improving SAMs models by\nexploring methods of routing inputs to experts, our analysis reveals that such\nresearch might not lead to the solution we expect, i.e., the commonly-used\nrouting methods based on gating mechanisms do not work better than randomly\nrouting inputs to experts. In this paper, we propose a new expert-based model,\nTHOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models,\nsuch as the Switch Transformer, experts in THOR are randomly activated for each\ninput during training and inference. THOR models are trained using a\nconsistency regularized loss, where experts learn not only from training data\nbut also from other experts as teachers, such that all the experts make\nconsistent predictions. We validate the effectiveness of THOR on machine\ntranslation tasks. Results show that THOR models are more parameter efficient\nin that they significantly outperform the Transformer and MoE models across\nvarious settings. For example, in multilingual translation, THOR outperforms\nthe Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as\nthat of a state-of-the-art MoE model that is 18 times larger. Our code is\npublicly available at:\nhttps://github.com/microsoft/Stochastic-Mixture-of-Experts.", + "authors": "Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Tuo Zhao, Jianfeng Gao", + "published": "2021-10-08", + "updated": "2022-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.04894v1", + "title": "DAMEX: Dataset-aware Mixture-of-Experts for visual understanding of mixture-of-datasets", + "abstract": "Construction of a universal detector poses a crucial question: How can we\nmost effectively train a model on a large mixture of datasets? The answer lies\nin learning dataset-specific features and ensembling their knowledge but do all\nthis in a single model. Previous methods achieve this by having separate\ndetection heads on a common backbone but that results in a significant increase\nin parameters. In this work, we present Mixture-of-Experts as a solution,\nhighlighting that MoEs are much more than a scalability tool. We propose\nDataset-Aware Mixture-of-Experts, DAMEX where we train the experts to become an\n`expert' of a dataset by learning to route each dataset tokens to its mapped\nexpert. 
Experiments on Universal Object-Detection Benchmark show that we\noutperform the existing state-of-the-art by average +10.2 AP score and improve\nover our non-MoE baseline by average +2.0 AP score. We also observe consistent\ngains while mixing datasets with (1) limited availability, (2) disparate\ndomains and (3) divergent label sets. Further, we qualitatively show that DAMEX\nis robust against expert representation collapse.", + "authors": "Yash Jain, Harkirat Behl, Zsolt Kira, Vibhav Vineet", + "published": "2023-11-08", + "updated": "2023-11-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1809.04853v2", + "title": "Bayesian shrinkage in mixture of experts models: Identifying robust determinants of class membership", + "abstract": "A method for implicit variable selection in mixture of experts frameworks is\nproposed. We introduce a prior structure where information is taken from a set\nof independent covariates. Robust class membership predictors are identified\nusing a normal gamma prior. The resulting model setup is used in a finite\nmixture of Bernoulli distributions to find homogenous clusters of women in\nMozambique based on their information sources on HIV. Fully Bayesian inference\nis carried out via the implementation of a Gibbs sampler.", + "authors": "Gregor Zens", + "published": "2018-09-13", + "updated": "2019-01-12", + "primary_cat": "econ.EM", + "cats": [ + "econ.EM", + "62F15, 62J07, 62H30, 90-08" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1605.01652v1", + "title": "LSTM-based Mixture-of-Experts for Knowledge-Aware Dialogues", + "abstract": "We introduce an LSTM-based method for dynamically integrating several\nword-prediction experts to obtain a conditional language model which can be\ngood simultaneously at several subtasks. We illustrate this general approach\nwith an application to dialogue where we integrate a neural chat model, good at\nconversational aspects, with a neural question-answering model, good at\nretrieving precise information from a knowledge-base, and show how the\nintegration combines the strengths of the independent components. We hope that\nthis focused contribution will attract attention on the benefits of using such\nmixtures of experts in NLP.", + "authors": "Phong Le, Marc Dymetman, Jean-Michel Renders", + "published": "2016-05-05", + "updated": "2016-05-05", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.09179v1", + "title": "SiRA: Sparse Mixture of Low Rank Adaptation", + "abstract": "Parameter Efficient Tuning has been an prominent approach to adapt the Large\nLanguage Model to downstream tasks. Most previous works considers adding the\ndense trainable parameters, where all parameters are used to adapt certain\ntask. We found this less effective empirically using the example of LoRA that\nintroducing more trainable parameters does not help. Motivated by this we\ninvestigate the importance of leveraging \"sparse\" computation and propose SiRA:\nsparse mixture of low rank adaption. SiRA leverages the Sparse Mixture of\nExpert(SMoE) to boost the performance of LoRA. Specifically it enforces the top\n$k$ experts routing with a capacity limit restricting the maximum number of\ntokens each expert can process. 
We propose a novel and simple expert dropout on\ntop of gating network to reduce the over-fitting issue. Through extensive\nexperiments, we verify SiRA performs better than LoRA and other mixture of\nexpert approaches across different single tasks and multitask settings.", + "authors": "Yun Zhu, Nevan Wichers, Chu-Cheng Lin, Xinyi Wang, Tianlong Chen, Lei Shu, Han Lu, Canoee Liu, Liangchen Luo, Jindong Chen, Lei Meng", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17749v1", + "title": "Multi-Task Dense Prediction via Mixture of Low-Rank Experts", + "abstract": "Previous multi-task dense prediction methods based on the Mixture of Experts\n(MoE) have received great performance but they neglect the importance of\nexplicitly modeling the global relations among all tasks. In this paper, we\npresent a novel decoder-focused method for multi-task dense prediction, called\nMixture-of-Low-Rank-Experts (MLoRE). To model the global task relationships,\nMLoRE adds a generic convolution path to the original MoE structure, where each\ntask feature can go through this path for explicit parameter sharing.\nFurthermore, to control the parameters and computational cost brought by the\nincrease in the number of experts, we take inspiration from LoRA and propose to\nleverage the low-rank format of a vanilla convolution in the expert network.\nSince the low-rank experts have fewer parameters and can be dynamically\nparameterized into the generic convolution, the parameters and computational\ncost do not change much with the increase of experts. Benefiting from this\ndesign, we increase the number of experts and its reception field to enlarge\nthe representation capacity, facilitating multiple dense tasks learning in a\nunified network. Extensive experiments on the PASCAL-Context and NYUD-v2\nbenchmarks show that our MLoRE achieves superior performance compared to\nprevious state-of-the-art methods on all metrics. Our code is available at\nhttps://github.com/YuqiYang213/MLoRE.", + "authors": "Yuqi Yang, Peng-Tao Jiang, Qibin Hou, Hao Zhang, Jinwei Chen, Bo Li", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2209.13071v1", + "title": "Diversified Dynamic Routing for Vision Tasks", + "abstract": "Deep learning models for vision tasks are trained on large datasets under the\nassumption that there exists a universal representation that can be used to\nmake predictions for all samples. Whereas high complexity models are proven to\nbe capable of learning such representations, a mixture of experts trained on\nspecific subsets of the data can infer the labels more efficiently. However\nusing mixture of experts poses two new problems, namely (i) assigning the\ncorrect expert at inference time when a new unseen sample is presented. (ii)\nFinding the optimal partitioning of the training data, such that the experts\nrely the least on common features. 
In Dynamic Routing (DR) a novel architecture\nis proposed where each layer is composed of a set of experts, however without\naddressing the two challenges we demonstrate that the model reverts to using\nthe same subset of experts.\n In our method, Diversified Dynamic Routing (DivDR) the model is explicitly\ntrained to solve the challenge of finding relevant partitioning of the data and\nassigning the correct experts in an unsupervised approach. We conduct several\nexperiments on semantic segmentation on Cityscapes and object detection and\ninstance segmentation on MS-COCO showing improved performance over several\nbaselines.", + "authors": "Botos Csaba, Adel Bibi, Yanwei Li, Philip Torr, Ser-Nam Lim", + "published": "2022-09-26", + "updated": "2022-09-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.13750v1", + "title": "MoLE : Mixture of Language Experts for Multi-Lingual Automatic Speech Recognition", + "abstract": "Multi-lingual speech recognition aims to distinguish linguistic expressions\nin different languages and integrate acoustic processing simultaneously. In\ncontrast, current multi-lingual speech recognition research follows a\nlanguage-aware paradigm, mainly targeted to improve recognition performance\nrather than discriminate language characteristics. In this paper, we present a\nmulti-lingual speech recognition network named\nMixture-of-Language-Expert(MoLE), which digests speech in a variety of\nlanguages. Specifically, MoLE analyzes linguistic expression from input speech\nin arbitrary languages, activating a language-specific expert with a\nlightweight language tokenizer. The tokenizer not only activates experts, but\nalso estimates the reliability of the activation. Based on the reliability, the\nactivated expert and the language-agnostic expert are aggregated to represent\nlanguage-conditioned embedding for efficient speech recognition. Our proposed\nmodel is evaluated in 5 languages scenario, and the experimental results show\nthat our structure is advantageous on multi-lingual recognition, especially for\nspeech in low-resource language.", + "authors": "Yoohwan Kwon, Soo-Whan Chung", + "published": "2023-02-27", + "updated": "2023-02-27", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.CL", + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.00372v1", + "title": "Visual Saliency Prediction Using a Mixture of Deep Neural Networks", + "abstract": "Visual saliency models have recently begun to incorporate deep learning to\nachieve predictive capacity much greater than previous unsupervised methods.\nHowever, most existing models predict saliency using local mechanisms limited\nto the receptive field of the network. We propose a model that incorporates\nglobal scene semantic information in addition to local information gathered by\na convolutional neural network. Our model is formulated as a mixture of\nexperts. Each expert network is trained to predict saliency for a set of\nclosely related images. The final saliency map is computed as a weighted\nmixture of the expert networks' output, with weights determined by a separate\ngating network. This gating network is guided by global scene information to\npredict weights. The expert networks and the gating network are trained\nsimultaneously in an end-to-end manner. 
We show that our mixture formulation\nleads to improvement in performance over an otherwise identical non-mixture\nmodel that does not incorporate global scene information.", + "authors": "Samuel Dodge, Lina Karam", + "published": "2017-02-01", + "updated": "2017-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12550v1", + "title": "Multilinear Mixture of Experts: Scalable Expert Specialization through Factorization", + "abstract": "The Mixture of Experts (MoE) paradigm provides a powerful way to decompose\ninscrutable dense layers into smaller, modular computations often more amenable\nto human interpretation, debugging, and editability. A major problem however\nlies in the computational cost of scaling the number of experts to achieve\nsufficiently fine-grained specialization. In this paper, we propose the\nMultilinear Mixutre of Experts (MMoE) layer to address this, focusing on vision\nmodels. MMoE layers perform an implicit computation on prohibitively large\nweight tensors entirely in factorized form. Consequently, MMoEs both (1) avoid\nthe issues incurred through the discrete expert routing in the popular 'sparse'\nMoE models, yet (2) do not incur the restrictively high inference-time costs of\n'soft' MoE alternatives. We present both qualitative and quantitative evidence\n(through visualization and counterfactual interventions respectively) that\nscaling MMoE layers when fine-tuning foundation models for vision tasks leads\nto more specialized experts at the class-level whilst remaining competitive\nwith the performance of parameter-matched linear layer counterparts. Finally,\nwe show that learned expert specialism further facilitates manual correction of\ndemographic bias in CelebA attribute classification. Our MMoE model code is\navailable at https://github.com/james-oldfield/MMoE.", + "authors": "James Oldfield, Markos Georgopoulos, Grigorios G. Chrysos, Christos Tzelepis, Yannis Panagakis, Mihalis A. Nicolaou, Jiankang Deng, Ioannis Patras", + "published": "2024-02-19", + "updated": "2024-02-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.02813v1", + "title": "Towards Understanding Mixture of Experts in Deep Learning", + "abstract": "The Mixture-of-Experts (MoE) layer, a sparsely-activated model controlled by\na router, has achieved great success in deep learning. However, the\nunderstanding of such architecture remains elusive. In this paper, we formally\nstudy how the MoE layer improves the performance of neural network learning and\nwhy the mixture model will not collapse into a single model. Our empirical\nresults suggest that the cluster structure of the underlying problem and the\nnon-linearity of the expert are pivotal to the success of MoE. To further\nunderstand this, we consider a challenging classification problem with\nintrinsic cluster structures, which is hard to learn using a single expert. Yet\nwith the MoE layer, by choosing the experts as two-layer nonlinear\nconvolutional neural networks (CNNs), we show that the problem can be learned\nsuccessfully. 
Furthermore, our theory shows that the router can learn the\ncluster-center features, which helps divide the input complex problem into\nsimpler linear classification sub-problems that individual experts can conquer.\nTo our knowledge, this is the first result towards formally understanding the\nmechanism of the MoE layer for deep learning.", + "authors": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, Yuanzhi Li", + "published": "2022-08-04", + "updated": "2022-08-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1905.12969v1", + "title": "Enriched Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts probabilistically divide the input space into regions,\nwhere the assumptions of each expert, or conditional model, need only hold\nlocally. Combined with Gaussian process (GP) experts, this results in a\npowerful and highly flexible model. We focus on alternative mixtures of GP\nexperts, which model the joint distribution of the inputs and targets\nexplicitly. We highlight issues of this approach in multi-dimensional input\nspaces, namely, poor scalability and the need for an unnecessarily large number\nof experts, degrading the predictive performance and increasing uncertainty. We\nconstruct a novel model to address these issues through a nested partitioning\nscheme that automatically infers the number of components at both levels.\nMultiple response types are accommodated through a generalised GP framework,\nwhile multiple input types are included through a factorised exponential family\nstructure. We show the effectiveness of our approach in estimating a\nparsimonious probabilistic description of both synthetic data of increasing\ndimension and an Alzheimer's challenge dataset.", + "authors": "Charles W. L. Gadd, Sara Wade, Alexis Boukouvalas", + "published": "2019-05-30", + "updated": "2019-05-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.12379v4", + "title": "Mixture of Cluster-conditional LoRA Experts for Vision-language Instruction Tuning", + "abstract": "Instruction tuning of Large Vision-language Models (LVLMs) has revolutionized\nthe development of versatile models with zero-shot generalization across a wide\nrange of downstream vision-language tasks. However, the diversity of training\ntasks of different sources and formats would lead to inevitable task conflicts,\nwhere different tasks conflict for the same set of model parameters, resulting\nin sub-optimal instructionfollowing abilities. To address that, we propose the\nMixture of Clusterconditional LoRA Experts (MoCLE), a novel Mixture of Experts\n(MoE) architecture designed to activate the task-customized model parameters\nbased on the instruction clusters. A separate universal expert is further\nincorporated to improve generalization capabilities of MoCLE for novel\ninstructions. Extensive experiments on 11 zero-shot tasks demonstrate the\neffectiveness of MoCLE.", + "authors": "Yunhao Gou, Zhili Liu, Kai Chen, Lanqing Hong, Hang Xu, Aoxue Li, Dit-Yan Yeung, James T. 
Kwok, Yu Zhang", + "published": "2023-12-19", + "updated": "2024-03-22", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.02952v1", + "title": "On Least Squares Estimation in Softmax Gating Mixture of Experts", + "abstract": "Mixture of experts (MoE) model is a statistical machine learning design that\naggregates multiple expert networks using a softmax gating function in order to\nform a more intricate and expressive model. Despite being commonly used in\nseveral applications owing to their scalability, the mathematical and\nstatistical properties of MoE models are complex and difficult to analyze. As a\nresult, previous theoretical works have primarily focused on probabilistic MoE\nmodels by imposing the impractical assumption that the data are generated from\na Gaussian MoE model. In this work, we investigate the performance of the least\nsquares estimators (LSE) under a deterministic MoE model where the data are\nsampled according to a regression model, a setting that has remained largely\nunexplored. We establish a condition called strong identifiability to\ncharacterize the convergence behavior of various types of expert functions. We\ndemonstrate that the rates for estimating strongly identifiable experts, namely\nthe widely used feed forward networks with activation functions\n$\\mathrm{sigmoid}(\\cdot)$ and $\\tanh(\\cdot)$, are substantially faster than\nthose of polynomial experts, which we show to exhibit a surprising slow\nestimation rate. Our findings have important practical implications for expert\nselection.", + "authors": "Huy Nguyen, Nhat Ho, Alessandro Rinaldo", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2006.13309v4", + "title": "Fast Deep Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts have become an indispensable tool for flexible modelling\nin a supervised learning context, allowing not only the mean function but the\nentire density of the output to change with the inputs. Sparse Gaussian\nprocesses (GP) have shown promise as a leading candidate for the experts in\nsuch models, and in this article, we propose to design the gating network for\nselecting the experts from such mixtures of sparse GPs using a deep neural\nnetwork (DNN). Furthermore, a fast one pass algorithm called\nCluster-Classify-Regress (CCR) is leveraged to approximate the maximum a\nposteriori (MAP) estimator extremely quickly. This powerful combination of\nmodel and algorithm together delivers a novel method which is flexible, robust,\nand extremely efficient. In particular, the method is able to outperform\ncompeting methods in terms of accuracy and uncertainty quantification. The cost\nis competitive on low-dimensional and small data sets, but is significantly\nlower for higher-dimensional and big data sets. Iteratively maximizing the\ndistribution of experts given allocations and allocations given experts does\nnot provide significant improvement, which indicates that the algorithm\nachieves a good approximation to the local MAP estimator very fast. 
This\ninsight can be useful also in the context of other mixture of experts models.", + "authors": "Clement Etienam, Kody Law, Sara Wade, Vitaly Zankin", + "published": "2020-06-11", + "updated": "2023-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1903.07756v1", + "title": "Hierarchical Routing Mixture of Experts", + "abstract": "In regression tasks the distribution of the data is often too complex to be\nfitted by a single model. In contrast, partition-based models are developed\nwhere data is divided and fitted by local models. These models partition the\ninput space and do not leverage the input-output dependency of\nmultimodal-distributed data, and strong local models are needed to make good\npredictions. Addressing these problems, we propose a binary tree-structured\nhierarchical routing mixture of experts (HRME) model that has classifiers as\nnon-leaf node experts and simple regression models as leaf node experts. The\nclassifier nodes jointly soft-partition the input-output space based on the\nnatural separateness of multimodal data. This enables simple leaf experts to be\neffective for prediction. Further, we develop a probabilistic framework for the\nHRME model, and propose a recursive Expectation-Maximization (EM) based\nalgorithm to learn both the tree structure and the expert models. Experiments\non a collection of regression tasks validate the effectiveness of our method\ncompared to a variety of other regression models.", + "authors": "Wenbo Zhao, Yang Gao, Shahan Ali Memon, Bhiksha Raj, Rita Singh", + "published": "2019-03-18", + "updated": "2019-03-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.05238v3", + "title": "Universal Simultaneous Machine Translation with Mixture-of-Experts Wait-k Policy", + "abstract": "Simultaneous machine translation (SiMT) generates translation before reading\nthe entire source sentence and hence it has to trade off between translation\nquality and latency. To fulfill the requirements of different translation\nquality and latency in practical applications, the previous methods usually\nneed to train multiple SiMT models for different latency levels, resulting in\nlarge computational costs. In this paper, we propose a universal SiMT model\nwith Mixture-of-Experts Wait-k Policy to achieve the best translation quality\nunder arbitrary latency with only one trained model. Specifically, our method\nemploys multi-head attention to accomplish the mixture of experts where each\nhead is treated as a wait-k expert with its own waiting words number, and given\na test latency and source inputs, the weights of the experts are accordingly\nadjusted to produce the best translation. 
Experiments on three datasets show\nthat our method outperforms all the strong baselines under different latency,\nincluding the state-of-the-art adaptive policy.", + "authors": "Shaolei Zhang, Yang Feng", + "published": "2021-09-11", + "updated": "2022-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.02043v1", + "title": "mixdistreg: An R Package for Fitting Mixture of Experts Distributional Regression with Adaptive First-order Methods", + "abstract": "This paper presents a high-level description of the R software package\nmixdistreg to fit mixture of experts distributional regression models. The\nproposed framework is implemented in R using the deepregression software\ntemplate, which is based on TensorFlow and follows the neural structured\nadditive learning principle. The software comprises various approaches as\nspecial cases, including mixture density networks and mixture regression\napproaches. Various code examples are given to demonstrate the package's\nfunctionality.", + "authors": "David R\u00fcgamer", + "published": "2023-02-04", + "updated": "2023-02-04", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.06966v1", + "title": "Acquiring Diverse Skills using Curriculum Reinforcement Learning with Mixture of Experts", + "abstract": "Reinforcement learning (RL) is a powerful approach for acquiring a\ngood-performing policy. However, learning diverse skills is challenging in RL\ndue to the commonly used Gaussian policy parameterization. We propose\n\\textbf{Di}verse \\textbf{Skil}l \\textbf{L}earning (Di-SkilL), an RL method for\nlearning diverse skills using Mixture of Experts, where each expert formalizes\na skill as a contextual motion primitive. Di-SkilL optimizes each expert and\nits associate context distribution to a maximum entropy objective that\nincentivizes learning diverse skills in similar contexts. The per-expert\ncontext distribution enables automatic curricula learning, allowing each expert\nto focus on its best-performing sub-region of the context space. To overcome\nhard discontinuities and multi-modalities without any prior knowledge of the\nenvironment's unknown context probability space, we leverage energy-based\nmodels to represent the per-expert context distributions and demonstrate how we\ncan efficiently train them using the standard policy gradient objective. We\nshow on challenging robot simulation tasks that Di-SkilL can learn diverse and\nperformant skills.", + "authors": "Onur Celik, Aleksandar Taranovic, Gerhard Neumann", + "published": "2024-03-11", + "updated": "2024-03-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05526v1", + "title": "Buffer Overflow in Mixture of Experts", + "abstract": "Mixture of Experts (MoE) has become a key ingredient for scaling large\nfoundation models while keeping inference costs steady. We show that expert\nrouting strategies that have cross-batch dependencies are vulnerable to\nattacks. Malicious queries can be sent to a model and can affect a model's\noutput on other benign queries if they are grouped in the same batch. 
We\ndemonstrate this via a proof-of-concept attack in a toy experimental setting.", + "authors": "Jamie Hayes, Ilia Shumailov, Itay Yona", + "published": "2024-02-08", + "updated": "2024-02-08", + "primary_cat": "cs.CR", + "cats": [ + "cs.CR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.10598v3", + "title": "Sparsely-gated Mixture-of-Expert Layers for CNN Interpretability", + "abstract": "Sparsely-gated Mixture of Expert (MoE) layers have been recently successfully\napplied for scaling large transformers, especially for language modeling tasks.\nAn intriguing side effect of sparse MoE layers is that they convey inherent\ninterpretability to a model via natural expert specialization. In this work, we\napply sparse MoE layers to CNNs for computer vision tasks and analyze the\nresulting effect on model interpretability. To stabilize MoE training, we\npresent both soft and hard constraint-based approaches. With hard constraints,\nthe weights of certain experts are allowed to become zero, while soft\nconstraints balance the contribution of experts with an additional auxiliary\nloss. As a result, soft constraints handle expert utilization better and\nsupport the expert specialization process, while hard constraints maintain more\ngeneralized experts and increase overall model performance. Our findings\ndemonstrate that experts can implicitly focus on individual sub-domains of the\ninput space. For example, experts trained for CIFAR-100 image classification\nspecialize in recognizing different domains such as flowers or animals without\nprevious data clustering. Experiments with RetinaNet and the COCO dataset\nfurther indicate that object detection experts can also specialize in detecting\nobjects of distinct sizes.", + "authors": "Svetlana Pavlitska, Christian Hubschneider, Lukas Struppek, J. Marius Z\u00f6llner", + "published": "2022-04-22", + "updated": "2023-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2011.01613v1", + "title": "Towards a Universal Gating Network for Mixtures of Experts", + "abstract": "The combination and aggregation of knowledge from multiple neural networks\ncan be commonly seen in the form of mixtures of experts. However, such\ncombinations are usually done using networks trained on the same tasks, with\nlittle mention of the combination of heterogeneous pre-trained networks,\nespecially in the data-free regime. This paper proposes multiple data-free\nmethods for the combination of heterogeneous neural networks, ranging from the\nutilization of simple output logit statistics, to training specialized gating\nnetworks. The gating networks decide whether specific inputs belong to specific\nnetworks based on the nature of the expert activations generated. The\nexperiments revealed that the gating networks, including the universal gating\napproach, constituted the most accurate approach, and therefore represent a\npragmatic step towards applications with heterogeneous mixtures of experts in a\ndata-free regime. 
The code for this project is hosted on github at\nhttps://github.com/cwkang1998/network-merging.", + "authors": "Chen Wen Kang, Chua Meng Hong, Tomas Maul", + "published": "2020-11-03", + "updated": "2020-11-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2004.03751v4", + "title": "Robust Fitting of Mixture Models using Weighted Complete Estimating Equations", + "abstract": "Mixture modeling, which considers the potential heterogeneity in data, is\nwidely adopted for classification and clustering problems. Mixture models can\nbe estimated using the Expectation-Maximization algorithm, which works with the\ncomplete estimating equations conditioned by the latent membership variables of\nthe cluster assignment based on the hierarchical expression of mixture models.\nHowever, when the mixture components have light tails such as a normal\ndistribution, the mixture model can be sensitive to outliers. This study\nproposes a method of weighted complete estimating equations (WCE) for the\nrobust fitting of mixture models. Our WCE introduces weights to complete\nestimating equations such that the weights can automatically downweight the\noutliers. The weights are constructed similarly to the density power divergence\nfor mixture models, but in our WCE, they depend only on the component\ndistributions and not on the whole mixture. A novel\nexpectation-estimating-equation (EEE) algorithm is also developed to solve the\nWCE. For illustrative purposes, a multivariate Gaussian mixture, a mixture of\nexperts, and a multivariate skew normal mixture are considered, and how our EEE\nalgorithm can be implemented for these specific models is described. The\nnumerical performance of the proposed robust estimation method was examined\nusing simulated and real datasets.", + "authors": "Shonosuke Sugasawa, Genya Kobayashi", + "published": "2020-04-08", + "updated": "2022-03-17", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1612.06879v1", + "title": "Robust mixture of experts modeling using the skew $t$ distribution", + "abstract": "Mixture of Experts (MoE) is a popular framework in the fields of statistics\nand machine learning for modeling heterogeneity in data for regression,\nclassification and clustering. MoE for continuous data are usually based on the\nnormal distribution. However, it is known that for data with asymmetric\nbehavior, heavy tails and atypical observations, the use of the normal\ndistribution is unsuitable. We introduce a new robust non-normal mixture of\nexperts modeling using the skew $t$ distribution. The proposed skew $t$ mixture\nof experts, named STMoE, handles these issues of the normal mixtures experts\nregarding possibly skewed, heavy-tailed and noisy data. We develop a dedicated\nexpectation conditional maximization (ECM) algorithm to estimate the model\nparameters by monotonically maximizing the observed data log-likelihood. We\ndescribe how the presented model can be used in prediction and in model-based\nclustering of regression data. Numerical experiments carried out on simulated\ndata show the effectiveness and the robustness of the proposed model in fitting\nnon-linear regression functions as well as in model-based clustering. 
Then, the\nproposed model is applied to the real-world data of tone perception for musical\ndata analysis, and the one of temperature anomalies for the analysis of climate\nchange data. The obtained results confirm the usefulness of the model for\npractical data analysis applications.", + "authors": "Faicel Chamroukhi", + "published": "2016-12-09", + "updated": "2016-12-09", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG", + "stat.ML", + "62, 62F, 62H30, 62h", + "G.3; I.2.6; I.5.1" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.07816v1", + "title": "Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", + "abstract": "We investigate efficient methods for training Large Language Models (LLMs) to\npossess capabilities in multiple specialized domains, such as coding, math\nreasoning and world knowledge. Our method, named Branch-Train-MiX (BTX), starts\nfrom a seed model, which is branched to train experts in embarrassingly\nparallel fashion with high throughput and reduced communication cost. After\nindividual experts are asynchronously trained, BTX brings together their\nfeedforward parameters as experts in Mixture-of-Expert (MoE) layers and\naverages the remaining parameters, followed by an MoE-finetuning stage to learn\ntoken-level routing. BTX generalizes two special cases, the Branch-Train-Merge\nmethod, which does not have the MoE finetuning stage to learn routing, and\nsparse upcycling, which omits the stage of training experts asynchronously.\nCompared to alternative approaches, BTX achieves the best accuracy-efficiency\ntradeoff.", + "authors": "Sainbayar Sukhbaatar, Olga Golovneva, Vasu Sharma, Hu Xu, Xi Victoria Lin, Baptiste Rozi\u00e8re, Jacob Kahn, Daniel Li, Wen-tau Yih, Jason Weston, Xian Li", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.11412v1", + "title": "Expert Composer Policy: Scalable Skill Repertoire for Quadruped Robots", + "abstract": "We propose the expert composer policy, a framework to reliably expand the\nskill repertoire of quadruped agents. The composer policy links pair of experts\nvia transitions to a sampled target state, allowing experts to be composed\nsequentially. Each expert specializes in a single skill, such as a locomotion\ngait or a jumping motion. Instead of a hierarchical or mixture-of-experts\narchitecture, we train a single composer policy in an independent process that\nis not conditioned on the other expert policies. By reusing the same composer\npolicy, our approach enables adding new experts without affecting existing\nones, enabling incremental repertoire expansion and preserving original motion\nquality. We measured the transition success rate of 72 transition pairs and\nachieved an average success rate of 99.99\\%, which is over 10\\% higher than the\nbaseline random approach, and outperforms other state-of-the-art methods. 
Using\ndomain randomization during training we ensure a successful transfer to the\nreal world, where we achieve an average transition success rate of 97.22\\%\n(N=360) in our experiments.", + "authors": "Guilherme Christmann, Ying-Sheng Luo, Wei-Chao Chen", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.RO", + "cats": [ + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.12830v1", + "title": "Mixtures of Gaussian Process Experts with SMC$^2$", + "abstract": "Gaussian processes are a key component of many flexible statistical and\nmachine learning models. However, they exhibit cubic computational complexity\nand high memory constraints due to the need of inverting and storing a full\ncovariance matrix. To circumvent this, mixtures of Gaussian process experts\nhave been considered where data points are assigned to independent experts,\nreducing the complexity by allowing inference based on smaller, local\ncovariance matrices. Moreover, mixtures of Gaussian process experts\nsubstantially enrich the model's flexibility, allowing for behaviors such as\nnon-stationarity, heteroscedasticity, and discontinuities. In this work, we\nconstruct a novel inference approach based on nested sequential Monte Carlo\nsamplers to simultaneously infer both the gating network and Gaussian process\nexpert parameters. This greatly improves inference compared to importance\nsampling, particularly in settings when a stationary Gaussian process is\ninappropriate, while still being thoroughly parallelizable.", + "authors": "Teemu H\u00e4rk\u00f6nen, Sara Wade, Kody Law, Lassi Roininen", + "published": "2022-08-26", + "updated": "2022-08-26", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.07109v3", + "title": "Context-aware Mixture-of-Experts for Unbiased Scene Graph Generation", + "abstract": "Scene graph generation (SGG) has gained tremendous progress in recent years.\nHowever, its underlying long-tailed distribution of predicate classes is a\nchallenging problem. For extremely unbalanced predicate distributions, existing\napproaches usually construct complicated context encoders to extract the\nintrinsic relevance of scene context to predicates and complex networks to\nimprove the learning ability of network models for highly imbalanced predicate\ndistributions. To address the unbiased SGG problem, we introduce a simple yet\neffective method dubbed Context-Aware Mixture-of-Experts (CAME) to improve\nmodel diversity and mitigate biased SGG without complicated design.\nSpecifically, we propose to integrate the mixture of experts with a divide and\nensemble strategy to remedy the severely long-tailed distribution of predicate\nclasses, which is applicable to the majority of unbiased scene graph\ngenerators. The biased SGG is thereby reduced, and the model tends to\nanticipate more evenly distributed predicate predictions. To differentiate\nbetween various predicate distribution levels, experts with the same weights\nare not sufficiently diverse. In order to enable the network dynamically\nexploit the rich scene context and further boost the diversity of model, we\nsimply use the built-in module to create a context encoder. The importance of\neach expert to scene context and each predicate to each expert is dynamically\nassociated with expert weighting (EW) and predicate weighting (PW) strategy. 
We\nhave conducted extensive experiments on three tasks using the Visual Genome\ndataset, showing that CAME outperforms recent methods and achieves\nstate-of-the-art performance. Our code will be available publicly.", + "authors": "Liguang Zhou, Yuhongze Zhou, Tin Lun Lam, Yangsheng Xu", + "published": "2022-08-15", + "updated": "2023-01-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.14703v1", + "title": "Improving Expert Specialization in Mixture of Experts", + "abstract": "Mixture of experts (MoE), introduced over 20 years ago, is the simplest gated\nmodular neural network architecture. There is renewed interest in MoE because\nthe conditional computation allows only parts of the network to be used during\neach inference, as was recently demonstrated in large scale natural language\nprocessing models. MoE is also of potential interest for continual learning, as\nexperts may be reused for new tasks, and new experts introduced. The gate in\nthe MoE architecture learns task decompositions and individual experts learn\nsimpler functions appropriate to the gate's decomposition. In this paper: (1)\nwe show that the original MoE architecture and its training method do not\nguarantee intuitive task decompositions and good expert utilization, indeed\nthey can fail spectacularly even for simple data such as MNIST and\nFashionMNIST; (2) we introduce a novel gating architecture, similar to\nattention, that improves performance and results in a lower entropy task\ndecomposition; and (3) we introduce a novel data-driven regularization that\nimproves expert specialization. We empirically validate our methods on MNIST,\nFashionMNIST and CIFAR-100 datasets.", + "authors": "Yamuna Krishnamurthy, Chris Watkins, Thomas Gaertner", + "published": "2023-02-28", + "updated": "2023-02-28", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.16610v1", + "title": "Efficient Deweather Mixture-of-Experts with Uncertainty-aware Feature-wise Linear Modulation", + "abstract": "The Mixture-of-Experts (MoE) approach has demonstrated outstanding\nscalability in multi-task learning including low-level upstream tasks such as\nconcurrent removal of multiple adverse weather effects. However, the\nconventional MoE architecture with parallel Feed Forward Network (FFN) experts\nleads to significant parameter and computational overheads that hinder its\nefficient deployment. In addition, the naive MoE linear router is suboptimal in\nassigning task-specific features to multiple experts which limits its further\nscalability. In this work, we propose an efficient MoE architecture with weight\nsharing across the experts. Inspired by the idea of linear feature modulation\n(FM), our architecture implicitly instantiates multiple experts via learnable\nactivation modulations on a single shared expert block. The proposed Feature\nModulated Expert (FME) serves as a building block for the novel\nMixture-of-Feature-Modulation-Experts (MoFME) architecture, which can scale up\nthe number of experts with low overhead. We further propose an\nUncertainty-aware Router (UaR) to assign task-specific features to different FM\nmodules with well-calibrated weights. This enables MoFME to effectively learn\ndiverse expert functions for multiple tasks. 
The conducted experiments on the\nmulti-deweather task show that our MoFME outperforms the baselines in the image\nrestoration quality by 0.1-0.2 dB and achieves SOTA-compatible performance\nwhile saving more than 72% of parameters and 39% inference time over the\nconventional MoE counterpart. Experiments on the downstream segmentation and\nclassification tasks further demonstrate the generalizability of MoFME to real\nopen-world applications.", + "authors": "Rongyu Zhang, Yulin Luo, Jiaming Liu, Huanrui Yang, Zhen Dong, Denis Gudovskiy, Tomoyuki Okuno, Yohei Nakata, Kurt Keutzer, Yuan Du, Shanghang Zhang", + "published": "2023-12-27", + "updated": "2023-12-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17404v1", + "title": "Generalization Error Analysis for Sparse Mixture-of-Experts: A Preliminary Study", + "abstract": "Mixture-of-Experts (MoE) represents an ensemble methodology that amalgamates\npredictions from several specialized sub-models (referred to as experts). This\nfusion is accomplished through a router mechanism, dynamically assigning\nweights to each expert's contribution based on the input data. Conventional MoE\nmechanisms select all available experts, incurring substantial computational\ncosts. In contrast, Sparse Mixture-of-Experts (Sparse MoE) selectively engages\nonly a limited number, or even just one expert, significantly reducing\ncomputation overhead while empirically preserving, and sometimes even\nenhancing, performance. Despite its wide-ranging applications and these\nadvantageous characteristics, MoE's theoretical underpinnings have remained\nelusive. In this paper, we embark on an exploration of Sparse MoE's\ngeneralization error concerning various critical factors. Specifically, we\ninvestigate the impact of the number of data samples, the total number of\nexperts, the sparsity in expert selection, the complexity of the routing\nmechanism, and the complexity of individual experts. Our analysis sheds light\non \\textit{how \\textbf{sparsity} contributes to the MoE's generalization},\noffering insights from the perspective of classical learning theory.", + "authors": "Jinze Zhao, Peihao Wang, Zhangyang Wang", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2102.06034v1", + "title": "Speech enhancement with mixture-of-deep-experts with clean clustering pre-training", + "abstract": "In this study we present a mixture of deep experts (MoDE) neural-network\narchitecture for single microphone speech enhancement. Our architecture\ncomprises a set of deep neural networks (DNNs), each of which is an 'expert' in\na different speech spectral pattern such as phoneme. A gating DNN is\nresponsible for the latent variables which are the weights assigned to each\nexpert's output given a speech segment. The experts estimate a mask from the\nnoisy input and the final mask is then obtained as a weighted average of the\nexperts' estimates, with the weights determined by the gating DNN. A soft\nspectral attenuation, based on the estimated mask, is then applied to enhance\nthe noisy speech signal. As a byproduct, we gain reduction at the complexity in\ntest time. We show that the experts specialization allows better robustness to\nunfamiliar noise types.", + "authors": "Shlomo E. 
Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2021-02-11", + "updated": "2021-02-11", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "cs.LG", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2205.01848v2", + "title": "Optimizing Mixture of Experts using Dynamic Recompilations", + "abstract": "The Mixture of Experts architecture allows for outrageously large neural\nnetworks by scaling model parameter size independently from computational\ndemand (FLOPs). However, current DNN frameworks cannot effectively support the\ndynamic data flow in Mixture of Experts, and implementations on top of these\nframeworks need to use workarounds that introduce significant overheads. To\naddress the limitation of these frameworks, we present DynaMoE, a DNN library\nthat uses dynamic recompilations to optimize and adapt the use of computational\nresources to the dynamic needs of Mixture of Experts models. Our evaluation\nshows that DynaMoE achieves a 1.8x speedup and supports 2.3x larger model sizes\nwhen compared to existing MoE systems, even when not using recompilations. We\nthen present further optimizations enabled by dynamic recompilations that yield\nan additional 1.7x speedup while simultaneously reducing memory pressure and\nimproving model quality.", + "authors": "Ferdinand Kossmann, Zhihao Jia, Alex Aiken", + "published": "2022-05-04", + "updated": "2022-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1811.10740v2", + "title": "Mixture of Regression Experts in fMRI Encoding", + "abstract": "fMRI semantic category understanding using linguistic encoding models attempt\nto learn a forward mapping that relates stimuli to the corresponding brain\nactivation. Classical encoding models use linear multi-variate methods to\npredict the brain activation (all voxels) given the stimulus. However, these\nmethods essentially assume multiple regions as one large uniform region or\nseveral independent regions, ignoring connections among them. In this paper, we\npresent a mixture of experts-based model where a group of experts captures\nbrain activity patterns related to particular regions of interest (ROI) and\nalso show the discrimination across different experts. The model is trained\nword stimuli encoded as 25-dimensional feature vectors as input and the\ncorresponding brain responses as output. Given a new word (25-dimensional\nfeature vector), it predicts the entire brain activation as the linear\ncombination of multiple experts brain activations. We argue that each expert\nlearns a certain region of brain activations corresponding to its category of\nwords, which solves the problem of identifying the regions with a simple\nencoding model. We showcase that proposed mixture of experts-based model indeed\nlearns region-based experts to predict the brain activations with high spatial\naccuracy.", + "authors": "Subba Reddy Oota, Adithya Avvaru, Naresh Manwani, Raju S. 
Bapi", + "published": "2018-11-26", + "updated": "2018-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.HC", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.03292v1", + "title": "Enhancing Molecular Property Prediction via Mixture of Collaborative Experts", + "abstract": "Molecular Property Prediction (MPP) task involves predicting biochemical\nproperties based on molecular features, such as molecular graph structures,\ncontributing to the discovery of lead compounds in drug development. To address\ndata scarcity and imbalance in MPP, some studies have adopted Graph Neural\nNetworks (GNN) as an encoder to extract commonalities from molecular graphs.\nHowever, these approaches often use a separate predictor for each task,\nneglecting the shared characteristics among predictors corresponding to\ndifferent tasks. In response to this limitation, we introduce the GNN-MoCE\narchitecture. It employs the Mixture of Collaborative Experts (MoCE) as\npredictors, exploiting task commonalities while confronting the homogeneity\nissue in the expert pool and the decision dominance dilemma within the expert\ngroup. To enhance expert diversity for collaboration among all experts, the\nExpert-Specific Projection method is proposed to assign a unique projection\nperspective to each expert. To balance decision-making influence for\ncollaboration within the expert group, the Expert-Specific Loss is presented to\nintegrate individual expert loss into the weighted decision loss of the group\nfor more equitable training. Benefiting from the enhancements of MoCE in expert\ncreation, dynamic expert group formation, and experts' collaboration, our model\ndemonstrates superior performance over traditional methods on 24 MPP datasets,\nespecially in tasks with limited data or high imbalance.", + "authors": "Xu Yao, Shuang Liang, Songqiao Han, Hailiang Huang", + "published": "2023-12-06", + "updated": "2023-12-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.MA", + "q-bio.QM" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09762v1", + "title": "Diversifying the Mixture-of-Experts Representation for Language Models with Orthogonal Optimizer", + "abstract": "The Mixture of Experts (MoE) has emerged as a highly successful technique in\ndeep learning, based on the principle of divide-and-conquer to maximize model\ncapacity without significant additional computational cost. Even in the era of\nlarge-scale language models (LLMs), MoE continues to play a crucial role, as\nsome researchers have indicated that GPT-4 adopts the MoE structure to ensure\ndiverse inference results. However, MoE is susceptible to performance\ndegeneracy, particularly evident in the issues of imbalance and homogeneous\nrepresentation among experts. While previous studies have extensively addressed\nthe problem of imbalance, the challenge of homogeneous representation remains\nunresolved. In this study, we shed light on the homogeneous representation\nproblem, wherein experts in the MoE fail to specialize and lack diversity,\nleading to frustratingly high similarities in their representations (up to 99%\nin a well-performed MoE model). This problem restricts the expressive power of\nthe MoE and, we argue, contradicts its original intention. To tackle this\nissue, we propose a straightforward yet highly effective solution: OMoE, an\northogonal expert optimizer. 
Additionally, we introduce an alternating training\nstrategy that encourages each expert to update in a direction orthogonal to the\nsubspace spanned by other experts. Our algorithm facilitates MoE training in\ntwo key ways: firstly, it explicitly enhances representation diversity, and\nsecondly, it implicitly fosters interaction between experts during orthogonal\nweights computation. Through extensive experiments, we demonstrate that our\nproposed optimization algorithm significantly improves the performance of\nfine-tuning the MoE model on the GLUE benchmark, SuperGLUE benchmark,\nquestion-answering task, and name entity recognition tasks.", + "authors": "Boan Liu, Liang Ding, Li Shen, Keqin Peng, Yu Cao, Dazhao Cheng, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-10-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.00968v2", + "title": "Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts", + "abstract": "Large multi-modal models (LMMs) exhibit remarkable performance across\nnumerous tasks. However, generalist LMMs often suffer from performance\ndegradation when tuned over a large collection of tasks. Recent research\nsuggests that Mixture of Experts (MoE) architectures are useful for instruction\ntuning, but for LMMs of parameter size around O(50-100B), the prohibitive cost\nof replicating and storing the expert models severely limits the number of\nexperts we can use. We propose Omni-SMoLA, an architecture that uses the Soft\nMoE approach to (softly) mix many multimodal low rank experts, and avoids\nintroducing a significant number of new parameters compared to conventional MoE\nmodels. The core intuition here is that the large model provides a foundational\nbackbone, while different lightweight experts residually learn specialized\nknowledge, either per-modality or multimodally. Extensive experiments\ndemonstrate that the SMoLA approach helps improve the generalist performance\nacross a broad range of generative vision-and-language tasks, achieving new\nSoTA generalist performance that often matches or outperforms single\nspecialized LMM baselines, as well as new SoTA specialist performance.", + "authors": "Jialin Wu, Xia Hu, Yaqing Wang, Bo Pang, Radu Soricut", + "published": "2023-12-01", + "updated": "2024-04-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.04693v2", + "title": "GraphMETRO: Mitigating Complex Graph Distribution Shifts via Mixture of Aligned Experts", + "abstract": "Graph data are inherently complex and heterogeneous, leading to a high\nnatural diversity of distributional shifts. However, it remains unclear how to\nbuild machine learning architectures that generalize to complex non-synthetic\ndistributional shifts naturally occurring in the real world. Here we develop\nGraphMETRO, a Graph Neural Network architecture, that reliably models natural\ndiversity and captures complex distributional shifts. GraphMETRO employs a\nMixture-of-Experts (MoE) architecture with a gating model and multiple expert\nmodels, where each expert model targets a specific distributional shift to\nproduce a shift-invariant representation, and the gating model identifies shift\ncomponents. 
Additionally, we design a novel objective that aligns the\nrepresentations from different expert models to ensure smooth optimization.\nGraphMETRO achieves state-of-the-art results on four datasets from GOOD\nbenchmark comprised of complex and natural real-world distribution shifts,\nimproving by 67% and 4.2% on WebKB and Twitch datasets.", + "authors": "Shirley Wu, Kaidi Cao, Bruno Ribeiro, James Zou, Jure Leskovec", + "published": "2023-12-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.10768v1", + "title": "Memory Augmented Language Models through Mixture of Word Experts", + "abstract": "Scaling up the number of parameters of language models has proven to be an\neffective approach to improve performance. For dense models, increasing model\nsize proportionally increases the model's computation footprint. In this work,\nwe seek to aggressively decouple learning capacity and FLOPs through\nMixture-of-Experts (MoE) style models with large knowledge-rich vocabulary\nbased routing functions and experts. Our proposed approach, dubbed Mixture of\nWord Experts (MoWE), can be seen as a memory augmented model, where a large set\nof word-specific experts play the role of a sparse memory. We demonstrate that\nMoWE performs significantly better than the T5 family of models with similar\nnumber of FLOPs in a variety of NLP tasks. Additionally, MoWE outperforms\nregular MoE models on knowledge intensive tasks and has similar performance to\nmore complex memory augmented approaches that often require to invoke custom\nmechanisms to search the sparse memory.", + "authors": "Cicero Nogueira dos Santos, James Lee-Thorp, Isaac Noble, Chung-Ching Chang, David Uthus", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1704.00946v4", + "title": "Approximation results regarding the multiple-output mixture of linear experts model", + "abstract": "Mixture of experts (MoE) models are a class of artificial neural networks\nthat can be used for functional approximation and probabilistic modeling. An\nimportant class of MoE models is the class of mixture of linear experts (MoLE)\nmodels, where the expert functions map to real topological output spaces. There\nare a number of powerful approximation results regarding MoLE models, when the\noutput space is univariate. These results guarantee the ability of MoLE mean\nfunctions to approximate arbitrary continuous functions, and MoLE models\nthemselves to approximate arbitrary conditional probability density functions.\nWe utilize and extend upon the univariate approximation results in order to\nprove a pair of useful results for situations where the output spaces are\nmultivariate.", + "authors": "Hien D. Nguyen, Faicel Chamroukhi, Florence Forbes", + "published": "2017-04-04", + "updated": "2019-05-28", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2112.14397v2", + "title": "EvoMoE: An Evolutional Mixture-of-Experts Training Framework via Dense-To-Sparse Gate", + "abstract": "Mixture-of-experts (MoE) is becoming popular due to its success in improving\nthe model quality, especially in Transformers. 
By routing tokens with a sparse\ngate to a few experts (i.e., a small pieces of the full model), MoE can easily\nincrease the model parameters to a very large scale while keeping the\ncomputation cost in a constant level. Most existing works just initialize some\nrandom experts, set a fixed gating strategy (e.g., Top-k), and train the model\nfrom scratch in an ad-hoc way. We identify that these MoE models are suffering\nfrom the immature experts and unstable sparse gate, which are harmful to the\nconvergence performance. In this paper, we propose an efficient end-to-end MoE\ntraining framework called EvoMoE. EvoMoE starts from training one single expert\nand gradually evolves into a large and sparse MoE structure. EvoMoE mainly\ncontains two phases: the expert-diversify phase to train the base expert for a\nwhile and spawn multiple diverse experts from it, and the gate-sparsify phase\nto learn an adaptive sparse gate and activate a dynamic number of experts.\nEvoMoE naturally decouples the joint learning of both the experts and the\nsparse gate and focuses on learning the basic knowledge with a single expert at\nthe early training stage. Then it diversifies the experts and continues to\ntrain the MoE with a novel Dense-to-Sparse gate (DTS-Gate). Specifically,\ninstead of using a permanent sparse gate, DTS-Gate begins as a dense gate that\nroutes tokens to all experts, then gradually and adaptively becomes sparser\nwhile routes to fewer experts. Evaluations are conducted on three popular\nmodels and tasks, including RoBERTa for masked language modeling task, GPT for\nlanguage modeling task and Transformer for machine translation task. The\nresults show that EvoMoE outperforms existing baselines, including Switch, BASE\nLayer, Hash Layer and StableMoE.", + "authors": "Xiaonan Nie, Xupeng Miao, Shijie Cao, Lingxiao Ma, Qibin Liu, Jilong Xue, Youshan Miao, Yi Liu, Zhi Yang, Bin Cui", + "published": "2021-12-29", + "updated": "2022-10-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.00893v1", + "title": "MoDE: A Mixture-of-Experts Model with Mutual Distillation among the Experts", + "abstract": "The application of mixture-of-experts (MoE) is gaining popularity due to its\nability to improve model's performance. In an MoE structure, the gate layer\nplays a significant role in distinguishing and routing input features to\ndifferent experts. This enables each expert to specialize in processing their\ncorresponding sub-tasks. However, the gate's routing mechanism also gives rise\nto narrow vision: the individual MoE's expert fails to use more samples in\nlearning the allocated sub-task, which in turn limits the MoE to further\nimprove its generalization ability. To effectively address this, we propose a\nmethod called Mixture-of-Distilled-Expert (MoDE), which applies moderate mutual\ndistillation among experts to enable each expert to pick up more features\nlearned by other experts and gain more accurate perceptions on their original\nallocated sub-tasks. 
We conduct plenty experiments including tabular, NLP and\nCV datasets, which shows MoDE's effectiveness, universality and robustness.\nFurthermore, we develop a parallel study through innovatively constructing\n\"expert probing\", to experimentally prove why MoDE works: moderate distilling\nknowledge can improve each individual expert's test performances on their\nassigned tasks, leading to MoE's overall performance improvement.", + "authors": "Zhitian Xie, Yinger Zhang, Chenyi Zhuang, Qitao Shi, Zhining Liu, Jinjie Gu, Guannan Zhang", + "published": "2024-01-31", + "updated": "2024-01-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2308.00951v1", + "title": "From Sparse to Soft Mixtures of Experts", + "abstract": "Sparse mixture of expert architectures (MoEs) scale model capacity without\nlarge increases in training or inference costs. Despite their success, MoEs\nsuffer from a number of issues: training instability, token dropping, inability\nto scale the number of experts, or ineffective finetuning. In this work, we\nproposeSoft MoE, a fully-differentiable sparse Transformer that addresses these\nchallenges, while maintaining the benefits of MoEs. Soft MoE performs an\nimplicit soft assignment by passing different weighted combinations of all\ninput tokens to each expert. As in other MoE works, experts in Soft MoE only\nprocess a subset of the (combined) tokens, enabling larger model capacity at\nlower inference cost. In the context of visual recognition, Soft MoE greatly\noutperforms standard Transformers (ViTs) and popular MoE variants (Tokens\nChoice and Experts Choice). For example, Soft MoE-Base/16 requires 10.5x lower\ninference cost (5.7x lower wall-clock time) than ViT-Huge/14 while matching its\nperformance after similar training. Soft MoE also scales well: Soft MoE Huge/14\nwith 128 experts in 16 MoE layers has over 40x more parameters than ViT\nHuge/14, while inference time cost grows by only 2%, and it performs\nsubstantially better.", + "authors": "Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Neil Houlsby", + "published": "2023-08-02", + "updated": "2023-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.13934v1", + "title": "Functional mixture-of-experts for classification", + "abstract": "We develop a mixtures-of-experts (ME) approach to the multiclass\nclassification where the predictors are univariate functions. It consists of a\nME model in which both the gating network and the experts network are\nconstructed upon multinomial logistic activation functions with functional\ninputs. We perform a regularized maximum likelihood estimation in which the\ncoefficient functions enjoy interpretable sparsity constraints on targeted\nderivatives. We develop an EM-Lasso like algorithm to compute the regularized\nMLE and evaluate the proposed approach on simulated and real data.", + "authors": "Nhat Thien Pham, Faicel Chamroukhi", + "published": "2022-02-28", + "updated": "2022-02-28", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.04377v2", + "title": "Convergence Rates for Gaussian Mixtures of Experts", + "abstract": "We provide a theoretical treatment of over-specified Gaussian mixtures of\nexperts with covariate-free gating networks. 
We establish the convergence rates\nof the maximum likelihood estimation (MLE) for these models. Our proof\ntechnique is based on a novel notion of \\emph{algebraic independence} of the\nexpert functions. Drawing on optimal transport theory, we establish a\nconnection between the algebraic independence and a certain class of partial\ndifferential equations (PDEs). Exploiting this connection allows us to derive\nconvergence rates and minimax lower bounds for parameter estimation.", + "authors": "Nhat Ho, Chiao-Yu Yang, Michael I. Jordan", + "published": "2019-07-09", + "updated": "2022-03-08", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "cs.LG", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.13833v2", + "title": "Mixtures of Gaussian process experts based on kernel stick-breaking processes", + "abstract": "Mixtures of Gaussian process experts is a class of models that can\nsimultaneously address two of the key limitations inherent in standard Gaussian\nprocesses: scalability and predictive performance. In particular, models that\nuse Dirichlet processes as gating functions permit straightforward\ninterpretation and automatic selection of the number of experts in a mixture.\nWhile the existing models are intuitive and capable of capturing\nnon-stationarity, multi-modality and heteroskedasticity, the simplicity of\ntheir gating functions may limit the predictive performance when applied to\ncomplex data-generating processes. Capitalising on the recent advancement in\nthe dependent Dirichlet processes literature, we propose a new mixture model of\nGaussian process experts based on kernel stick-breaking processes. Our model\nmaintains the intuitive appeal yet improve the performance of the existing\nmodels. To make it practical, we design a sampler for posterior computation\nbased on the slice sampling. The model behaviour and improved predictive\nperformance are demonstrated in experiments using six datasets.", + "authors": "Yuji Saikai, Khue-Dung Dang", + "published": "2023-04-26", + "updated": "2023-05-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.08245v1", + "title": "Scattered Mixture-of-Experts Implementation", + "abstract": "We present ScatterMoE, an implementation of Sparse Mixture-of-Experts (SMoE)\non GPUs. ScatterMoE builds upon existing implementations, and overcoming some\nof the limitations to improve inference and training speed, and memory\nfootprint. This implementation achieves this by avoiding padding and making\nexcessive copies of the input. We introduce ParallelLinear, the main component\nwe use to build our implementation and the various kernels used to speed up the\noperation. We benchmark our implementation against Megablocks, and show that it\nenables a higher throughput and lower memory footprint. 
We also show how\nParallelLinear enables extension of the Mixture-of-Experts concept by\ndemonstrating with an implementation of Mixture of Attention.", + "authors": "Shawn Tan, Yikang Shen, Rameswar Panda, Aaron Courville", + "published": "2024-03-13", + "updated": "2024-03-13", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09832v3", + "title": "Merging Experts into One: Improving Computational Efficiency of Mixture of Experts", + "abstract": "Scaling the size of language models usually leads to remarkable advancements\nin NLP tasks. But it often comes with a price of growing computational cost.\nAlthough a sparse Mixture of Experts (MoE) can reduce the cost by activating a\nsmall subset of parameters (e.g., one expert) for each input, its computation\nescalates significantly if increasing the number of activated experts, limiting\nits practical utility. Can we retain the advantages of adding more experts\nwithout substantially increasing the computational costs? In this paper, we\nfirst demonstrate the superiority of selecting multiple experts and then\npropose a computation-efficient approach called \\textbf{\\texttt{Merging Experts\ninto One}} (MEO), which reduces the computation cost to that of a single\nexpert. Extensive experiments show that MEO significantly improves\ncomputational efficiency, e.g., FLOPS drops from 72.0G of vanilla MoE to 28.6G\n(MEO). Moreover, we propose a token-level attention block that further enhances\nthe efficiency and performance of token-level MEO, e.g., 83.3\\% (MEO) vs.\n82.6\\% (vanilla MoE) average score on the GLUE benchmark. Our code will be\nreleased upon acceptance. Code will be released at:\n\\url{https://github.com/Shwai-He/MEO}.", + "authors": "Shwai He, Run-Ze Fan, Liang Ding, Li Shen, Tianyi Zhou, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-11-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2207.09094v1", + "title": "MoEC: Mixture of Expert Clusters", + "abstract": "Sparsely Mixture of Experts (MoE) has received great interest due to its\npromising scaling capability with affordable computational overhead. MoE\nconverts dense layers into sparse experts, and utilizes a gated routing network\nto make experts conditionally activated. However, as the number of experts\ngrows, MoE with outrageous parameters suffers from overfitting and sparse data\nallocation. Such problems are especially severe on tasks with limited data,\nthus hindering the progress for MoE models to improve performance by scaling\nup. In this work, we propose Mixture of Expert Clusters - a general approach to\nenable expert layers to learn more diverse and appropriate knowledge by\nimposing variance-based constraints on the routing stage. We further propose a\ncluster-level expert dropout strategy specifically designed for the expert\ncluster structure. Our experiments reveal that MoEC could improve performance\non machine translation and natural language understanding tasks, and raise the\nperformance upper bound for scaling up experts under limited data. 
We also\nverify that MoEC plays a positive role in mitigating overfitting and sparse\ndata allocation.", + "authors": "Yuan Xie, Shaohan Huang, Tianyu Chen, Furu Wei", + "published": "2022-07-19", + "updated": "2022-07-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05838v1", + "title": "Liu-type Shrinkage Estimators for Mixture of Poisson Regressions with Experts: A Heart Disease Study", + "abstract": "Count data play a critical role in medical research, such as heart disease.\nThe Poisson regression model is a common technique for evaluating the impact of\na set of covariates on the count responses. The mixture of Poisson regression\nmodels with experts is a practical tool to exploit the covariates, not only to\nhandle the heterogeneity in the Poisson regressions but also to learn the\nmixing structure of the population. Multicollinearity is one of the most common\nchallenges with regression models, leading to ill-conditioned design matrices\nof Poisson regression components and expert classes. The maximum likelihood\nmethod produces unreliable and misleading estimates for the effects of the\ncovariates in multicollinearity. In this research, we develop Ridge and\nLiu-type methods as two shrinkage approaches to cope with the ill-conditioned\ndesign matrices of the mixture of Poisson regression models with experts.\nThrough various numerical studies, we demonstrate that the shrinkage methods\noffer more reliable estimates for the coefficients of the mixture model in\nmulticollinearity while maintaining the classification performance of the ML\nmethod. The shrinkage methods are finally applied to a heart study to analyze\nthe heart disease rate stages.", + "authors": "Elsayed Ghanem, Moein Yoosefi, Armin Hatefi", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "stat.CO", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2012.02130v4", + "title": "A similarity-based Bayesian mixture-of-experts model", + "abstract": "We present a new nonparametric mixture-of-experts model for multivariate\nregression problems, inspired by the probabilistic k-nearest neighbors\nalgorithm. Using a conditionally specified model, predictions for out-of-sample\ninputs are based on similarities to each observed data point, yielding\npredictive distributions represented by Gaussian mixtures. Posterior inference\nis performed on the parameters of the mixture components as well as the\ndistance metric using a mean-field variational Bayes algorithm accompanied with\na stochastic gradient-based optimization procedure. 
The proposed method is\nespecially advantageous in settings where inputs are of relatively high\ndimension in comparison to the data size, where input-output relationships are\ncomplex, and where predictive distributions may be skewed or multimodal.\nComputational studies on five datasets, of which two are synthetically\ngenerated, illustrate clear advantages of our mixture-of-experts method for\nhigh-dimensional inputs, outperforming competitor models both in terms of\nvalidation metrics and visual inspection.", + "authors": "Tianfang Zhang, Rasmus Bokrantz, Jimmy Olsson", + "published": "2020-12-03", + "updated": "2022-08-03", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05444v1", + "title": "Pushing Mixture of Experts to the Limit: Extremely Parameter Efficient MoE for Instruction Tuning", + "abstract": "The Mixture of Experts (MoE) is a widely known neural architecture where an\nensemble of specialized sub-models optimizes overall performance with a\nconstant computational cost. However, conventional MoEs pose challenges at\nscale due to the need to store all experts in memory. In this paper, we push\nMoE to the limit. We propose extremely parameter-efficient MoE by uniquely\ncombining MoE architecture with lightweight experts.Our MoE architecture\noutperforms standard parameter-efficient fine-tuning (PEFT) methods and is on\npar with full fine-tuning by only updating the lightweight experts -- less than\n1% of an 11B parameters model. Furthermore, our method generalizes to unseen\ntasks as it does not depend on any prior task knowledge. Our research\nunderscores the versatility of the mixture of experts architecture, showcasing\nits ability to deliver robust performance even when subjected to rigorous\nparameter constraints. Our code used in all the experiments is publicly\navailable here: https://github.com/for-ai/parameter-efficient-moe.", + "authors": "Ted Zadouri, Ahmet \u00dcst\u00fcn, Arash Ahmadian, Beyza Ermi\u015f, Acyr Locatelli, Sara Hooker", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1703.09302v1", + "title": "Speech Enhancement using a Deep Mixture of Experts", + "abstract": "In this study we present a Deep Mixture of Experts (DMoE) neural-network\narchitecture for single microphone speech enhancement. By contrast to most\nspeech enhancement algorithms that overlook the speech variability mainly\ncaused by phoneme structure, our framework comprises a set of deep neural\nnetworks (DNNs), each one of which is an 'expert' in enhancing a given speech\ntype corresponding to a phoneme. A gating DNN determines which expert is\nassigned to a given speech segment. A speech presence probability (SPP) is then\nobtained as a weighted average of the expert SPP decisions, with the weights\ndetermined by the gating DNN. A soft spectral attenuation, based on the SPP, is\nthen applied to enhance the noisy speech signal. The experts and the gating\ncomponents of the DMoE network are trained jointly. As part of the training,\nspeech clustering into different subsets is performed in an unsupervised\nmanner. Therefore, unlike previous methods, a phoneme-labeled database is not\nrequired for the training procedure. 
A series of experiments with different\nnoise types verified the applicability of the new algorithm to the task of\nspeech enhancement. The proposed scheme outperforms other schemes that either\ndo not consider phoneme structure or use a simpler training methodology.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2017-03-27", + "updated": "2017-03-27", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1409.4698v1", + "title": "A Mixtures-of-Experts Framework for Multi-Label Classification", + "abstract": "We develop a novel probabilistic approach for multi-label classification that\nis based on the mixtures-of-experts architecture combined with recently\nintroduced conditional tree-structured Bayesian networks. Our approach captures\ndifferent input-output relations from multi-label data using the efficient\ntree-structured classifiers, while the mixtures-of-experts architecture aims to\ncompensate for the tree-structured restrictions and build a more accurate\nmodel. We develop and present algorithms for learning the model from data and\nfor performing multi-label predictions on future data instances. Experiments on\nmultiple benchmark datasets demonstrate that our approach achieves highly\ncompetitive results and outperforms the existing state-of-the-art multi-label\nclassification methods.", + "authors": "Charmgil Hong, Iyad Batal, Milos Hauskrecht", + "published": "2014-09-16", + "updated": "2014-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "I.2.6" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.06327v1", + "title": "Double-Wing Mixture of Experts for Streaming Recommendations", + "abstract": "Streaming Recommender Systems (SRSs) commonly train recommendation models on\nnewly received data only to address user preference drift, i.e., the changing\nuser preferences towards items. However, this practice overlooks the long-term\nuser preferences embedded in historical data. More importantly, the common\nheterogeneity in data stream greatly reduces the accuracy of streaming\nrecommendations. The reason is that different preferences (or characteristics)\nof different types of users (or items) cannot be well learned by a unified\nmodel. To address these two issues, we propose a Variational and\nReservoir-enhanced Sampling based Double-Wing Mixture of Experts framework,\ncalled VRS-DWMoE, to improve the accuracy of streaming recommendations. In\nVRS-DWMoE, we first devise variational and reservoir-enhanced sampling to\nwisely complement new data with historical data, and thus address the user\npreference drift issue while capturing long-term user preferences. After that,\nwe propose a Double-Wing Mixture of Experts (DWMoE) model to first effectively\nlearn heterogeneous user preferences and item characteristics, and then make\nrecommendations based on them. Specifically, DWMoE contains two Mixture of\nExperts (MoE, an effective ensemble learning model) to learn user preferences\nand item characteristics, respectively. Moreover, the multiple experts in each\nMoE learn the preferences (or characteristics) of different types of users (or\nitems) where each expert specializes in one underlying type. 
Extensive\nexperiments demonstrate that VRS-DWMoE consistently outperforms the\nstate-of-the-art SRSs.", + "authors": "Yan Zhao, Shoujin Wang, Yan Wang, Hongwei Liu, Weizhe Zhang", + "published": "2020-09-14", + "updated": "2020-09-14", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.04832v1", + "title": "Dynamic Partition Models", + "abstract": "We present a new approach for learning compact and intuitive distributed\nrepresentations with binary encoding. Rather than summing up expert votes as in\nproducts of experts, we employ for each variable the opinion of the most\nreliable expert. Data points are hence explained through a partitioning of the\nvariables into expert supports. The partitions are dynamically adapted based on\nwhich experts are active. During the learning phase we adopt a smoothed version\nof this model that uses separate mixtures for each data dimension. In our\nexperiments we achieve accurate reconstructions of high-dimensional data points\nwith at most a dozen experts.", + "authors": "Marc Goessling, Yali Amit", + "published": "2017-02-16", + "updated": "2017-02-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1405.7624v1", + "title": "Simultaneous Feature and Expert Selection within Mixture of Experts", + "abstract": "A useful strategy to deal with complex classification scenarios is the\n\"divide and conquer\" approach. The mixture of experts (MOE) technique makes use\nof this strategy by joinly training a set of classifiers, or experts, that are\nspecialized in different regions of the input space. A global model, or gate\nfunction, complements the experts by learning a function that weights their\nrelevance in different parts of the input space. Local feature selection\nappears as an attractive alternative to improve the specialization of experts\nand gate function, particularly, for the case of high dimensional data. Our\nmain intuition is that particular subsets of dimensions, or subspaces, are\nusually more appropriate to classify instances located in different regions of\nthe input space. Accordingly, this work contributes with a regularized variant\nof MoE that incorporates an embedded process for local feature selection using\n$L1$ regularization, with a simultaneous expert selection. The experiments are\nstill pending.", + "authors": "Billy Peralta", + "published": "2014-05-29", + "updated": "2014-05-29", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.11449v2", + "title": "Dynamic Mixture of Experts Models for Online Prediction", + "abstract": "A mixture of experts models the conditional density of a response variable\nusing a mixture of regression models with covariate-dependent mixture weights.\nWe extend the finite mixture of experts model by allowing the parameters in\nboth the mixture components and the weights to evolve in time by following\nrandom walk processes. Inference for time-varying parameters in richly\nparameterized mixture of experts models is challenging. We propose a sequential\nMonte Carlo algorithm for online inference and based on a tailored proposal\ndistribution built on ideas from linear Bayes methods and the EM algorithm. 
The\nmethod gives a unified treatment for mixtures with time-varying parameters,\nincluding the special case of static parameters. We assess the properties of\nthe method on simulated data and on industrial data where the aim is to predict\nsoftware faults in a continuously upgraded large-scale software project.", + "authors": "Parfait Munezero, Mattias Villani, Robert Kohn", + "published": "2021-09-23", + "updated": "2022-10-13", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO", + "stat.AP" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.02806v2", + "title": "Graph Mixture of Experts: Learning on Large-Scale Graphs with Explicit Diversity Modeling", + "abstract": "Graph neural networks (GNNs) have found extensive applications in learning\nfrom graph data. However, real-world graphs often possess diverse structures\nand comprise nodes and edges of varying types. To bolster the generalization\ncapacity of GNNs, it has become customary to augment training graph structures\nthrough techniques like graph augmentations and large-scale pre-training on a\nwider array of graphs. Balancing this diversity while avoiding increased\ncomputational costs and the notorious trainability issues of GNNs is crucial.\nThis study introduces the concept of Mixture-of-Experts (MoE) to GNNs, with the\naim of augmenting their capacity to adapt to a diverse range of training graph\nstructures, without incurring explosive computational overhead. The proposed\nGraph Mixture of Experts (GMoE) model empowers individual nodes in the graph to\ndynamically and adaptively select more general information aggregation experts.\nThese experts are trained to capture distinct subgroups of graph structures and\nto incorporate information with varying hop sizes, where those with larger hop\nsizes specialize in gathering information over longer distances. The\neffectiveness of GMoE is validated through a series of experiments on a diverse\nset of tasks, including graph, node, and link prediction, using the OGB\nbenchmark. Notably, it enhances ROC-AUC by $1.81\\%$ in ogbg-molhiv and by\n$1.40\\%$ in ogbg-molbbbp, when compared to the non-MoE baselines. Our code is\npublicly available at https://github.com/VITA-Group/Graph-Mixture-of-Experts.", + "authors": "Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Rao Kompella, Zhangyang Wang", + "published": "2023-04-06", + "updated": "2023-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02629v2", + "title": "BA-MoE: Boundary-Aware Mixture-of-Experts Adapter for Code-Switching Speech Recognition", + "abstract": "Mixture-of-experts based models, which use language experts to extract\nlanguage-specific representations effectively, have been well applied in\ncode-switching automatic speech recognition. However, there is still\nsubstantial space to improve as similar pronunciation across languages may\nresult in ineffective multi-language modeling and inaccurate language boundary\nestimation. To eliminate these drawbacks, we propose a cross-layer language\nadapter and a boundary-aware training method, namely Boundary-Aware\nMixture-of-Experts (BA-MoE). Specifically, we introduce language-specific\nadapters to separate language-specific representations and a unified gating\nlayer to fuse representations within each encoder layer. 
Second, we compute\nlanguage adaptation loss of the mean output of each language-specific adapter\nto improve the adapter module's language-specific representation learning.\nBesides, we utilize a boundary-aware predictor to learn boundary\nrepresentations for dealing with language boundary confusion. Our approach\nachieves significant performance improvement, reducing the mixture error rate\nby 16.55\\% compared to the baseline on the ASRU 2019 Mandarin-English\ncode-switching challenge dataset.", + "authors": "Peikun Chen, Fan Yu, Yuhao Lian, Hongfei Xue, Xucheng Wan, Naijun Zheng, Huan Zhou, Lei Xie", + "published": "2023-10-04", + "updated": "2023-10-08", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2107.04694v1", + "title": "Lifelong Mixture of Variational Autoencoders", + "abstract": "In this paper, we propose an end-to-end lifelong learning mixture of experts.\nEach expert is implemented by a Variational Autoencoder (VAE). The experts in\nthe mixture system are jointly trained by maximizing a mixture of individual\ncomponent evidence lower bounds (MELBO) on the log-likelihood of the given\ntraining samples. The mixing coefficients in the mixture, control the\ncontributions of each expert in the goal representation. These are sampled from\na Dirichlet distribution whose parameters are determined through non-parametric\nestimation during lifelong learning. The model can learn new tasks fast when\nthese are similar to those previously learnt. The proposed Lifelong mixture of\nVAE (L-MVAE) expands its architecture with new components when learning a\ncompletely new task. After the training, our model can automatically determine\nthe relevant expert to be used when fed with new data samples. This mechanism\nbenefits both the memory efficiency and the required computational cost as only\none expert is used during the inference. The L-MVAE inference model is able to\nperform interpolation in the joint latent space across the data domains\nassociated with different tasks and is shown to be efficient for disentangled\nlearning representation.", + "authors": "Fei Ye, Adrian G. Bors", + "published": "2021-07-09", + "updated": "2021-07-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1511.06072v1", + "title": "Mediated Experts for Deep Convolutional Networks", + "abstract": "We present a new supervised architecture termed Mediated Mixture-of-Experts\n(MMoE) that allows us to improve classification accuracy of Deep Convolutional\nNetworks (DCN). Our architecture achieves this with the help of expert\nnetworks: A network is trained on a disjoint subset of a given dataset and then\nrun in parallel to other experts during deployment. A mediator is employed if\nexperts contradict each other. This allows our framework to naturally support\nincremental learning, as adding new classes requires (re-)training of the new\nexpert only. We also propose two measures to control computational complexity:\nAn early-stopping mechanism halts experts that have low confidence in their\nprediction. The system allows to trade-off accuracy and complexity without\nfurther retraining. We also suggest to share low-level convolutional layers\nbetween experts in an effort to avoid computation of a near-duplicate feature\nset. 
We evaluate our system on a popular dataset and report improved accuracy\ncompared to a single model of same configuration.", + "authors": "Sebastian Agethen, Winston H. Hsu", + "published": "2015-11-19", + "updated": "2015-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.15961v1", + "title": "Mixture of Tokens: Efficient LLMs through Cross-Example Aggregation", + "abstract": "Despite the promise of Mixture of Experts (MoE) models in increasing\nparameter counts of Transformer models while maintaining training and inference\ncosts, their application carries notable drawbacks. The key strategy of these\nmodels is to, for each processed token, activate at most a few experts -\nsubsets of an extensive feed-forward layer. But this approach is not without\nits challenges. The operation of matching experts and tokens is discrete, which\nmakes MoE models prone to issues like training instability and uneven expert\nutilization. Existing techniques designed to address these concerns, such as\nauxiliary losses or balance-aware matching, result either in lower model\nperformance or are more difficult to train. In response to these issues, we\npropose Mixture of Tokens, a fully-differentiable model that retains the\nbenefits of MoE architectures while avoiding the aforementioned difficulties.\nRather than routing tokens to experts, this approach mixes tokens from\ndifferent examples prior to feeding them to experts, enabling the model to\nlearn from all token-expert combinations. Importantly, this mixing can be\ndisabled to avoid mixing of different sequences during inference. Crucially,\nthis method is fully compatible with both masked and causal Large Language\nModel training and inference.", + "authors": "Szymon Antoniak, Sebastian Jaszczur, Micha\u0142 Krutul, Maciej Pi\u00f3ro, Jakub Krajewski, Jan Ludziejewski, Tomasz Odrzyg\u00f3\u017ad\u017a, Marek Cygan", + "published": "2023-10-24", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2303.06318v2", + "title": "A Hybrid Tensor-Expert-Data Parallelism Approach to Optimize Mixture-of-Experts Training", + "abstract": "Mixture-of-Experts (MoE) is a neural network architecture that adds sparsely\nactivated expert blocks to a base model, increasing the number of parameters\nwithout impacting computational costs. However, current distributed deep\nlearning frameworks are limited in their ability to train high-quality MoE\nmodels with large base models. In this work, we present DeepSpeed-TED, a novel,\nthree-dimensional, hybrid parallel algorithm that combines data, tensor, and\nexpert parallelism to enable the training of MoE models with 4 to 8x larger\nbase models than the current state-of-the-art. We also describe memory\noptimizations in the optimizer step, and communication optimizations that\neliminate unnecessary data movement. We implement our approach in DeepSpeed and\nachieve speedups of 26% over a baseline (i.e. 
without our communication\noptimizations) when training a 40 billion parameter MoE model (6.7 billion base\nmodel with 16 experts) on 128 V100 GPUs.", + "authors": "Siddharth Singh, Olatunji Ruwase, Ammar Ahmad Awan, Samyam Rajbhandari, Yuxiong He, Abhinav Bhatele", + "published": "2023-03-11", + "updated": "2023-05-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.DC", + "cs.PF" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.05185v1", + "title": "Mixture of Weak & Strong Experts on Graphs", + "abstract": "Realistic graphs contain both rich self-features of nodes and informative\nstructures of neighborhoods, jointly handled by a GNN in the typical setup. We\npropose to decouple the two modalities by mixture of weak and strong experts\n(Mowst), where the weak expert is a light-weight Multi-layer Perceptron (MLP),\nand the strong expert is an off-the-shelf Graph Neural Network (GNN). To adapt\nthe experts' collaboration to different target nodes, we propose a \"confidence\"\nmechanism based on the dispersion of the weak expert's prediction logits. The\nstrong expert is conditionally activated when either the node's classification\nrelies on neighborhood information, or the weak expert has low model quality.\nWe reveal interesting training dynamics by analyzing the influence of the\nconfidence function on loss: our training algorithm encourages the\nspecialization of each expert by effectively generating soft splitting of the\ngraph. In addition, our \"confidence\" design imposes a desirable bias toward the\nstrong expert to benefit from GNN's better generalization capability. Mowst is\neasy to optimize and achieves strong expressive power, with a computation cost\ncomparable to a single GNN. Empirically, Mowst shows significant accuracy\nimprovement on 6 standard node classification benchmarks (including both\nhomophilous and heterophilous graphs).", + "authors": "Hanqing Zeng, Hanjia Lyu, Diyi Hu, Yinglong Xia, Jiebo Luo", + "published": "2023-11-09", + "updated": "2023-11-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12656v2", + "title": "HyperMoE: Paying Attention to Unselected Experts in Mixture of Experts via Dynamic Transfer", + "abstract": "The Mixture of Experts (MoE) for language models has been proven effective in\naugmenting the capacity of models by dynamically routing each input token to a\nspecific subset of experts for processing. Despite the success, most existing\nmethods face a challenge for balance between sparsity and the availability of\nexpert knowledge: enhancing performance through increased use of expert\nknowledge often results in diminishing sparsity during expert selection. To\nmitigate this contradiction, we propose HyperMoE, a novel MoE framework built\nupon Hypernetworks. This framework integrates the computational processes of\nMoE with the concept of knowledge transferring in multi-task learning. Specific\nmodules generated based on the information of unselected experts serve as\nsupplementary information, which allows the knowledge of experts not selected\nto be used while maintaining selection sparsity. 
Our comprehensive empirical\nevaluations across multiple datasets and backbones establish that HyperMoE\nsignificantly outperforms existing MoE methods under identical conditions\nconcerning the number of experts.", + "authors": "Hao Zhao, Zihan Qiu, Huijia Wu, Zili Wang, Zhaofeng He, Jie Fu", + "published": "2024-02-20", + "updated": "2024-02-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1110.2058v2", + "title": "Convergence Rates for Mixture-of-Experts", + "abstract": "In mixtures-of-experts (ME) model, where a number of submodels (experts) are\ncombined, there have been two longstanding problems: (i) how many experts\nshould be chosen, given the size of the training data? (ii) given the total\nnumber of parameters, is it better to use a few very complex experts, or is it\nbetter to combine many simple experts? In this paper, we try to provide some\ninsights to these problems through a theoretic study on a ME structure where\n$m$ experts are mixed, with each expert being related to a polynomial\nregression model of order $k$. We study the convergence rate of the maximum\nlikelihood estimator (MLE), in terms of how fast the Kullback-Leibler\ndivergence of the estimated density converges to the true density, when the\nsample size $n$ increases. The convergence rate is found to be dependent on\nboth $m$ and $k$, and certain choices of $m$ and $k$ are found to produce\noptimal convergence rates. Therefore, these results shed light on the two\naforementioned important problems: on how to choose $m$, and on how $m$ and $k$\nshould be compromised, for achieving good convergence rates.", + "authors": "Eduardo F. Mendes, Wenxin Jiang", + "published": "2011-10-10", + "updated": "2011-11-01", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.07806v1", + "title": "Transformer Based Multi-Source Domain Adaptation", + "abstract": "In practical machine learning settings, the data on which a model must make\npredictions often come from a different distribution than the data it was\ntrained on. Here, we investigate the problem of unsupervised multi-source\ndomain adaptation, where a model is trained on labelled data from multiple\nsource domains and must make predictions on a domain for which no labelled data\nhas been seen. Prior work with CNNs and RNNs has demonstrated the benefit of\nmixture of experts, where the predictions of multiple domain expert classifiers\nare combined; as well as domain adversarial training, to induce a domain\nagnostic representation space. Inspired by this, we investigate how such\nmethods can be effectively applied to large pretrained transformer models. We\nfind that domain adversarial training has an effect on the learned\nrepresentations of these models while having little effect on their\nperformance, suggesting that large transformer-based models are already\nrelatively robust across domains. Additionally, we show that mixture of experts\nleads to significant performance improvements by comparing several variants of\nmixing functions, including one novel mixture based on attention. 
Finally, we\ndemonstrate that the predictions of large pretrained transformer based domain\nexperts are highly homogenous, making it challenging to learn effective\nfunctions for mixing their predictions.", + "authors": "Dustin Wright, Isabelle Augenstein", + "published": "2020-09-16", + "updated": "2020-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.13850v2", + "title": "Statistical Perspective of Top-K Sparse Softmax Gating Mixture of Experts", + "abstract": "Top-K sparse softmax gating mixture of experts has been widely used for\nscaling up massive deep-learning architectures without increasing the\ncomputational cost. Despite its popularity in real-world applications, the\ntheoretical understanding of that gating function has remained an open problem.\nThe main challenge comes from the structure of the top-K sparse softmax gating\nfunction, which partitions the input space into multiple regions with distinct\nbehaviors. By focusing on a Gaussian mixture of experts, we establish\ntheoretical results on the effects of the top-K sparse softmax gating function\non both density and parameter estimations. Our results hinge upon defining\nnovel loss functions among parameters to capture different behaviors of the\ninput regions. When the true number of experts $k_{\\ast}$ is known, we\ndemonstrate that the convergence rates of density and parameter estimations are\nboth parametric on the sample size. However, when $k_{\\ast}$ becomes unknown\nand the true model is over-specified by a Gaussian mixture of $k$ experts where\n$k > k_{\\ast}$, our findings suggest that the number of experts selected from\nthe top-K sparse softmax gating function must exceed the total cardinality of a\ncertain number of Voronoi cells associated with the true parameters to\nguarantee the convergence of the density estimation. Moreover, while the\ndensity estimation rate remains parametric under this setting, the parameter\nestimation rates become substantially slow due to an intrinsic interaction\nbetween the softmax gating and expert functions.", + "authors": "Huy Nguyen, Pedram Akbarian, Fanqi Yan, Nhat Ho", + "published": "2023-09-25", + "updated": "2024-02-23", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.01334v2", + "title": "Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy", + "abstract": "Sparsely activated Mixture-of-Experts (SMoE) has shown promise to scale up\nthe learning capacity of neural networks, however, they have issues like (a)\nHigh Memory Usage, due to duplication of the network layers into multiple\ncopies as experts; and (b) Redundancy in Experts, as common learning-based\nrouting policies suffer from representational collapse. Therefore, vanilla SMoE\nmodels are memory inefficient and non-scalable, especially for\nresource-constrained downstream scenarios. In this paper, we ask: Can we craft\na compact SMoE model by consolidating expert information? What is the best\nrecipe to merge multiple experts into fewer but more knowledgeable experts? Our\npilot investigation reveals that conventional model merging methods fail to be\neffective in such expert merging for SMoE. 
The potential reasons are: (1)\nredundant information overshadows critical experts; (2) appropriate neuron\npermutation for each expert is missing to bring all of them in alignment. To\naddress this, we propose M-SMoE, which leverages routing statistics to guide\nexpert merging. Specifically, it starts with neuron permutation alignment for\nexperts; then, dominant experts and their \"group members\" are formed; lastly,\nevery expert group is merged into a single expert by utilizing each expert's\nactivation frequency as their weight for merging, thus diminishing the impact\nof insignificant experts. Moreover, we observed that our proposed merging\npromotes a low dimensionality in the merged expert's weight space, naturally\npaving the way for additional compression. Hence, our final method, MC-SMoE\n(i.e., Merge, then Compress SMoE), further decomposes the merged experts into\nlow-rank and structural sparse alternatives. Extensive experiments across 8\nbenchmarks validate the effectiveness of MC-SMoE. For instance, our MC-SMoE\nachieves up to 80% memory and a 20% FLOPs reduction, with virtually no loss in\nperformance.", + "authors": "Pingzhi Li, Zhenyu Zhang, Prateek Yadav, Yi-Lin Sung, Yu Cheng, Mohit Bansal, Tianlong Chen", + "published": "2023-10-02", + "updated": "2024-03-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.01750v1", + "title": "Modular Approach to Machine Reading Comprehension: Mixture of Task-Aware Experts", + "abstract": "In this work we present a Mixture of Task-Aware Experts Network for Machine\nReading Comprehension on a relatively small dataset. We particularly focus on\nthe issue of common-sense learning, enforcing the common ground knowledge by\nspecifically training different expert networks to capture different kinds of\nrelationships between each passage, question and choice triplet. Moreover, we\ntake inspi ration on the recent advancements of multitask and transfer learning\nby training each network a relevant focused task. By making the\nmixture-of-networks aware of a specific goal by enforcing a task and a\nrelationship, we achieve state-of-the-art results and reduce over-fitting.", + "authors": "Anirudha Rayasam, Anusha Kamath, Gabriel Bayomi Tinoco Kalejaiye", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.14976v4", + "title": "MoCaE: Mixture of Calibrated Experts Significantly Improves Object Detection", + "abstract": "Combining the strengths of many existing predictors to obtain a Mixture of\nExperts which is superior to its individual components is an effective way to\nimprove the performance without having to develop new architectures or train a\nmodel from scratch. However, surprisingly, we find that na\\\"ively combining\nexpert object detectors in a similar way to Deep Ensembles, can often lead to\ndegraded performance. We identify that the primary cause of this issue is that\nthe predictions of the experts do not match their performance, a term referred\nto as miscalibration. Consequently, the most confident detector dominates the\nfinal predictions, preventing the mixture from leveraging all the predictions\nfrom the experts appropriately. 
To address this, when constructing the Mixture\nof Experts, we propose to combine their predictions in a manner which reflects\nthe individual performance of the experts; an objective we achieve by first\ncalibrating the predictions before filtering and refining them. We term this\napproach the Mixture of Calibrated Experts and demonstrate its effectiveness\nthrough extensive experiments on 5 different detection tasks using a variety of\ndetectors, showing that it: (i) improves object detectors on COCO and instance\nsegmentation methods on LVIS by up to $\\sim 2.5$ AP; (ii) reaches\nstate-of-the-art on COCO test-dev with $65.1$ AP and on DOTA with $82.62$\n$\\mathrm{AP_{50}}$; (iii) outperforms single models consistently on recent\ndetection tasks such as Open Vocabulary Object Detection.", + "authors": "Kemal Oksuz, Selim Kuzucu, Tom Joy, Puneet K. Dokania", + "published": "2023-09-26", + "updated": "2024-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.03994v1", + "title": "Video Relationship Detection Using Mixture of Experts", + "abstract": "Machine comprehension of visual information from images and videos by neural\nnetworks faces two primary challenges. Firstly, there exists a computational\nand inference gap in connecting vision and language, making it difficult to\naccurately determine which object a given agent acts on and represent it\nthrough language. Secondly, classifiers trained by a single, monolithic neural\nnetwork often lack stability and generalization. To overcome these challenges,\nwe introduce MoE-VRD, a novel approach to visual relationship detection\nutilizing a mixture of experts. MoE-VRD identifies language triplets in the\nform of < subject, predicate, object> tuples to extract relationships from\nvisual processing. Leveraging recent advancements in visual relationship\ndetection, MoE-VRD addresses the requirement for action recognition in\nestablishing relationships between subjects (acting) and objects (being acted\nupon). In contrast to single monolithic networks, MoE-VRD employs multiple\nsmall models as experts, whose outputs are aggregated. Each expert in MoE-VRD\nspecializes in visual relationship learning and object tagging. By utilizing a\nsparsely-gated mixture of experts, MoE-VRD enables conditional computation and\nsignificantly enhances neural network capacity without increasing computational\ncomplexity. Our experimental results demonstrate that the conditional\ncomputation capabilities and scalability of the mixture-of-experts approach\nlead to superior performance in visual relationship detection compared to\nstate-of-the-art methods.", + "authors": "Ala Shaabana, Zahra Gharaee, Paul Fieguth", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1806.08200v1", + "title": "Mixtures of Experts Models", + "abstract": "Mixtures of experts models provide a framework in which covariates may be\nincluded in mixture models. This is achieved by modelling the parameters of the\nmixture model as functions of the concomitant covariates. Given their mixture\nmodel foundation, mixtures of experts models possess a diverse range of\nanalytic uses, from clustering observations to capturing parameter\nheterogeneity in cross-sectional data. 
This chapter focuses on delineating the\nmixture of experts modelling framework and demonstrates the utility and\nflexibility of mixtures of experts models as an analytic tool.", + "authors": "Isobel Claire Gormley, Sylvia Fr\u00fchwirth-Schnatter", + "published": "2018-06-21", + "updated": "2018-06-21", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2008.09662v1", + "title": "Biased Mixtures Of Experts: Enabling Computer Vision Inference Under Data Transfer Limitations", + "abstract": "We propose a novel mixture-of-experts class to optimize computer vision\nmodels in accordance with data transfer limitations at test time. Our approach\npostulates that the minimum acceptable amount of data allowing for\nhighly-accurate results can vary for different input space partitions.\nTherefore, we consider mixtures where experts require different amounts of\ndata, and train a sparse gating function to divide the input space for each\nexpert. By appropriate hyperparameter selection, our approach is able to bias\nmixtures of experts towards selecting specific experts over others. In this\nway, we show that the data transfer optimization between visual sensing and\nprocessing can be solved as a convex optimization problem.To demonstrate the\nrelation between data availability and performance, we evaluate biased mixtures\non a range of mainstream computer vision problems, namely: (i) single shot\ndetection, (ii) image super resolution, and (iii) realtime video action\nclassification. For all cases, and when experts constitute modified baselines\nto meet different limits on allowed data utility, biased mixtures significantly\noutperform previous work optimized to meet the same constraints on available\ndata.", + "authors": "Alhabib Abbas, Yiannis Andreopoulos", + "published": "2020-08-21", + "updated": "2020-08-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.IV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1901.10668v2", + "title": "Doubly Sparse: Sparse Mixture of Sparse Experts for Efficient Softmax Inference", + "abstract": "Computations for the softmax function are significantly expensive when the\nnumber of output classes is large. In this paper, we present a novel softmax\ninference speedup method, Doubly Sparse Softmax (DS-Softmax), that leverages\nsparse mixture of sparse experts to efficiently retrieve top-k classes.\nDifferent from most existing methods that require and approximate a fixed\nsoftmax, our method is learning-based and can adapt softmax weights for a\nbetter inference speedup. In particular, our method learns a two-level\nhierarchy which divides entire output class space into several partially\noverlapping experts. Each expert is sparse and only contains a subset of output\nclasses. To find top-k classes, a sparse mixture enables us to find the most\nprobable expert quickly, and the sparse expert enables us to search within a\nsmall-scale softmax. 
We empirically conduct evaluation on several real-world\ntasks, including neural machine translation, language modeling and image\nclassification, and demonstrate that significant computation reductions can be\nachieved at no performance loss.", + "authors": "Shun Liao, Ting Chen, Tian Lin, Denny Zhou, Chong Wang", + "published": "2019-01-30", + "updated": "2019-07-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.05346v1", + "title": "A Modular Task-oriented Dialogue System Using a Neural Mixture-of-Experts", + "abstract": "End-to-end Task-oriented Dialogue Systems (TDSs) have attracted a lot of\nattention for their superiority (e.g., in terms of global optimization) over\npipeline modularized TDSs. Previous studies on end-to-end TDSs use a\nsingle-module model to generate responses for complex dialogue contexts.\nHowever, no model consistently outperforms the others in all cases. We propose\na neural Modular Task-oriented Dialogue System(MTDS) framework, in which a few\nexpert bots are combined to generate the response for a given dialogue context.\nMTDS consists of a chair bot and several expert bots. Each expert bot is\nspecialized for a particular situation, e.g., one domain, one type of action of\na system, etc. The chair bot coordinates multiple expert bots and adaptively\nselects an expert bot to generate the appropriate response. We further propose\na Token-level Mixture-of-Expert (TokenMoE) model to implement MTDS, where the\nexpert bots predict multiple tokens at each timestamp and the chair bot\ndetermines the final generated token by fully taking into consideration the\noutputs of all expert bots. Both the chair bot and the expert bots are jointly\ntrained in an end-to-end fashion. To verify the effectiveness of TokenMoE, we\ncarry out extensive experiments on a benchmark dataset. Compared with the\nbaseline using a single-module model, our TokenMoE improves the performance by\n8.1% of inform rate and 0.8% of success rate.", + "authors": "Jiahuan Pei, Pengjie Ren, Maarten de Rijke", + "published": "2019-07-10", + "updated": "2019-07-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + } + ] + ] + }, + { + "url": "http://arxiv.org/abs/2010.11929v2", + "title": "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", + "abstract": "While the Transformer architecture has become the de-facto standard for\nnatural language processing tasks, its applications to computer vision remain\nlimited. In vision, attention is either applied in conjunction with\nconvolutional networks, or used to replace certain components of convolutional\nnetworks while keeping their overall structure in place. 
We show that this\nreliance on CNNs is not necessary and a pure transformer applied directly to\nsequences of image patches can perform very well on image classification tasks.\nWhen pre-trained on large amounts of data and transferred to multiple mid-sized\nor small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision\nTransformer (ViT) attains excellent results compared to state-of-the-art\nconvolutional networks while requiring substantially fewer computational\nresources to train.", + "authors": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby", + "published": "2020-10-22", + "updated": "2021-06-03", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2403.10854v1", + "title": "A Comprehensive Study of Multimodal Large Language Models for Image Quality Assessment", + "abstract": "While Multimodal Large Language Models (MLLMs) have experienced significant\nadvancement on visual understanding and reasoning, their potentials to serve as\npowerful, flexible, interpretable, and text-driven models for Image Quality\nAssessment (IQA) remains largely unexplored. In this paper, we conduct a\ncomprehensive and systematic study of prompting MLLMs for IQA. Specifically, we\nfirst investigate nine prompting systems for MLLMs as the combinations of three\nstandardized testing procedures in psychophysics (i.e., the single-stimulus,\ndouble-stimulus, and multiple-stimulus methods) and three popular prompting\nstrategies in natural language processing (i.e., the standard, in-context, and\nchain-of-thought prompting). We then present a difficult sample selection\nprocedure, taking into account sample diversity and uncertainty, to further\nchallenge MLLMs equipped with the respective optimal prompting systems. We\nassess three open-source and one close-source MLLMs on several visual\nattributes of image quality (e.g., structural and textural distortions, color\ndifferences, and geometric transformations) in both full-reference and\nno-reference scenarios. Experimental results show that only the close-source\nGPT-4V provides a reasonable account for human perception of image quality, but\nis weak at discriminating fine-grained quality variations (e.g., color\ndifferences) and at comparing visual quality of multiple images, tasks humans\ncan perform effortlessly.", + "authors": "Tianhe Wu, Kede Ma, Jie Liang, Yujiu Yang, Lei Zhang", + "published": "2024-03-16", + "updated": "2024-03-16", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2312.17090v1", + "title": "Q-Align: Teaching LMMs for Visual Scoring via Discrete Text-Defined Levels", + "abstract": "The explosion of visual content available online underscores the requirement\nfor an accurate machine assessor to robustly evaluate scores across diverse\ntypes of visual contents. While recent studies have demonstrated the\nexceptional potentials of large multi-modality models (LMMs) on a wide range of\nrelated fields, in this work, we explore how to teach them for visual rating\naligned with human opinions. Observing that human raters only learn and judge\ndiscrete text-defined levels in subjective studies, we propose to emulate this\nsubjective process and teach LMMs with text-defined rating levels instead of\nscores. 
The proposed Q-Align achieves state-of-the-art performance on image\nquality assessment (IQA), image aesthetic assessment (IAA), as well as video\nquality assessment (VQA) tasks under the original LMM structure. With the\nsyllabus, we further unify the three tasks into one model, termed the OneAlign.\nIn our experiments, we demonstrate the advantage of the discrete-level-based\nsyllabus over direct-score-based variants for LMMs. Our code and the\npre-trained weights are released at https://github.com/Q-Future/Q-Align.", + "authors": "Haoning Wu, Zicheng Zhang, Weixia Zhang, Chaofeng Chen, Liang Liao, Chunyi Li, Yixuan Gao, Annan Wang, Erli Zhang, Wenxiu Sun, Qiong Yan, Xiongkuo Min, Guangtao Zhai, Weisi Lin", + "published": "2023-12-28", + "updated": "2023-12-28", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2403.18714v1", + "title": "Bringing Textual Prompt to AI-Generated Image Quality Assessment", + "abstract": "AI-Generated Images (AGIs) have inherent multimodal nature. Unlike\ntraditional image quality assessment (IQA) on natural scenarios, AGIs quality\nassessment (AGIQA) takes the correspondence of image and its textual prompt\ninto consideration. This is coupled in the ground truth score, which confuses\nthe unimodal IQA methods. To solve this problem, we introduce IP-IQA (AGIs\nQuality Assessment via Image and Prompt), a multimodal framework for AGIQA via\ncorresponding image and prompt incorporation. Specifically, we propose a novel\nincremental pretraining task named Image2Prompt for better understanding of\nAGIs and their corresponding textual prompts. An effective and efficient\nimage-prompt fusion module, along with a novel special [QA] token, are also\napplied. Both are plug-and-play and beneficial for the cooperation of image and\nits corresponding prompt. Experiments demonstrate that our IP-IQA achieves the\nstate-of-the-art on AGIQA-1k and AGIQA-3k datasets. Code will be available.", + "authors": "Bowen Qu, Haohui Li, Wei Gao", + "published": "2024-03-27", + "updated": "2024-03-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.MM" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2101.01097v2", + "title": "Transformer for Image Quality Assessment", + "abstract": "Transformer has become the new standard method in natural language processing\n(NLP), and it also attracts research interests in computer vision area. In this\npaper we investigate the application of Transformer in Image Quality (TRIQ)\nassessment. Following the original Transformer encoder employed in Vision\nTransformer (ViT), we propose an architecture of using a shallow Transformer\nencoder on the top of a feature map extracted by convolution neural networks\n(CNN). Adaptive positional embedding is employed in the Transformer encoder to\nhandle images with arbitrary resolutions. Different settings of Transformer\narchitectures have been investigated on publicly available image quality\ndatabases. We have found that the proposed TRIQ architecture achieves\noutstanding performance. 
The implementation of TRIQ is published on Github\n(https://github.com/junyongyou/triq).", + "authors": "Junyong You, Jari Korhonen", + "published": "2020-12-30", + "updated": "2021-01-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG", + "eess.IV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2108.05997v1", + "title": "MUSIQ: Multi-scale Image Quality Transformer", + "abstract": "Image quality assessment (IQA) is an important research topic for\nunderstanding and improving visual experience. The current state-of-the-art IQA\nmethods are based on convolutional neural networks (CNNs). The performance of\nCNN-based models is often compromised by the fixed shape constraint in batch\ntraining. To accommodate this, the input images are usually resized and cropped\nto a fixed shape, causing image quality degradation. To address this, we design\na multi-scale image quality Transformer (MUSIQ) to process native resolution\nimages with varying sizes and aspect ratios. With a multi-scale image\nrepresentation, our proposed method can capture image quality at different\ngranularities. Furthermore, a novel hash-based 2D spatial embedding and a scale\nembedding is proposed to support the positional embedding in the multi-scale\nrepresentation. Experimental results verify that our method can achieve\nstate-of-the-art performance on multiple large scale IQA datasets such as\nPaQ-2-PiQ, SPAQ and KonIQ-10k.", + "authors": "Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, Feng Yang", + "published": "2021-08-12", + "updated": "2021-08-12", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2309.14181v3", + "title": "Q-Bench: A Benchmark for General-Purpose Foundation Models on Low-level Vision", + "abstract": "The rapid evolution of Multi-modality Large Language Models (MLLMs) has\ncatalyzed a shift in computer vision from specialized models to general-purpose\nfoundation models. Nevertheless, there is still an inadequacy in assessing the\nabilities of MLLMs on low-level visual perception and understanding. To address\nthis gap, we present Q-Bench, a holistic benchmark crafted to systematically\nevaluate potential abilities of MLLMs on three realms: low-level visual\nperception, low-level visual description, and overall visual quality\nassessment. a) To evaluate the low-level perception ability, we construct the\nLLVisionQA dataset, consisting of 2,990 diverse-sourced images, each equipped\nwith a human-asked question focusing on its low-level attributes. We then\nmeasure the correctness of MLLMs on answering these questions. b) To examine\nthe description ability of MLLMs on low-level information, we propose the\nLLDescribe dataset consisting of long expert-labelled golden low-level text\ndescriptions on 499 images, and a GPT-involved comparison pipeline between\noutputs of MLLMs and the golden descriptions. c) Besides these two tasks, we\nfurther measure their visual quality assessment ability to align with human\nopinion scores. Specifically, we design a softmax-based strategy that enables\nMLLMs to predict quantifiable quality scores, and evaluate them on various\nexisting image quality assessment (IQA) datasets. Our evaluation across the\nthree abilities confirms that MLLMs possess preliminary low-level visual\nskills. 
However, these skills are still unstable and relatively imprecise,\nindicating the need for specific enhancements on MLLMs towards these abilities.\nWe hope that our benchmark can encourage the research community to delve deeper\nto discover and enhance these untapped potentials of MLLMs. Project Page:\nhttps://q-future.github.io/Q-Bench.", + "authors": "Haoning Wu, Zicheng Zhang, Erli Zhang, Chaofeng Chen, Liang Liao, Annan Wang, Chunyi Li, Wenxiu Sun, Qiong Yan, Guangtao Zhai, Weisi Lin", + "published": "2023-09-25", + "updated": "2024-01-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.MM" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1907.02665v1", + "title": "Blind Image Quality Assessment Using A Deep Bilinear Convolutional Neural Network", + "abstract": "We propose a deep bilinear model for blind image quality assessment (BIQA)\nthat handles both synthetic and authentic distortions. Our model consists of\ntwo convolutional neural networks (CNN), each of which specializes in one\ndistortion scenario. For synthetic distortions, we pre-train a CNN to classify\nimage distortion type and level, where we enjoy large-scale training data. For\nauthentic distortions, we adopt a pre-trained CNN for image classification. The\nfeatures from the two CNNs are pooled bilinearly into a unified representation\nfor final quality prediction. We then fine-tune the entire model on target\nsubject-rated databases using a variant of stochastic gradient descent.\nExtensive experiments demonstrate that the proposed model achieves superior\nperformance on both synthetic and authentic databases. Furthermore, we verify\nthe generalizability of our method on the Waterloo Exploration Database using\nthe group maximum differentiation competition.", + "authors": "Weixia Zhang, Kede Ma, Jia Yan, Dexiang Deng, Zhou Wang", + "published": "2019-07-05", + "updated": "2019-07-05", + "primary_cat": "eess.IV", + "cats": [ + "eess.IV", + "cs.CV", + "cs.MM" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2402.03413v1", + "title": "Perceptual Video Quality Assessment: A Survey", + "abstract": "Perceptual video quality assessment plays a vital role in the field of video\nprocessing due to the existence of quality degradations introduced in various\nstages of video signal acquisition, compression, transmission and display. With\nthe advancement of internet communication and cloud service technology, video\ncontent and traffic are growing exponentially, which further emphasizes the\nrequirement for accurate and rapid assessment of video quality. Therefore,\nnumerous subjective and objective video quality assessment studies have been\nconducted over the past two decades for both generic videos and specific videos\nsuch as streaming, user-generated content (UGC), 3D, virtual and augmented\nreality (VR and AR), high frame rate (HFR), audio-visual, etc. This survey\nprovides an up-to-date and comprehensive review of these video quality\nassessment studies. Specifically, we first review the subjective video quality\nassessment methodologies and databases, which are necessary for validating the\nperformance of video quality metrics. Second, the objective video quality\nassessment algorithms for general purposes are surveyed and concluded according\nto the methodologies utilized in the quality measures. Third, we overview the\nobjective video quality assessment measures for specific applications and\nemerging topics. 
Finally, the performances of the state-of-the-art video\nquality assessment measures are compared and analyzed. This survey provides a\nsystematic overview of both classical works and recent progresses in the realm\nof video quality assessment, which can help other researchers quickly access\nthe field and conduct relevant research.", + "authors": "Xiongkuo Min, Huiyu Duan, Wei Sun, Yucheng Zhu, Guangtao Zhai", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "cs.MM", + "cats": [ + "cs.MM", + "cs.CV", + "eess.IV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2207.12396v2", + "title": "Exploring CLIP for Assessing the Look and Feel of Images", + "abstract": "Measuring the perception of visual content is a long-standing problem in\ncomputer vision. Many mathematical models have been developed to evaluate the\nlook or quality of an image. Despite the effectiveness of such tools in\nquantifying degradations such as noise and blurriness levels, such\nquantification is loosely coupled with human language. When it comes to more\nabstract perception about the feel of visual content, existing methods can only\nrely on supervised models that are explicitly trained with labeled data\ncollected via laborious user study. In this paper, we go beyond the\nconventional paradigms by exploring the rich visual language prior encapsulated\nin Contrastive Language-Image Pre-training (CLIP) models for assessing both the\nquality perception (look) and abstract perception (feel) of images in a\nzero-shot manner. In particular, we discuss effective prompt designs and show\nan effective prompt pairing strategy to harness the prior. We also provide\nextensive experiments on controlled datasets and Image Quality Assessment (IQA)\nbenchmarks. Our results show that CLIP captures meaningful priors that\ngeneralize well to different perceptual assessments. Code is avaliable at\nhttps://github.com/IceClear/CLIP-IQA.", + "authors": "Jianyi Wang, Kelvin C. K. Chan, Chen Change Loy", + "published": "2022-07-25", + "updated": "2022-11-23", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2204.08958v2", + "title": "MANIQA: Multi-dimension Attention Network for No-Reference Image Quality Assessment", + "abstract": "No-Reference Image Quality Assessment (NR-IQA) aims to assess the perceptual\nquality of images in accordance with human subjective perception.\nUnfortunately, existing NR-IQA methods are far from meeting the needs of\npredicting accurate quality scores on GAN-based distortion images. To this end,\nwe propose Multi-dimension Attention Network for no-reference Image Quality\nAssessment (MANIQA) to improve the performance on GAN-based distortion. We\nfirstly extract features via ViT, then to strengthen global and local\ninteractions, we propose the Transposed Attention Block (TAB) and the Scale\nSwin Transformer Block (SSTB). These two modules apply attention mechanisms\nacross the channel and spatial dimension, respectively. In this\nmulti-dimensional manner, the modules cooperatively increase the interaction\namong different regions of images globally and locally. Finally, a dual branch\nstructure for patch-weighted quality prediction is applied to predict the final\nscore depending on the weight of each patch's score. Experimental results\ndemonstrate that MANIQA outperforms state-of-the-art methods on four standard\ndatasets (LIVE, TID2013, CSIQ, and KADID-10K) by a large margin. 
Besides, our\nmethod ranked first place in the final testing phase of the NTIRE 2022\nPerceptual Image Quality Assessment Challenge Track 2: No-Reference. Codes and\nmodels are available at https://github.com/IIGROUP/MANIQA.", + "authors": "Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang, Yujiu Yang", + "published": "2022-04-19", + "updated": "2022-04-21", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "eess.IV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.00020v1", + "title": "Learning Transferable Visual Models From Natural Language Supervision", + "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set\nof predetermined object categories. This restricted form of supervision limits\ntheir generality and usability since additional labeled data is needed to\nspecify any other visual concept. Learning directly from raw text about images\nis a promising alternative which leverages a much broader source of\nsupervision. We demonstrate that the simple pre-training task of predicting\nwhich caption goes with which image is an efficient and scalable way to learn\nSOTA image representations from scratch on a dataset of 400 million (image,\ntext) pairs collected from the internet. After pre-training, natural language\nis used to reference learned visual concepts (or describe new ones) enabling\nzero-shot transfer of the model to downstream tasks. We study the performance\nof this approach by benchmarking on over 30 different existing computer vision\ndatasets, spanning tasks such as OCR, action recognition in videos,\ngeo-localization, and many types of fine-grained object classification. The\nmodel transfers non-trivially to most tasks and is often competitive with a\nfully supervised baseline without the need for any dataset specific training.\nFor instance, we match the accuracy of the original ResNet-50 on ImageNet\nzero-shot without needing to use any of the 1.28 million training examples it\nwas trained on. We release our code and pre-trained model weights at\nhttps://github.com/OpenAI/CLIP.", + "authors": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever", + "published": "2021-02-26", + "updated": "2021-02-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2405.01778v1", + "title": "Hierarchical mixture of discriminative Generalized Dirichlet classifiers", + "abstract": "This paper presents a discriminative classifier for compositional data. This\nclassifier is based on the posterior distribution of the Generalized Dirichlet\nwhich is the discriminative counterpart of Generalized Dirichlet mixture model.\nMoreover, following the mixture of experts paradigm, we proposed a hierarchical\nmixture of this classifier. In order to learn the models parameters, we use a\nvariational approximation by deriving an upper-bound for the Generalized\nDirichlet mixture. To the best of our knownledge, this is the first time this\nbound is proposed in the literature. 
Experimental results are presented for\nspam detection and color space identification.", + "authors": "Elvis Togban, Djemel Ziou", + "published": "2024-05-02", + "updated": "2024-05-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.13934v1", + "title": "Functional mixture-of-experts for classification", + "abstract": "We develop a mixtures-of-experts (ME) approach to the multiclass\nclassification where the predictors are univariate functions. It consists of a\nME model in which both the gating network and the experts network are\nconstructed upon multinomial logistic activation functions with functional\ninputs. We perform a regularized maximum likelihood estimation in which the\ncoefficient functions enjoy interpretable sparsity constraints on targeted\nderivatives. We develop an EM-Lasso like algorithm to compute the regularized\nMLE and evaluate the proposed approach on simulated and real data.", + "authors": "Nhat Thien Pham, Faicel Chamroukhi", + "published": "2022-02-28", + "updated": "2022-02-28", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2102.06034v1", + "title": "Speech enhancement with mixture-of-deep-experts with clean clustering pre-training", + "abstract": "In this study we present a mixture of deep experts (MoDE) neural-network\narchitecture for single microphone speech enhancement. Our architecture\ncomprises a set of deep neural networks (DNNs), each of which is an 'expert' in\na different speech spectral pattern such as phoneme. A gating DNN is\nresponsible for the latent variables which are the weights assigned to each\nexpert's output given a speech segment. The experts estimate a mask from the\nnoisy input and the final mask is then obtained as a weighted average of the\nexperts' estimates, with the weights determined by the gating DNN. A soft\nspectral attenuation, based on the estimated mask, is then applied to enhance\nthe noisy speech signal. As a byproduct, we gain reduction at the complexity in\ntest time. We show that the experts specialization allows better robustness to\nunfamiliar noise types.", + "authors": "Shlomo E. Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2021-02-11", + "updated": "2021-02-11", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "cs.LG", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.00968v2", + "title": "Omni-SMoLA: Boosting Generalist Multimodal Models with Soft Mixture of Low-rank Experts", + "abstract": "Large multi-modal models (LMMs) exhibit remarkable performance across\nnumerous tasks. However, generalist LMMs often suffer from performance\ndegradation when tuned over a large collection of tasks. Recent research\nsuggests that Mixture of Experts (MoE) architectures are useful for instruction\ntuning, but for LMMs of parameter size around O(50-100B), the prohibitive cost\nof replicating and storing the expert models severely limits the number of\nexperts we can use. We propose Omni-SMoLA, an architecture that uses the Soft\nMoE approach to (softly) mix many multimodal low rank experts, and avoids\nintroducing a significant number of new parameters compared to conventional MoE\nmodels. 
The core intuition here is that the large model provides a foundational\nbackbone, while different lightweight experts residually learn specialized\nknowledge, either per-modality or multimodally. Extensive experiments\ndemonstrate that the SMoLA approach helps improve the generalist performance\nacross a broad range of generative vision-and-language tasks, achieving new\nSoTA generalist performance that often matches or outperforms single\nspecialized LMM baselines, as well as new SoTA specialist performance.", + "authors": "Jialin Wu, Xia Hu, Yaqing Wang, Bo Pang, Radu Soricut", + "published": "2023-12-01", + "updated": "2024-04-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2207.09094v1", + "title": "MoEC: Mixture of Expert Clusters", + "abstract": "Sparsely Mixture of Experts (MoE) has received great interest due to its\npromising scaling capability with affordable computational overhead. MoE\nconverts dense layers into sparse experts, and utilizes a gated routing network\nto make experts conditionally activated. However, as the number of experts\ngrows, MoE with outrageous parameters suffers from overfitting and sparse data\nallocation. Such problems are especially severe on tasks with limited data,\nthus hindering the progress for MoE models to improve performance by scaling\nup. In this work, we propose Mixture of Expert Clusters - a general approach to\nenable expert layers to learn more diverse and appropriate knowledge by\nimposing variance-based constraints on the routing stage. We further propose a\ncluster-level expert dropout strategy specifically designed for the expert\ncluster structure. Our experiments reveal that MoEC could improve performance\non machine translation and natural language understanding tasks, and raise the\nperformance upper bound for scaling up experts under limited data. We also\nverify that MoEC plays a positive role in mitigating overfitting and sparse\ndata allocation.", + "authors": "Yuan Xie, Shaohan Huang, Tianyu Chen, Furu Wei", + "published": "2022-07-19", + "updated": "2022-07-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2212.00471v1", + "title": "Implicit Mixture of Interpretable Experts for Global and Local Interpretability", + "abstract": "We investigate the feasibility of using mixtures of interpretable experts\n(MoIE) to build interpretable image classifiers on MNIST10. MoIE uses a\nblack-box router to assign each input to one of many inherently interpretable\nexperts, thereby providing insight into why a particular classification\ndecision was made. We find that a naively trained MoIE will learn to 'cheat',\nwhereby the black-box router will solve the classification problem by itself,\nwith each expert simply learning a constant function for one particular class.\nWe propose to solve this problem by introducing interpretable routers and\ntraining the black-box router's decisions to match the interpretable router. In\naddition, we propose a novel implicit parameterization scheme that allows us to\nbuild mixtures of arbitrary numbers of experts, allowing us to study how\nclassification performance, local and global interpretability vary as the\nnumber of experts is increased. 
Our new model, dubbed Implicit Mixture of\nInterpretable Experts (IMoIE) can match state-of-the-art classification\naccuracy on MNIST10 while providing local interpretability, and can provide\nglobal interpretability albeit at the cost of reduced classification accuracy.", + "authors": "Nathan Elazar, Kerry Taylor", + "published": "2022-12-01", + "updated": "2022-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1704.00946v4", + "title": "Approximation results regarding the multiple-output mixture of linear experts model", + "abstract": "Mixture of experts (MoE) models are a class of artificial neural networks\nthat can be used for functional approximation and probabilistic modeling. An\nimportant class of MoE models is the class of mixture of linear experts (MoLE)\nmodels, where the expert functions map to real topological output spaces. There\nare a number of powerful approximation results regarding MoLE models, when the\noutput space is univariate. These results guarantee the ability of MoLE mean\nfunctions to approximate arbitrary continuous functions, and MoLE models\nthemselves to approximate arbitrary conditional probability density functions.\nWe utilize and extend upon the univariate approximation results in order to\nprove a pair of useful results for situations where the output spaces are\nmultivariate.", + "authors": "Hien D. Nguyen, Faicel Chamroukhi, Florence Forbes", + "published": "2017-04-04", + "updated": "2019-05-28", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1905.12969v1", + "title": "Enriched Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts probabilistically divide the input space into regions,\nwhere the assumptions of each expert, or conditional model, need only hold\nlocally. Combined with Gaussian process (GP) experts, this results in a\npowerful and highly flexible model. We focus on alternative mixtures of GP\nexperts, which model the joint distribution of the inputs and targets\nexplicitly. We highlight issues of this approach in multi-dimensional input\nspaces, namely, poor scalability and the need for an unnecessarily large number\nof experts, degrading the predictive performance and increasing uncertainty. We\nconstruct a novel model to address these issues through a nested partitioning\nscheme that automatically infers the number of components at both levels.\nMultiple response types are accommodated through a generalised GP framework,\nwhile multiple input types are included through a factorised exponential family\nstructure. We show the effectiveness of our approach in estimating a\nparsimonious probabilistic description of both synthetic data of increasing\ndimension and an Alzheimer's challenge dataset.", + "authors": "Charles W. L. Gadd, Sara Wade, Alexis Boukouvalas", + "published": "2019-05-30", + "updated": "2019-05-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2112.14397v2", + "title": "EvoMoE: An Evolutional Mixture-of-Experts Training Framework via Dense-To-Sparse Gate", + "abstract": "Mixture-of-experts (MoE) is becoming popular due to its success in improving\nthe model quality, especially in Transformers. 
By routing tokens with a sparse\ngate to a few experts (i.e., a small pieces of the full model), MoE can easily\nincrease the model parameters to a very large scale while keeping the\ncomputation cost in a constant level. Most existing works just initialize some\nrandom experts, set a fixed gating strategy (e.g., Top-k), and train the model\nfrom scratch in an ad-hoc way. We identify that these MoE models are suffering\nfrom the immature experts and unstable sparse gate, which are harmful to the\nconvergence performance. In this paper, we propose an efficient end-to-end MoE\ntraining framework called EvoMoE. EvoMoE starts from training one single expert\nand gradually evolves into a large and sparse MoE structure. EvoMoE mainly\ncontains two phases: the expert-diversify phase to train the base expert for a\nwhile and spawn multiple diverse experts from it, and the gate-sparsify phase\nto learn an adaptive sparse gate and activate a dynamic number of experts.\nEvoMoE naturally decouples the joint learning of both the experts and the\nsparse gate and focuses on learning the basic knowledge with a single expert at\nthe early training stage. Then it diversifies the experts and continues to\ntrain the MoE with a novel Dense-to-Sparse gate (DTS-Gate). Specifically,\ninstead of using a permanent sparse gate, DTS-Gate begins as a dense gate that\nroutes tokens to all experts, then gradually and adaptively becomes sparser\nwhile routes to fewer experts. Evaluations are conducted on three popular\nmodels and tasks, including RoBERTa for masked language modeling task, GPT for\nlanguage modeling task and Transformer for machine translation task. The\nresults show that EvoMoE outperforms existing baselines, including Switch, BASE\nLayer, Hash Layer and StableMoE.", + "authors": "Xiaonan Nie, Xupeng Miao, Shijie Cao, Lingxiao Ma, Qibin Liu, Jilong Xue, Youshan Miao, Yi Liu, Zhi Yang, Bin Cui", + "published": "2021-12-29", + "updated": "2022-10-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.01334v2", + "title": "Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy", + "abstract": "Sparsely activated Mixture-of-Experts (SMoE) has shown promise to scale up\nthe learning capacity of neural networks, however, they have issues like (a)\nHigh Memory Usage, due to duplication of the network layers into multiple\ncopies as experts; and (b) Redundancy in Experts, as common learning-based\nrouting policies suffer from representational collapse. Therefore, vanilla SMoE\nmodels are memory inefficient and non-scalable, especially for\nresource-constrained downstream scenarios. In this paper, we ask: Can we craft\na compact SMoE model by consolidating expert information? What is the best\nrecipe to merge multiple experts into fewer but more knowledgeable experts? Our\npilot investigation reveals that conventional model merging methods fail to be\neffective in such expert merging for SMoE. The potential reasons are: (1)\nredundant information overshadows critical experts; (2) appropriate neuron\npermutation for each expert is missing to bring all of them in alignment. To\naddress this, we propose M-SMoE, which leverages routing statistics to guide\nexpert merging. 
Specifically, it starts with neuron permutation alignment for\nexperts; then, dominant experts and their \"group members\" are formed; lastly,\nevery expert group is merged into a single expert by utilizing each expert's\nactivation frequency as their weight for merging, thus diminishing the impact\nof insignificant experts. Moreover, we observed that our proposed merging\npromotes a low dimensionality in the merged expert's weight space, naturally\npaving the way for additional compression. Hence, our final method, MC-SMoE\n(i.e., Merge, then Compress SMoE), further decomposes the merged experts into\nlow-rank and structural sparse alternatives. Extensive experiments across 8\nbenchmarks validate the effectiveness of MC-SMoE. For instance, our MC-SMoE\nachieves up to 80% memory and a 20% FLOPs reduction, with virtually no loss in\nperformance.", + "authors": "Pingzhi Li, Zhenyu Zhang, Prateek Yadav, Yi-Lin Sung, Yu Cheng, Mohit Bansal, Tianlong Chen", + "published": "2023-10-02", + "updated": "2024-03-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.12379v4", + "title": "Mixture of Cluster-conditional LoRA Experts for Vision-language Instruction Tuning", + "abstract": "Instruction tuning of Large Vision-language Models (LVLMs) has revolutionized\nthe development of versatile models with zero-shot generalization across a wide\nrange of downstream vision-language tasks. However, the diversity of training\ntasks of different sources and formats would lead to inevitable task conflicts,\nwhere different tasks conflict for the same set of model parameters, resulting\nin sub-optimal instructionfollowing abilities. To address that, we propose the\nMixture of Clusterconditional LoRA Experts (MoCLE), a novel Mixture of Experts\n(MoE) architecture designed to activate the task-customized model parameters\nbased on the instruction clusters. A separate universal expert is further\nincorporated to improve generalization capabilities of MoCLE for novel\ninstructions. Extensive experiments on 11 zero-shot tasks demonstrate the\neffectiveness of MoCLE.", + "authors": "Yunhao Gou, Zhili Liu, Kai Chen, Lanqing Hong, Hang Xu, Aoxue Li, Dit-Yan Yeung, James T. Kwok, Yu Zhang", + "published": "2023-12-19", + "updated": "2024-03-22", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.05220v1", + "title": "On Parameter Estimation in Deviated Gaussian Mixture of Experts", + "abstract": "We consider the parameter estimation problem in the deviated Gaussian mixture\nof experts in which the data are generated from $(1 - \\lambda^{\\ast}) g_0(Y|\nX)+ \\lambda^{\\ast} \\sum_{i = 1}^{k_{\\ast}} p_{i}^{\\ast}\nf(Y|(a_{i}^{\\ast})^{\\top}X+b_i^{\\ast},\\sigma_{i}^{\\ast})$, where $X, Y$ are\nrespectively a covariate vector and a response variable, $g_{0}(Y|X)$ is a\nknown function, $\\lambda^{\\ast} \\in [0, 1]$ is true but unknown mixing\nproportion, and $(p_{i}^{\\ast}, a_{i}^{\\ast}, b_{i}^{\\ast}, \\sigma_{i}^{\\ast})$\nfor $1 \\leq i \\leq k^{\\ast}$ are unknown parameters of the Gaussian mixture of\nexperts. This problem arises from the goodness-of-fit test when we would like\nto test whether the data are generated from $g_{0}(Y|X)$ (null hypothesis) or\nthey are generated from the whole mixture (alternative hypothesis). 
Based on\nthe algebraic structure of the expert functions and the distinguishability\nbetween $g_0$ and the mixture part, we construct novel Voronoi-based loss\nfunctions to capture the convergence rates of maximum likelihood estimation\n(MLE) for our models. We further demonstrate that our proposed loss functions\ncharacterize the local convergence rates of parameter estimation more\naccurately than the generalized Wasserstein, a loss function being commonly\nused for estimating parameters in the Gaussian mixture of experts.", + "authors": "Huy Nguyen, Khai Nguyen, Nhat Ho", + "published": "2024-02-07", + "updated": "2024-02-07", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.09179v3", + "title": "On the Representation Collapse of Sparse Mixture of Experts", + "abstract": "Sparse mixture of experts provides larger model capacity while requiring a\nconstant computational overhead. It employs the routing mechanism to distribute\ninput tokens to the best-matched experts according to their hidden\nrepresentations. However, learning such a routing mechanism encourages token\nclustering around expert centroids, implying a trend toward representation\ncollapse. In this work, we propose to estimate the routing scores between\ntokens and experts on a low-dimensional hypersphere. We conduct extensive\nexperiments on cross-lingual language model pre-training and fine-tuning on\ndownstream tasks. Experimental results across seven multilingual benchmarks\nshow that our method achieves consistent gains. We also present a comprehensive\nanalysis on the representation and routing behaviors of our models. Our method\nalleviates the representation collapse issue and achieves more consistent\nrouting than the baseline mixture-of-experts methods.", + "authors": "Zewen Chi, Li Dong, Shaohan Huang, Damai Dai, Shuming Ma, Barun Patra, Saksham Singhal, Payal Bajaj, Xia Song, Xian-Ling Mao, Heyan Huang, Furu Wei", + "published": "2022-04-20", + "updated": "2022-10-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1809.04853v2", + "title": "Bayesian shrinkage in mixture of experts models: Identifying robust determinants of class membership", + "abstract": "A method for implicit variable selection in mixture of experts frameworks is\nproposed. We introduce a prior structure where information is taken from a set\nof independent covariates. Robust class membership predictors are identified\nusing a normal gamma prior. The resulting model setup is used in a finite\nmixture of Bernoulli distributions to find homogenous clusters of women in\nMozambique based on their information sources on HIV. Fully Bayesian inference\nis carried out via the implementation of a Gibbs sampler.", + "authors": "Gregor Zens", + "published": "2018-09-13", + "updated": "2019-01-12", + "primary_cat": "econ.EM", + "cats": [ + "econ.EM", + "62F15, 62J07, 62H30, 90-08" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02410v1", + "title": "Mixture of Quantized Experts (MoQE): Complementary Effect of Low-bit Quantization and Robustness", + "abstract": "Large Mixture of Experts (MoE) models could achieve state-of-the-art quality\non various language tasks, including machine translation task, thanks to the\nefficient model scaling capability with expert parallelism. 
However, it has\nbrought a fundamental issue of larger memory consumption and increased memory\nbandwidth bottleneck at deployment time. In this paper, we propose Mixture of\nQuantized Experts (MoQE) which is a simple weight-only quantization method\napplying ultra low-bit down to 2-bit quantizations only to expert weights for\nmitigating the increased memory and latency issues of MoE models. We show that\nlow-bit quantization together with the MoE architecture delivers a reliable\nmodel performance while reducing the memory size significantly even without any\nadditional training in most cases. In particular, expert layers in MoE models\nare much more robust to the quantization than conventional feedforward networks\n(FFN) layers. In our comprehensive analysis, we show that MoE models with 2-bit\nexpert weights can deliver better model performance than the dense model\ntrained on the same dataset. As a result of low-bit quantization, we show the\nmodel size can be reduced by 79.6% of the original half precision floating\npoint (fp16) MoE model. Combined with an optimized GPU runtime implementation,\nit also achieves 1.24X speed-up on A100 GPUs.", + "authors": "Young Jin Kim, Raffy Fahim, Hany Hassan Awadalla", + "published": "2023-10-03", + "updated": "2023-10-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2107.04694v1", + "title": "Lifelong Mixture of Variational Autoencoders", + "abstract": "In this paper, we propose an end-to-end lifelong learning mixture of experts.\nEach expert is implemented by a Variational Autoencoder (VAE). The experts in\nthe mixture system are jointly trained by maximizing a mixture of individual\ncomponent evidence lower bounds (MELBO) on the log-likelihood of the given\ntraining samples. The mixing coefficients in the mixture, control the\ncontributions of each expert in the goal representation. These are sampled from\na Dirichlet distribution whose parameters are determined through non-parametric\nestimation during lifelong learning. The model can learn new tasks fast when\nthese are similar to those previously learnt. The proposed Lifelong mixture of\nVAE (L-MVAE) expands its architecture with new components when learning a\ncompletely new task. After the training, our model can automatically determine\nthe relevant expert to be used when fed with new data samples. This mechanism\nbenefits both the memory efficiency and the required computational cost as only\none expert is used during the inference. The L-MVAE inference model is able to\nperform interpolation in the joint latent space across the data domains\nassociated with different tasks and is shown to be efficient for disentangled\nlearning representation.", + "authors": "Fei Ye, Adrian G. Bors", + "published": "2021-07-09", + "updated": "2021-07-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.05346v1", + "title": "A Modular Task-oriented Dialogue System Using a Neural Mixture-of-Experts", + "abstract": "End-to-end Task-oriented Dialogue Systems (TDSs) have attracted a lot of\nattention for their superiority (e.g., in terms of global optimization) over\npipeline modularized TDSs. Previous studies on end-to-end TDSs use a\nsingle-module model to generate responses for complex dialogue contexts.\nHowever, no model consistently outperforms the others in all cases. 
We propose\na neural Modular Task-oriented Dialogue System(MTDS) framework, in which a few\nexpert bots are combined to generate the response for a given dialogue context.\nMTDS consists of a chair bot and several expert bots. Each expert bot is\nspecialized for a particular situation, e.g., one domain, one type of action of\na system, etc. The chair bot coordinates multiple expert bots and adaptively\nselects an expert bot to generate the appropriate response. We further propose\na Token-level Mixture-of-Expert (TokenMoE) model to implement MTDS, where the\nexpert bots predict multiple tokens at each timestamp and the chair bot\ndetermines the final generated token by fully taking into consideration the\noutputs of all expert bots. Both the chair bot and the expert bots are jointly\ntrained in an end-to-end fashion. To verify the effectiveness of TokenMoE, we\ncarry out extensive experiments on a benchmark dataset. Compared with the\nbaseline using a single-module model, our TokenMoE improves the performance by\n8.1% of inform rate and 0.8% of success rate.", + "authors": "Jiahuan Pei, Pengjie Ren, Maarten de Rijke", + "published": "2019-07-10", + "updated": "2019-07-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.IR", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1511.06072v1", + "title": "Mediated Experts for Deep Convolutional Networks", + "abstract": "We present a new supervised architecture termed Mediated Mixture-of-Experts\n(MMoE) that allows us to improve classification accuracy of Deep Convolutional\nNetworks (DCN). Our architecture achieves this with the help of expert\nnetworks: A network is trained on a disjoint subset of a given dataset and then\nrun in parallel to other experts during deployment. A mediator is employed if\nexperts contradict each other. This allows our framework to naturally support\nincremental learning, as adding new classes requires (re-)training of the new\nexpert only. We also propose two measures to control computational complexity:\nAn early-stopping mechanism halts experts that have low confidence in their\nprediction. The system allows to trade-off accuracy and complexity without\nfurther retraining. We also suggest to share low-level convolutional layers\nbetween experts in an effort to avoid computation of a near-duplicate feature\nset. We evaluate our system on a popular dataset and report improved accuracy\ncompared to a single model of same configuration.", + "authors": "Sebastian Agethen, Winston H. Hsu", + "published": "2015-11-19", + "updated": "2015-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.11449v2", + "title": "Dynamic Mixture of Experts Models for Online Prediction", + "abstract": "A mixture of experts models the conditional density of a response variable\nusing a mixture of regression models with covariate-dependent mixture weights.\nWe extend the finite mixture of experts model by allowing the parameters in\nboth the mixture components and the weights to evolve in time by following\nrandom walk processes. Inference for time-varying parameters in richly\nparameterized mixture of experts models is challenging. We propose a sequential\nMonte Carlo algorithm for online inference and based on a tailored proposal\ndistribution built on ideas from linear Bayes methods and the EM algorithm. 
The\nmethod gives a unified treatment for mixtures with time-varying parameters,\nincluding the special case of static parameters. We assess the properties of\nthe method on simulated data and on industrial data where the aim is to predict\nsoftware faults in a continuously upgraded large-scale software project.", + "authors": "Parfait Munezero, Mattias Villani, Robert Kohn", + "published": "2021-09-23", + "updated": "2022-10-13", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO", + "stat.AP" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2404.15045v1", + "title": "Multi-Head Mixture-of-Experts", + "abstract": "Sparse Mixtures of Experts (SMoE) scales model capacity without significant\nincreases in training and inference costs, but exhibits the following two\nissues: (1) Low expert activation, where only a small subset of experts are\nactivated for optimization. (2) Lacking fine-grained analytical capabilities\nfor multiple semantic concepts within individual tokens. We propose Multi-Head\nMixture-of-Experts (MH-MoE), which employs a multi-head mechanism to split each\ntoken into multiple sub-tokens. These sub-tokens are then assigned to and\nprocessed by a diverse set of experts in parallel, and seamlessly reintegrated\ninto the original token form. The multi-head mechanism enables the model to\ncollectively attend to information from various representation spaces within\ndifferent experts, while significantly enhances expert activation, thus deepens\ncontext understanding and alleviate overfitting. Moreover, our MH-MoE is\nstraightforward to implement and decouples from other SMoE optimization\nmethods, making it easy to integrate with other SMoE models for enhanced\nperformance. Extensive experimental results across three tasks: English-focused\nlanguage modeling, Multi-lingual language modeling and Masked multi-modality\nmodeling tasks, demonstrate the effectiveness of MH-MoE.", + "authors": "Xun Wu, Shaohan Huang, Wenhui Wang, Furu Wei", + "published": "2024-04-23", + "updated": "2024-04-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.07806v1", + "title": "Transformer Based Multi-Source Domain Adaptation", + "abstract": "In practical machine learning settings, the data on which a model must make\npredictions often come from a different distribution than the data it was\ntrained on. Here, we investigate the problem of unsupervised multi-source\ndomain adaptation, where a model is trained on labelled data from multiple\nsource domains and must make predictions on a domain for which no labelled data\nhas been seen. Prior work with CNNs and RNNs has demonstrated the benefit of\nmixture of experts, where the predictions of multiple domain expert classifiers\nare combined; as well as domain adversarial training, to induce a domain\nagnostic representation space. Inspired by this, we investigate how such\nmethods can be effectively applied to large pretrained transformer models. We\nfind that domain adversarial training has an effect on the learned\nrepresentations of these models while having little effect on their\nperformance, suggesting that large transformer-based models are already\nrelatively robust across domains. Additionally, we show that mixture of experts\nleads to significant performance improvements by comparing several variants of\nmixing functions, including one novel mixture based on attention. 
Finally, we\ndemonstrate that the predictions of large pretrained transformer based domain\nexperts are highly homogenous, making it challenging to learn effective\nfunctions for mixing their predictions.", + "authors": "Dustin Wright, Isabelle Augenstein", + "published": "2020-09-16", + "updated": "2020-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2109.05238v3", + "title": "Universal Simultaneous Machine Translation with Mixture-of-Experts Wait-k Policy", + "abstract": "Simultaneous machine translation (SiMT) generates translation before reading\nthe entire source sentence and hence it has to trade off between translation\nquality and latency. To fulfill the requirements of different translation\nquality and latency in practical applications, the previous methods usually\nneed to train multiple SiMT models for different latency levels, resulting in\nlarge computational costs. In this paper, we propose a universal SiMT model\nwith Mixture-of-Experts Wait-k Policy to achieve the best translation quality\nunder arbitrary latency with only one trained model. Specifically, our method\nemploys multi-head attention to accomplish the mixture of experts where each\nhead is treated as a wait-k expert with its own waiting words number, and given\na test latency and source inputs, the weights of the experts are accordingly\nadjusted to produce the best translation. Experiments on three datasets show\nthat our method outperforms all the strong baselines under different latency,\nincluding the state-of-the-art adaptive policy.", + "authors": "Shaolei Zhang, Yang Feng", + "published": "2021-09-11", + "updated": "2022-03-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1703.09302v1", + "title": "Speech Enhancement using a Deep Mixture of Experts", + "abstract": "In this study we present a Deep Mixture of Experts (DMoE) neural-network\narchitecture for single microphone speech enhancement. By contrast to most\nspeech enhancement algorithms that overlook the speech variability mainly\ncaused by phoneme structure, our framework comprises a set of deep neural\nnetworks (DNNs), each one of which is an 'expert' in enhancing a given speech\ntype corresponding to a phoneme. A gating DNN determines which expert is\nassigned to a given speech segment. A speech presence probability (SPP) is then\nobtained as a weighted average of the expert SPP decisions, with the weights\ndetermined by the gating DNN. A soft spectral attenuation, based on the SPP, is\nthen applied to enhance the noisy speech signal. The experts and the gating\ncomponents of the DMoE network are trained jointly. As part of the training,\nspeech clustering into different subsets is performed in an unsupervised\nmanner. Therefore, unlike previous methods, a phoneme-labeled database is not\nrequired for the training procedure. A series of experiments with different\nnoise types verified the applicability of the new algorithm to the task of\nspeech enhancement. The proposed scheme outperforms other schemes that either\ndo not consider phoneme structure or use a simpler training methodology.", + "authors": "Shlomo E. 
Chazan, Jacob Goldberger, Sharon Gannot", + "published": "2017-03-27", + "updated": "2017-03-27", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1907.04377v2", + "title": "Convergence Rates for Gaussian Mixtures of Experts", + "abstract": "We provide a theoretical treatment of over-specified Gaussian mixtures of\nexperts with covariate-free gating networks. We establish the convergence rates\nof the maximum likelihood estimation (MLE) for these models. Our proof\ntechnique is based on a novel notion of \\emph{algebraic independence} of the\nexpert functions. Drawing on optimal transport theory, we establish a\nconnection between the algebraic independence and a certain class of partial\ndifferential equations (PDEs). Exploiting this connection allows us to derive\nconvergence rates and minimax lower bounds for parameter estimation.", + "authors": "Nhat Ho, Chiao-Yu Yang, Michael I. Jordan", + "published": "2019-07-09", + "updated": "2022-03-08", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "cs.LG", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1911.08151v2", + "title": "Retrospective and Prospective Mixture-of-Generators for Task-oriented Dialogue Response Generation", + "abstract": "Dialogue response generation (DRG) is a critical component of task-oriented\ndialogue systems (TDSs). Its purpose is to generate proper natural language\nresponses given some context, e.g., historical utterances, system states, etc.\nState-of-the-art work focuses on how to better tackle DRG in an end-to-end way.\nTypically, such studies assume that each token is drawn from a single\ndistribution over the output vocabulary, which may not always be optimal.\nResponses vary greatly with different intents, e.g., domains, system actions.\n We propose a novel mixture-of-generators network (MoGNet) for DRG, where we\nassume that each token of a response is drawn from a mixture of distributions.\nMoGNet consists of a chair generator and several expert generators. Each expert\nis specialized for DRG w.r.t. a particular intent. The chair coordinates\nmultiple experts and combines the output they have generated to produce more\nappropriate responses. We propose two strategies to help the chair make better\ndecisions, namely, a retrospective mixture-of-generators (RMoG) and prospective\nmixture-of-generators (PMoG). The former only considers the historical\nexpert-generated responses until the current time step while the latter also\nconsiders possible expert-generated responses in the future by encouraging\nexploration. In order to differentiate experts, we also devise a\nglobal-and-local (GL) learning scheme that forces each expert to be specialized\ntowards a particular intent using a local loss and trains the chair and all\nexperts to coordinate using a global loss.\n We carry out extensive experiments on the MultiWOZ benchmark dataset. 
MoGNet\nsignificantly outperforms state-of-the-art methods in terms of both automatic\nand human evaluations, demonstrating its effectiveness for DRG.", + "authors": "Jiahuan Pei, Pengjie Ren, Christof Monz, Maarten de Rijke", + "published": "2019-11-19", + "updated": "2020-02-19", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05444v1", + "title": "Pushing Mixture of Experts to the Limit: Extremely Parameter Efficient MoE for Instruction Tuning", + "abstract": "The Mixture of Experts (MoE) is a widely known neural architecture where an\nensemble of specialized sub-models optimizes overall performance with a\nconstant computational cost. However, conventional MoEs pose challenges at\nscale due to the need to store all experts in memory. In this paper, we push\nMoE to the limit. We propose extremely parameter-efficient MoE by uniquely\ncombining MoE architecture with lightweight experts.Our MoE architecture\noutperforms standard parameter-efficient fine-tuning (PEFT) methods and is on\npar with full fine-tuning by only updating the lightweight experts -- less than\n1% of an 11B parameters model. Furthermore, our method generalizes to unseen\ntasks as it does not depend on any prior task knowledge. Our research\nunderscores the versatility of the mixture of experts architecture, showcasing\nits ability to deliver robust performance even when subjected to rigorous\nparameter constraints. Our code used in all the experiments is publicly\navailable here: https://github.com/for-ai/parameter-efficient-moe.", + "authors": "Ted Zadouri, Ahmet \u00dcst\u00fcn, Arash Ahmadian, Beyza Ermi\u015f, Acyr Locatelli, Sara Hooker", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1904.09948v1", + "title": "PLUME: Polyhedral Learning Using Mixture of Experts", + "abstract": "In this paper, we propose a novel mixture of expert architecture for learning\npolyhedral classifiers. We learn the parameters of the classifierusing an\nexpectation maximization algorithm. Wederive the generalization bounds of the\nproposedapproach. Through an extensive simulation study, we show that the\nproposed method performs comparably to other state-of-the-art approaches.", + "authors": "Kulin Shah, P. S. Sastry, Naresh Manwani", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.07109v3", + "title": "Context-aware Mixture-of-Experts for Unbiased Scene Graph Generation", + "abstract": "Scene graph generation (SGG) has gained tremendous progress in recent years.\nHowever, its underlying long-tailed distribution of predicate classes is a\nchallenging problem. For extremely unbalanced predicate distributions, existing\napproaches usually construct complicated context encoders to extract the\nintrinsic relevance of scene context to predicates and complex networks to\nimprove the learning ability of network models for highly imbalanced predicate\ndistributions. 
To address the unbiased SGG problem, we introduce a simple yet\neffective method dubbed Context-Aware Mixture-of-Experts (CAME) to improve\nmodel diversity and mitigate biased SGG without complicated design.\nSpecifically, we propose to integrate the mixture of experts with a divide and\nensemble strategy to remedy the severely long-tailed distribution of predicate\nclasses, which is applicable to the majority of unbiased scene graph\ngenerators. The biased SGG is thereby reduced, and the model tends to\nanticipate more evenly distributed predicate predictions. To differentiate\nbetween various predicate distribution levels, experts with the same weights\nare not sufficiently diverse. In order to enable the network dynamically\nexploit the rich scene context and further boost the diversity of model, we\nsimply use the built-in module to create a context encoder. The importance of\neach expert to scene context and each predicate to each expert is dynamically\nassociated with expert weighting (EW) and predicate weighting (PW) strategy. We\nhave conducted extensive experiments on three tasks using the Visual Genome\ndataset, showing that CAME outperforms recent methods and achieves\nstate-of-the-art performance. Our code will be available publicly.", + "authors": "Liguang Zhou, Yuhongze Zhou, Tin Lun Lam, Yangsheng Xu", + "published": "2022-08-15", + "updated": "2023-01-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.11412v1", + "title": "Expert Composer Policy: Scalable Skill Repertoire for Quadruped Robots", + "abstract": "We propose the expert composer policy, a framework to reliably expand the\nskill repertoire of quadruped agents. The composer policy links pair of experts\nvia transitions to a sampled target state, allowing experts to be composed\nsequentially. Each expert specializes in a single skill, such as a locomotion\ngait or a jumping motion. Instead of a hierarchical or mixture-of-experts\narchitecture, we train a single composer policy in an independent process that\nis not conditioned on the other expert policies. By reusing the same composer\npolicy, our approach enables adding new experts without affecting existing\nones, enabling incremental repertoire expansion and preserving original motion\nquality. We measured the transition success rate of 72 transition pairs and\nachieved an average success rate of 99.99\\%, which is over 10\\% higher than the\nbaseline random approach, and outperforms other state-of-the-art methods. Using\ndomain randomization during training we ensure a successful transfer to the\nreal world, where we achieve an average transition success rate of 97.22\\%\n(N=360) in our experiments.", + "authors": "Guilherme Christmann, Ying-Sheng Luo, Wei-Chao Chen", + "published": "2024-03-18", + "updated": "2024-03-18", + "primary_cat": "cs.RO", + "cats": [ + "cs.RO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.02952v1", + "title": "On Least Squares Estimation in Softmax Gating Mixture of Experts", + "abstract": "Mixture of experts (MoE) model is a statistical machine learning design that\naggregates multiple expert networks using a softmax gating function in order to\nform a more intricate and expressive model. Despite being commonly used in\nseveral applications owing to their scalability, the mathematical and\nstatistical properties of MoE models are complex and difficult to analyze. 
As a\nresult, previous theoretical works have primarily focused on probabilistic MoE\nmodels by imposing the impractical assumption that the data are generated from\na Gaussian MoE model. In this work, we investigate the performance of the least\nsquares estimators (LSE) under a deterministic MoE model where the data are\nsampled according to a regression model, a setting that has remained largely\nunexplored. We establish a condition called strong identifiability to\ncharacterize the convergence behavior of various types of expert functions. We\ndemonstrate that the rates for estimating strongly identifiable experts, namely\nthe widely used feed forward networks with activation functions\n$\\mathrm{sigmoid}(\\cdot)$ and $\\tanh(\\cdot)$, are substantially faster than\nthose of polynomial experts, which we show to exhibit a surprising slow\nestimation rate. Our findings have important practical implications for expert\nselection.", + "authors": "Huy Nguyen, Nhat Ho, Alessandro Rinaldo", + "published": "2024-02-05", + "updated": "2024-02-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.15961v1", + "title": "Mixture of Tokens: Efficient LLMs through Cross-Example Aggregation", + "abstract": "Despite the promise of Mixture of Experts (MoE) models in increasing\nparameter counts of Transformer models while maintaining training and inference\ncosts, their application carries notable drawbacks. The key strategy of these\nmodels is to, for each processed token, activate at most a few experts -\nsubsets of an extensive feed-forward layer. But this approach is not without\nits challenges. The operation of matching experts and tokens is discrete, which\nmakes MoE models prone to issues like training instability and uneven expert\nutilization. Existing techniques designed to address these concerns, such as\nauxiliary losses or balance-aware matching, result either in lower model\nperformance or are more difficult to train. In response to these issues, we\npropose Mixture of Tokens, a fully-differentiable model that retains the\nbenefits of MoE architectures while avoiding the aforementioned difficulties.\nRather than routing tokens to experts, this approach mixes tokens from\ndifferent examples prior to feeding them to experts, enabling the model to\nlearn from all token-expert combinations. Importantly, this mixing can be\ndisabled to avoid mixing of different sequences during inference. Crucially,\nthis method is fully compatible with both masked and causal Large Language\nModel training and inference.", + "authors": "Szymon Antoniak, Sebastian Jaszczur, Micha\u0142 Krutul, Maciej Pi\u00f3ro, Jakub Krajewski, Jan Ludziejewski, Tomasz Odrzyg\u00f3\u017ad\u017a, Marek Cygan", + "published": "2023-10-24", + "updated": "2023-10-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.12830v1", + "title": "Mixtures of Gaussian Process Experts with SMC$^2$", + "abstract": "Gaussian processes are a key component of many flexible statistical and\nmachine learning models. However, they exhibit cubic computational complexity\nand high memory constraints due to the need of inverting and storing a full\ncovariance matrix. 
To circumvent this, mixtures of Gaussian process experts\nhave been considered where data points are assigned to independent experts,\nreducing the complexity by allowing inference based on smaller, local\ncovariance matrices. Moreover, mixtures of Gaussian process experts\nsubstantially enrich the model's flexibility, allowing for behaviors such as\nnon-stationarity, heteroscedasticity, and discontinuities. In this work, we\nconstruct a novel inference approach based on nested sequential Monte Carlo\nsamplers to simultaneously infer both the gating network and Gaussian process\nexpert parameters. This greatly improves inference compared to importance\nsampling, particularly in settings when a stationary Gaussian process is\ninappropriate, while still being thoroughly parallelizable.", + "authors": "Teemu H\u00e4rk\u00f6nen, Sara Wade, Kody Law, Lassi Roininen", + "published": "2022-08-26", + "updated": "2022-08-26", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.06066v1", + "title": "DeepSeekMoE: Towards Ultimate Expert Specialization in Mixture-of-Experts Language Models", + "abstract": "In the era of large language models, Mixture-of-Experts (MoE) is a promising\narchitecture for managing computational costs when scaling up model parameters.\nHowever, conventional MoE architectures like GShard, which activate the top-$K$\nout of $N$ experts, face challenges in ensuring expert specialization, i.e.\neach expert acquires non-overlapping and focused knowledge. In response, we\npropose the DeepSeekMoE architecture towards ultimate expert specialization. It\ninvolves two principal strategies: (1) finely segmenting the experts into $mN$\nones and activating $mK$ from them, allowing for a more flexible combination of\nactivated experts; (2) isolating $K_s$ experts as shared ones, aiming at\ncapturing common knowledge and mitigating redundancy in routed experts.\nStarting from a modest scale with 2B parameters, we demonstrate that\nDeepSeekMoE 2B achieves comparable performance with GShard 2.9B, which has 1.5\ntimes the expert parameters and computation. In addition, DeepSeekMoE 2B nearly\napproaches the performance of its dense counterpart with the same number of\ntotal parameters, which set the upper bound of MoE models. Subsequently, we\nscale up DeepSeekMoE to 16B parameters and show that it achieves comparable\nperformance with LLaMA2 7B, with only about 40% of computations. Further, our\npreliminary efforts to scale up DeepSeekMoE to 145B parameters consistently\nvalidate its substantial advantages over the GShard architecture, and show its\nperformance comparable with DeepSeek 67B, using only 28.5% (maybe even 18.2%)\nof computations.", + "authors": "Damai Dai, Chengqi Deng, Chenggang Zhao, R. X. Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y. Wu, Zhenda Xie, Y. K. Li, Panpan Huang, Fuli Luo, Chong Ruan, Zhifang Sui, Wenfeng Liang", + "published": "2024-01-11", + "updated": "2024-01-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2006.13309v4", + "title": "Fast Deep Mixtures of Gaussian Process Experts", + "abstract": "Mixtures of experts have become an indispensable tool for flexible modelling\nin a supervised learning context, allowing not only the mean function but the\nentire density of the output to change with the inputs. 
Sparse Gaussian\nprocesses (GP) have shown promise as a leading candidate for the experts in\nsuch models, and in this article, we propose to design the gating network for\nselecting the experts from such mixtures of sparse GPs using a deep neural\nnetwork (DNN). Furthermore, a fast one pass algorithm called\nCluster-Classify-Regress (CCR) is leveraged to approximate the maximum a\nposteriori (MAP) estimator extremely quickly. This powerful combination of\nmodel and algorithm together delivers a novel method which is flexible, robust,\nand extremely efficient. In particular, the method is able to outperform\ncompeting methods in terms of accuracy and uncertainty quantification. The cost\nis competitive on low-dimensional and small data sets, but is significantly\nlower for higher-dimensional and big data sets. Iteratively maximizing the\ndistribution of experts given allocations and allocations given experts does\nnot provide significant improvement, which indicates that the algorithm\nachieves a good approximation to the local MAP estimator very fast. This\ninsight can be useful also in the context of other mixture of experts models.", + "authors": "Clement Etienam, Kody Law, Sara Wade, Vitaly Zankin", + "published": "2020-06-11", + "updated": "2023-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.13833v2", + "title": "Mixtures of Gaussian process experts based on kernel stick-breaking processes", + "abstract": "Mixtures of Gaussian process experts is a class of models that can\nsimultaneously address two of the key limitations inherent in standard Gaussian\nprocesses: scalability and predictive performance. In particular, models that\nuse Dirichlet processes as gating functions permit straightforward\ninterpretation and automatic selection of the number of experts in a mixture.\nWhile the existing models are intuitive and capable of capturing\nnon-stationarity, multi-modality and heteroskedasticity, the simplicity of\ntheir gating functions may limit the predictive performance when applied to\ncomplex data-generating processes. Capitalising on the recent advancement in\nthe dependent Dirichlet processes literature, we propose a new mixture model of\nGaussian process experts based on kernel stick-breaking processes. Our model\nmaintains the intuitive appeal yet improve the performance of the existing\nmodels. To make it practical, we design a sampler for posterior computation\nbased on the slice sampling. The model behaviour and improved predictive\nperformance are demonstrated in experiments using six datasets.", + "authors": "Yuji Saikai, Khue-Dung Dang", + "published": "2023-04-26", + "updated": "2023-05-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2205.01848v2", + "title": "Optimizing Mixture of Experts using Dynamic Recompilations", + "abstract": "The Mixture of Experts architecture allows for outrageously large neural\nnetworks by scaling model parameter size independently from computational\ndemand (FLOPs). However, current DNN frameworks cannot effectively support the\ndynamic data flow in Mixture of Experts, and implementations on top of these\nframeworks need to use workarounds that introduce significant overheads. 
To\naddress the limitation of these frameworks, we present DynaMoE, a DNN library\nthat uses dynamic recompilations to optimize and adapt the use of computational\nresources to the dynamic needs of Mixture of Experts models. Our evaluation\nshows that DynaMoE achieves a 1.8x speedup and supports 2.3x larger model sizes\nwhen compared to existing MoE systems, even when not using recompilations. We\nthen present further optimizations enabled by dynamic recompilations that yield\nan additional 1.7x speedup while simultaneously reducing memory pressure and\nimproving model quality.", + "authors": "Ferdinand Kossmann, Zhihao Jia, Alex Aiken", + "published": "2022-05-04", + "updated": "2022-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.00893v1", + "title": "MoDE: A Mixture-of-Experts Model with Mutual Distillation among the Experts", + "abstract": "The application of mixture-of-experts (MoE) is gaining popularity due to its\nability to improve model's performance. In an MoE structure, the gate layer\nplays a significant role in distinguishing and routing input features to\ndifferent experts. This enables each expert to specialize in processing their\ncorresponding sub-tasks. However, the gate's routing mechanism also gives rise\nto narrow vision: the individual MoE's expert fails to use more samples in\nlearning the allocated sub-task, which in turn limits the MoE to further\nimprove its generalization ability. To effectively address this, we propose a\nmethod called Mixture-of-Distilled-Expert (MoDE), which applies moderate mutual\ndistillation among experts to enable each expert to pick up more features\nlearned by other experts and gain more accurate perceptions on their original\nallocated sub-tasks. We conduct plenty experiments including tabular, NLP and\nCV datasets, which shows MoDE's effectiveness, universality and robustness.\nFurthermore, we develop a parallel study through innovatively constructing\n\"expert probing\", to experimentally prove why MoDE works: moderate distilling\nknowledge can improve each individual expert's test performances on their\nassigned tasks, leading to MoE's overall performance improvement.", + "authors": "Zhitian Xie, Yinger Zhang, Chenyi Zhuang, Qitao Shi, Zhining Liu, Jinjie Gu, Guannan Zhang", + "published": "2024-01-31", + "updated": "2024-01-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.17749v1", + "title": "Multi-Task Dense Prediction via Mixture of Low-Rank Experts", + "abstract": "Previous multi-task dense prediction methods based on the Mixture of Experts\n(MoE) have received great performance but they neglect the importance of\nexplicitly modeling the global relations among all tasks. In this paper, we\npresent a novel decoder-focused method for multi-task dense prediction, called\nMixture-of-Low-Rank-Experts (MLoRE). 
To model the global task relationships,\nMLoRE adds a generic convolution path to the original MoE structure, where each\ntask feature can go through this path for explicit parameter sharing.\nFurthermore, to control the parameters and computational cost brought by the\nincrease in the number of experts, we take inspiration from LoRA and propose to\nleverage the low-rank format of a vanilla convolution in the expert network.\nSince the low-rank experts have fewer parameters and can be dynamically\nparameterized into the generic convolution, the parameters and computational\ncost do not change much with the increase of experts. Benefiting from this\ndesign, we increase the number of experts and its reception field to enlarge\nthe representation capacity, facilitating multiple dense tasks learning in a\nunified network. Extensive experiments on the PASCAL-Context and NYUD-v2\nbenchmarks show that our MLoRE achieves superior performance compared to\nprevious state-of-the-art methods on all metrics. Our code is available at\nhttps://github.com/YuqiYang213/MLoRE.", + "authors": "Yuqi Yang, Peng-Tao Jiang, Qibin Hou, Hao Zhang, Jinwei Chen, Bo Li", + "published": "2024-03-26", + "updated": "2024-03-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1605.01652v1", + "title": "LSTM-based Mixture-of-Experts for Knowledge-Aware Dialogues", + "abstract": "We introduce an LSTM-based method for dynamically integrating several\nword-prediction experts to obtain a conditional language model which can be\ngood simultaneously at several subtasks. We illustrate this general approach\nwith an application to dialogue where we integrate a neural chat model, good at\nconversational aspects, with a neural question-answering model, good at\nretrieving precise information from a knowledge-base, and show how the\nintegration combines the strengths of the independent components. We hope that\nthis focused contribution will attract attention on the benefits of using such\nmixtures of experts in NLP.", + "authors": "Phong Le, Marc Dymetman, Jean-Michel Renders", + "published": "2016-05-05", + "updated": "2016-05-05", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.02629v2", + "title": "BA-MoE: Boundary-Aware Mixture-of-Experts Adapter for Code-Switching Speech Recognition", + "abstract": "Mixture-of-experts based models, which use language experts to extract\nlanguage-specific representations effectively, have been well applied in\ncode-switching automatic speech recognition. However, there is still\nsubstantial space to improve as similar pronunciation across languages may\nresult in ineffective multi-language modeling and inaccurate language boundary\nestimation. To eliminate these drawbacks, we propose a cross-layer language\nadapter and a boundary-aware training method, namely Boundary-Aware\nMixture-of-Experts (BA-MoE). Specifically, we introduce language-specific\nadapters to separate language-specific representations and a unified gating\nlayer to fuse representations within each encoder layer. Second, we compute\nlanguage adaptation loss of the mean output of each language-specific adapter\nto improve the adapter module's language-specific representation learning.\nBesides, we utilize a boundary-aware predictor to learn boundary\nrepresentations for dealing with language boundary confusion. 
Our approach\nachieves significant performance improvement, reducing the mixture error rate\nby 16.55\\% compared to the baseline on the ASRU 2019 Mandarin-English\ncode-switching challenge dataset.", + "authors": "Peikun Chen, Fan Yu, Yuhao Lian, Hongfei Xue, Xucheng Wan, Naijun Zheng, Huan Zhou, Lei Xie", + "published": "2023-10-04", + "updated": "2023-10-08", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.13750v1", + "title": "MoLE : Mixture of Language Experts for Multi-Lingual Automatic Speech Recognition", + "abstract": "Multi-lingual speech recognition aims to distinguish linguistic expressions\nin different languages and integrate acoustic processing simultaneously. In\ncontrast, current multi-lingual speech recognition research follows a\nlanguage-aware paradigm, mainly targeted to improve recognition performance\nrather than discriminate language characteristics. In this paper, we present a\nmulti-lingual speech recognition network named\nMixture-of-Language-Expert(MoLE), which digests speech in a variety of\nlanguages. Specifically, MoLE analyzes linguistic expression from input speech\nin arbitrary languages, activating a language-specific expert with a\nlightweight language tokenizer. The tokenizer not only activates experts, but\nalso estimates the reliability of the activation. Based on the reliability, the\nactivated expert and the language-agnostic expert are aggregated to represent\nlanguage-conditioned embedding for efficient speech recognition. Our proposed\nmodel is evaluated in 5 languages scenario, and the experimental results show\nthat our structure is advantageous on multi-lingual recognition, especially for\nspeech in low-resource language.", + "authors": "Yoohwan Kwon, Soo-Whan Chung", + "published": "2023-02-27", + "updated": "2023-02-27", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.CL", + "cs.SD" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1806.08200v1", + "title": "Mixtures of Experts Models", + "abstract": "Mixtures of experts models provide a framework in which covariates may be\nincluded in mixture models. This is achieved by modelling the parameters of the\nmixture model as functions of the concomitant covariates. Given their mixture\nmodel foundation, mixtures of experts models possess a diverse range of\nanalytic uses, from clustering observations to capturing parameter\nheterogeneity in cross-sectional data. This chapter focuses on delineating the\nmixture of experts modelling framework and demonstrates the utility and\nflexibility of mixtures of experts models as an analytic tool.", + "authors": "Isobel Claire Gormley, Sylvia Fr\u00fchwirth-Schnatter", + "published": "2018-06-21", + "updated": "2018-06-21", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2105.11706v1", + "title": "Mixture of ELM based experts with trainable gating network", + "abstract": "Mixture of experts method is a neural network based ensemble learning that\nhas great ability to improve the overall classification accuracy. This method\nis based on the divide and conquer principle, in which the problem space is\ndivided between several experts by supervisition of gating network. 
In this\npaper, we propose an ensemble learning method based on mixture of experts which\nis named mixture of ELM based experts with trainable gating network (MEETG) to\nimprove the computing cost and to speed up the learning process of ME. The\nstructure of ME consists of multi layer perceptrons (MLPs) as base experts and\ngating network, in which gradient-based learning algorithm is applied for\ntraining the MLPs which is an iterative and time consuming process. In order to\novercome on these problems, we use the advantages of extreme learning machine\n(ELM) for designing the structure of ME. ELM as a learning algorithm for single\nhidden-layer feed forward neural networks provides much faster learning process\nand better generalization ability in comparision with some other traditional\nlearning algorithms. Also, in the proposed method a trainable gating network is\napplied to aggregate the outputs of the experts dynamically according to the\ninput sample. Our experimental results and statistical analysis on 11 benchmark\ndatasets confirm that MEETG has an acceptable performance in classification\nproblems. Furthermore, our experimental results show that the proposed approach\noutperforms the original ELM on prediction stability and classification\naccuracy.", + "authors": "Laleh Armi, Elham Abbasi, Jamal Zarepour-Ahmadabadi", + "published": "2021-05-25", + "updated": "2021-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.07816v1", + "title": "Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM", + "abstract": "We investigate efficient methods for training Large Language Models (LLMs) to\npossess capabilities in multiple specialized domains, such as coding, math\nreasoning and world knowledge. Our method, named Branch-Train-MiX (BTX), starts\nfrom a seed model, which is branched to train experts in embarrassingly\nparallel fashion with high throughput and reduced communication cost. After\nindividual experts are asynchronously trained, BTX brings together their\nfeedforward parameters as experts in Mixture-of-Expert (MoE) layers and\naverages the remaining parameters, followed by an MoE-finetuning stage to learn\ntoken-level routing. BTX generalizes two special cases, the Branch-Train-Merge\nmethod, which does not have the MoE finetuning stage to learn routing, and\nsparse upcycling, which omits the stage of training experts asynchronously.\nCompared to alternative approaches, BTX achieves the best accuracy-efficiency\ntradeoff.", + "authors": "Sainbayar Sukhbaatar, Olga Golovneva, Vasu Sharma, Hu Xu, Xi Victoria Lin, Baptiste Rozi\u00e8re, Jacob Kahn, Daniel Li, Wen-tau Yih, Jason Weston, Xian Li", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2308.00951v1", + "title": "From Sparse to Soft Mixtures of Experts", + "abstract": "Sparse mixture of expert architectures (MoEs) scale model capacity without\nlarge increases in training or inference costs. Despite their success, MoEs\nsuffer from a number of issues: training instability, token dropping, inability\nto scale the number of experts, or ineffective finetuning. In this work, we\nproposeSoft MoE, a fully-differentiable sparse Transformer that addresses these\nchallenges, while maintaining the benefits of MoEs. 
Soft MoE performs an\nimplicit soft assignment by passing different weighted combinations of all\ninput tokens to each expert. As in other MoE works, experts in Soft MoE only\nprocess a subset of the (combined) tokens, enabling larger model capacity at\nlower inference cost. In the context of visual recognition, Soft MoE greatly\noutperforms standard Transformers (ViTs) and popular MoE variants (Tokens\nChoice and Experts Choice). For example, Soft MoE-Base/16 requires 10.5x lower\ninference cost (5.7x lower wall-clock time) than ViT-Huge/14 while matching its\nperformance after similar training. Soft MoE also scales well: Soft MoE Huge/14\nwith 128 experts in 16 MoE layers has over 40x more parameters than ViT\nHuge/14, while inference time cost grows by only 2%, and it performs\nsubstantially better.", + "authors": "Joan Puigcerver, Carlos Riquelme, Basil Mustafa, Neil Houlsby", + "published": "2023-08-02", + "updated": "2023-08-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2305.03288v2", + "title": "Demystifying Softmax Gating Function in Gaussian Mixture of Experts", + "abstract": "Understanding the parameter estimation of softmax gating Gaussian mixture of\nexperts has remained a long-standing open problem in the literature. It is\nmainly due to three fundamental theoretical challenges associated with the\nsoftmax gating function: (i) the identifiability only up to the translation of\nparameters; (ii) the intrinsic interaction via partial differential equations\nbetween the softmax gating and the expert functions in the Gaussian density;\n(iii) the complex dependence between the numerator and denominator of the\nconditional density of softmax gating Gaussian mixture of experts. We resolve\nthese challenges by proposing novel Voronoi loss functions among parameters and\nestablishing the convergence rates of maximum likelihood estimator (MLE) for\nsolving parameter estimation in these models. When the true number of experts\nis unknown and over-specified, our findings show a connection between the\nconvergence rate of the MLE and a solvability problem of a system of polynomial\nequations.", + "authors": "Huy Nguyen, TrungTin Nguyen, Nhat Ho", + "published": "2023-05-05", + "updated": "2023-10-30", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "math.ST", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2009.06327v1", + "title": "Double-Wing Mixture of Experts for Streaming Recommendations", + "abstract": "Streaming Recommender Systems (SRSs) commonly train recommendation models on\nnewly received data only to address user preference drift, i.e., the changing\nuser preferences towards items. However, this practice overlooks the long-term\nuser preferences embedded in historical data. More importantly, the common\nheterogeneity in data stream greatly reduces the accuracy of streaming\nrecommendations. The reason is that different preferences (or characteristics)\nof different types of users (or items) cannot be well learned by a unified\nmodel. To address these two issues, we propose a Variational and\nReservoir-enhanced Sampling based Double-Wing Mixture of Experts framework,\ncalled VRS-DWMoE, to improve the accuracy of streaming recommendations. 
In\nVRS-DWMoE, we first devise variational and reservoir-enhanced sampling to\nwisely complement new data with historical data, and thus address the user\npreference drift issue while capturing long-term user preferences. After that,\nwe propose a Double-Wing Mixture of Experts (DWMoE) model to first effectively\nlearn heterogeneous user preferences and item characteristics, and then make\nrecommendations based on them. Specifically, DWMoE contains two Mixture of\nExperts (MoE, an effective ensemble learning model) to learn user preferences\nand item characteristics, respectively. Moreover, the multiple experts in each\nMoE learn the preferences (or characteristics) of different types of users (or\nitems) where each expert specializes in one underlying type. Extensive\nexperiments demonstrate that VRS-DWMoE consistently outperforms the\nstate-of-the-art SRSs.", + "authors": "Yan Zhao, Shoujin Wang, Yan Wang, Hongwei Liu, Weizhe Zhang", + "published": "2020-09-14", + "updated": "2020-09-14", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2405.00361v1", + "title": "AdaMoLE: Fine-Tuning Large Language Models with Adaptive Mixture of Low-Rank Adaptation Experts", + "abstract": "We introduce AdaMoLE, a novel method for fine-tuning large language models\n(LLMs) through an Adaptive Mixture of Low-Rank Adaptation (LoRA) Experts.\nMoving beyond conventional methods that employ a static top-k strategy for\nactivating experts, AdaMoLE dynamically adjusts the activation threshold using\na dedicated threshold network, adaptively responding to the varying\ncomplexities of different tasks. By replacing a single LoRA in a layer with\nmultiple LoRA experts and integrating a gating function with the threshold\nmechanism, AdaMoLE effectively selects and activates the most appropriate\nexperts based on the input context. Our extensive evaluations across a variety\nof commonsense reasoning and natural language processing tasks show that\nAdaMoLE exceeds baseline performance. This enhancement highlights the\nadvantages of AdaMoLE's adaptive selection of LoRA experts, improving model\neffectiveness without a corresponding increase in the expert count. The\nexperimental validation not only confirms AdaMoLE as a robust approach for\nenhancing LLMs but also suggests valuable directions for future research in\nadaptive expert selection mechanisms, potentially broadening the scope for\noptimizing model performance across diverse language processing tasks.", + "authors": "Zefang Liu, Jiahua Luo", + "published": "2024-05-01", + "updated": "2024-05-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.05838v1", + "title": "Liu-type Shrinkage Estimators for Mixture of Poisson Regressions with Experts: A Heart Disease Study", + "abstract": "Count data play a critical role in medical research, such as heart disease.\nThe Poisson regression model is a common technique for evaluating the impact of\na set of covariates on the count responses. The mixture of Poisson regression\nmodels with experts is a practical tool to exploit the covariates, not only to\nhandle the heterogeneity in the Poisson regressions but also to learn the\nmixing structure of the population. Multicollinearity is one of the most common\nchallenges with regression models, leading to ill-conditioned design matrices\nof Poisson regression components and expert classes. 
The maximum likelihood\nmethod produces unreliable and misleading estimates for the effects of the\ncovariates in multicollinearity. In this research, we develop Ridge and\nLiu-type methods as two shrinkage approaches to cope with the ill-conditioned\ndesign matrices of the mixture of Poisson regression models with experts.\nThrough various numerical studies, we demonstrate that the shrinkage methods\noffer more reliable estimates for the coefficients of the mixture model in\nmulticollinearity while maintaining the classification performance of the ML\nmethod. The shrinkage methods are finally applied to a heart study to analyze\nthe heart disease rate stages.", + "authors": "Elsayed Ghanem, Moein Yoosefi, Armin Hatefi", + "published": "2023-09-11", + "updated": "2023-09-11", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "stat.CO", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.14800v1", + "title": "Not All Experts are Equal: Efficient Expert Pruning and Skipping for Mixture-of-Experts Large Language Models", + "abstract": "A pivotal advancement in the progress of large language models (LLMs) is the\nemergence of the Mixture-of-Experts (MoE) LLMs. Compared to traditional LLMs,\nMoE LLMs can achieve higher performance with fewer parameters, but it is still\nhard to deploy them due to their immense parameter sizes. Different from\nprevious weight pruning methods that rely on specifically designed hardware,\nthis paper mainly aims to enhance the deployment efficiency of MoE LLMs by\nintroducing plug-and-play expert-level sparsification techniques. Specifically,\nwe propose, for the first time to our best knowledge, post-training approaches\nfor task-agnostic and task-specific expert pruning and skipping of MoE LLMs,\ntailored to improve deployment efficiency while maintaining model performance\nacross a wide range of tasks. Extensive experiments show that our proposed\nmethods can simultaneously reduce model sizes and increase the inference speed,\nwhile maintaining satisfactory performance. Data and code will be available at\nhttps://github.com/Lucky-Lance/Expert_Sparsity.", + "authors": "Xudong Lu, Qi Liu, Yuhui Xu, Aojun Zhou, Siyuan Huang, Bo Zhang, Junchi Yan, Hongsheng Li", + "published": "2024-02-22", + "updated": "2024-02-22", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1612.06879v1", + "title": "Robust mixture of experts modeling using the skew $t$ distribution", + "abstract": "Mixture of Experts (MoE) is a popular framework in the fields of statistics\nand machine learning for modeling heterogeneity in data for regression,\nclassification and clustering. MoE for continuous data are usually based on the\nnormal distribution. However, it is known that for data with asymmetric\nbehavior, heavy tails and atypical observations, the use of the normal\ndistribution is unsuitable. We introduce a new robust non-normal mixture of\nexperts modeling using the skew $t$ distribution. The proposed skew $t$ mixture\nof experts, named STMoE, handles these issues of the normal mixtures experts\nregarding possibly skewed, heavy-tailed and noisy data. We develop a dedicated\nexpectation conditional maximization (ECM) algorithm to estimate the model\nparameters by monotonically maximizing the observed data log-likelihood. 
We\ndescribe how the presented model can be used in prediction and in model-based\nclustering of regression data. Numerical experiments carried out on simulated\ndata show the effectiveness and the robustness of the proposed model in fitting\nnon-linear regression functions as well as in model-based clustering. Then, the\nproposed model is applied to the real-world data of tone perception for musical\ndata analysis, and the one of temperature anomalies for the analysis of climate\nchange data. The obtained results confirm the usefulness of the model for\npractical data analysis applications.", + "authors": "Faicel Chamroukhi", + "published": "2016-12-09", + "updated": "2016-12-09", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG", + "stat.ML", + "62, 62F, 62H30, 62h", + "G.3; I.2.6; I.5.1" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2209.13071v1", + "title": "Diversified Dynamic Routing for Vision Tasks", + "abstract": "Deep learning models for vision tasks are trained on large datasets under the\nassumption that there exists a universal representation that can be used to\nmake predictions for all samples. Whereas high complexity models are proven to\nbe capable of learning such representations, a mixture of experts trained on\nspecific subsets of the data can infer the labels more efficiently. However\nusing mixture of experts poses two new problems, namely (i) assigning the\ncorrect expert at inference time when a new unseen sample is presented. (ii)\nFinding the optimal partitioning of the training data, such that the experts\nrely the least on common features. In Dynamic Routing (DR) a novel architecture\nis proposed where each layer is composed of a set of experts, however without\naddressing the two challenges we demonstrate that the model reverts to using\nthe same subset of experts.\n In our method, Diversified Dynamic Routing (DivDR) the model is explicitly\ntrained to solve the challenge of finding relevant partitioning of the data and\nassigning the correct experts in an unsupervised approach. We conduct several\nexperiments on semantic segmentation on Cityscapes and object detection and\ninstance segmentation on MS-COCO showing improved performance over several\nbaselines.", + "authors": "Botos Csaba, Adel Bibi, Yanwei Li, Philip Torr, Ser-Nam Lim", + "published": "2022-09-26", + "updated": "2022-09-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09762v1", + "title": "Diversifying the Mixture-of-Experts Representation for Language Models with Orthogonal Optimizer", + "abstract": "The Mixture of Experts (MoE) has emerged as a highly successful technique in\ndeep learning, based on the principle of divide-and-conquer to maximize model\ncapacity without significant additional computational cost. Even in the era of\nlarge-scale language models (LLMs), MoE continues to play a crucial role, as\nsome researchers have indicated that GPT-4 adopts the MoE structure to ensure\ndiverse inference results. However, MoE is susceptible to performance\ndegeneracy, particularly evident in the issues of imbalance and homogeneous\nrepresentation among experts. While previous studies have extensively addressed\nthe problem of imbalance, the challenge of homogeneous representation remains\nunresolved. 
In this study, we shed light on the homogeneous representation\nproblem, wherein experts in the MoE fail to specialize and lack diversity,\nleading to frustratingly high similarities in their representations (up to 99%\nin a well-performed MoE model). This problem restricts the expressive power of\nthe MoE and, we argue, contradicts its original intention. To tackle this\nissue, we propose a straightforward yet highly effective solution: OMoE, an\northogonal expert optimizer. Additionally, we introduce an alternating training\nstrategy that encourages each expert to update in a direction orthogonal to the\nsubspace spanned by other experts. Our algorithm facilitates MoE training in\ntwo key ways: firstly, it explicitly enhances representation diversity, and\nsecondly, it implicitly fosters interaction between experts during orthogonal\nweights computation. Through extensive experiments, we demonstrate that our\nproposed optimization algorithm significantly improves the performance of\nfine-tuning the MoE model on the GLUE benchmark, SuperGLUE benchmark,\nquestion-answering task, and name entity recognition tasks.", + "authors": "Boan Liu, Liang Ding, Li Shen, Keqin Peng, Yu Cao, Dazhao Cheng, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-10-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2401.15969v2", + "title": "Routers in Vision Mixture of Experts: An Empirical Study", + "abstract": "Mixture-of-Experts (MoE) models are a promising way to scale up model\ncapacity without significantly increasing computational cost. A key component\nof MoEs is the router, which decides which subset of parameters (experts)\nprocess which feature embeddings (tokens). In this paper, we present a\ncomprehensive study of routers in MoEs for computer vision tasks. We introduce\na unified MoE formulation that subsumes different MoEs with two parametric\nrouting tensors. This formulation covers both sparse MoE, which uses a binary\nor hard assignment between experts and tokens, and soft MoE, which uses a soft\nassignment between experts and weighted combinations of tokens. Routers for\nsparse MoEs can be further grouped into two variants: Token Choice, which\nmatches experts to each token, and Expert Choice, which matches tokens to each\nexpert. We conduct head-to-head experiments with 6 different routers, including\nexisting routers from prior work and new ones we introduce. We show that (i)\nmany routers originally developed for language modeling can be adapted to\nperform strongly in vision tasks, (ii) in sparse MoE, Expert Choice routers\ngenerally outperform Token Choice routers, and (iii) soft MoEs generally\noutperform sparse MoEs with a fixed compute budget. These results provide new\ninsights regarding the crucial role of routers in vision MoE models.", + "authors": "Tianlin Liu, Mathieu Blondel, Carlos Riquelme, Joan Puigcerver", + "published": "2024-01-29", + "updated": "2024-04-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2208.02813v1", + "title": "Towards Understanding Mixture of Experts in Deep Learning", + "abstract": "The Mixture-of-Experts (MoE) layer, a sparsely-activated model controlled by\na router, has achieved great success in deep learning. However, the\nunderstanding of such architecture remains elusive. 
In this paper, we formally\nstudy how the MoE layer improves the performance of neural network learning and\nwhy the mixture model will not collapse into a single model. Our empirical\nresults suggest that the cluster structure of the underlying problem and the\nnon-linearity of the expert are pivotal to the success of MoE. To further\nunderstand this, we consider a challenging classification problem with\nintrinsic cluster structures, which is hard to learn using a single expert. Yet\nwith the MoE layer, by choosing the experts as two-layer nonlinear\nconvolutional neural networks (CNNs), we show that the problem can be learned\nsuccessfully. Furthermore, our theory shows that the router can learn the\ncluster-center features, which helps divide the input complex problem into\nsimpler linear classification sub-problems that individual experts can conquer.\nTo our knowledge, this is the first result towards formally understanding the\nmechanism of the MoE layer for deep learning.", + "authors": "Zixiang Chen, Yihe Deng, Yue Wu, Quanquan Gu, Yuanzhi Li", + "published": "2022-08-04", + "updated": "2022-08-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.16610v1", + "title": "Efficient Deweather Mixture-of-Experts with Uncertainty-aware Feature-wise Linear Modulation", + "abstract": "The Mixture-of-Experts (MoE) approach has demonstrated outstanding\nscalability in multi-task learning including low-level upstream tasks such as\nconcurrent removal of multiple adverse weather effects. However, the\nconventional MoE architecture with parallel Feed Forward Network (FFN) experts\nleads to significant parameter and computational overheads that hinder its\nefficient deployment. In addition, the naive MoE linear router is suboptimal in\nassigning task-specific features to multiple experts which limits its further\nscalability. In this work, we propose an efficient MoE architecture with weight\nsharing across the experts. Inspired by the idea of linear feature modulation\n(FM), our architecture implicitly instantiates multiple experts via learnable\nactivation modulations on a single shared expert block. The proposed Feature\nModulated Expert (FME) serves as a building block for the novel\nMixture-of-Feature-Modulation-Experts (MoFME) architecture, which can scale up\nthe number of experts with low overhead. We further propose an\nUncertainty-aware Router (UaR) to assign task-specific features to different FM\nmodules with well-calibrated weights. This enables MoFME to effectively learn\ndiverse expert functions for multiple tasks. The conducted experiments on the\nmulti-deweather task show that our MoFME outperforms the baselines in the image\nrestoration quality by 0.1-0.2 dB and achieves SOTA-compatible performance\nwhile saving more than 72% of parameters and 39% inference time over the\nconventional MoE counterpart. 
Experiments on the downstream segmentation and\nclassification tasks further demonstrate the generalizability of MoFME to real\nopen-world applications.", + "authors": "Rongyu Zhang, Yulin Luo, Jiaming Liu, Huanrui Yang, Zhen Dong, Denis Gudovskiy, Tomoyuki Okuno, Yohei Nakata, Kurt Keutzer, Yuan Du, Shanghang Zhang", + "published": "2023-12-27", + "updated": "2023-12-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2403.08245v1", + "title": "Scattered Mixture-of-Experts Implementation", + "abstract": "We present ScatterMoE, an implementation of Sparse Mixture-of-Experts (SMoE)\non GPUs. ScatterMoE builds upon existing implementations, and overcoming some\nof the limitations to improve inference and training speed, and memory\nfootprint. This implementation achieves this by avoiding padding and making\nexcessive copies of the input. We introduce ParallelLinear, the main component\nwe use to build our implementation and the various kernels used to speed up the\noperation. We benchmark our implementation against Megablocks, and show that it\nenables a higher throughput and lower memory footprint. We also show how\nParallelLinear enables extension of the Mixture-of-Experts concept by\ndemonstrating with an implementation of Mixture of Attention.", + "authors": "Shawn Tan, Yikang Shen, Rameswar Panda, Aaron Courville", + "published": "2024-03-13", + "updated": "2024-03-13", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DC" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2402.12550v1", + "title": "Multilinear Mixture of Experts: Scalable Expert Specialization through Factorization", + "abstract": "The Mixture of Experts (MoE) paradigm provides a powerful way to decompose\ninscrutable dense layers into smaller, modular computations often more amenable\nto human interpretation, debugging, and editability. A major problem however\nlies in the computational cost of scaling the number of experts to achieve\nsufficiently fine-grained specialization. In this paper, we propose the\nMultilinear Mixutre of Experts (MMoE) layer to address this, focusing on vision\nmodels. MMoE layers perform an implicit computation on prohibitively large\nweight tensors entirely in factorized form. Consequently, MMoEs both (1) avoid\nthe issues incurred through the discrete expert routing in the popular 'sparse'\nMoE models, yet (2) do not incur the restrictively high inference-time costs of\n'soft' MoE alternatives. We present both qualitative and quantitative evidence\n(through visualization and counterfactual interventions respectively) that\nscaling MMoE layers when fine-tuning foundation models for vision tasks leads\nto more specialized experts at the class-level whilst remaining competitive\nwith the performance of parameter-matched linear layer counterparts. Finally,\nwe show that learned expert specialism further facilitates manual correction of\ndemographic bias in CelebA attribute classification. Our MMoE model code is\navailable at https://github.com/james-oldfield/MMoE.", + "authors": "James Oldfield, Markos Georgopoulos, Grigorios G. Chrysos, Christos Tzelepis, Yannis Panagakis, Mihalis A. 
Nicolaou, Jiankang Deng, Ioannis Patras", + "published": "2024-02-19", + "updated": "2024-02-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1903.07756v1", + "title": "Hierarchical Routing Mixture of Experts", + "abstract": "In regression tasks the distribution of the data is often too complex to be\nfitted by a single model. In contrast, partition-based models are developed\nwhere data is divided and fitted by local models. These models partition the\ninput space and do not leverage the input-output dependency of\nmultimodal-distributed data, and strong local models are needed to make good\npredictions. Addressing these problems, we propose a binary tree-structured\nhierarchical routing mixture of experts (HRME) model that has classifiers as\nnon-leaf node experts and simple regression models as leaf node experts. The\nclassifier nodes jointly soft-partition the input-output space based on the\nnatural separateness of multimodal data. This enables simple leaf experts to be\neffective for prediction. Further, we develop a probabilistic framework for the\nHRME model, and propose a recursive Expectation-Maximization (EM) based\nalgorithm to learn both the tree structure and the expert models. Experiments\non a collection of regression tasks validate the effectiveness of our method\ncompared to a variety of other regression models.", + "authors": "Wenbo Zhao, Yang Gao, Shahan Ali Memon, Bhiksha Raj, Rita Singh", + "published": "2019-03-18", + "updated": "2019-03-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1811.10740v2", + "title": "Mixture of Regression Experts in fMRI Encoding", + "abstract": "fMRI semantic category understanding using linguistic encoding models attempt\nto learn a forward mapping that relates stimuli to the corresponding brain\nactivation. Classical encoding models use linear multi-variate methods to\npredict the brain activation (all voxels) given the stimulus. However, these\nmethods essentially assume multiple regions as one large uniform region or\nseveral independent regions, ignoring connections among them. In this paper, we\npresent a mixture of experts-based model where a group of experts captures\nbrain activity patterns related to particular regions of interest (ROI) and\nalso show the discrimination across different experts. The model is trained\nword stimuli encoded as 25-dimensional feature vectors as input and the\ncorresponding brain responses as output. Given a new word (25-dimensional\nfeature vector), it predicts the entire brain activation as the linear\ncombination of multiple experts brain activations. We argue that each expert\nlearns a certain region of brain activations corresponding to its category of\nwords, which solves the problem of identifying the regions with a simple\nencoding model. We showcase that proposed mixture of experts-based model indeed\nlearns region-based experts to predict the brain activations with high spatial\naccuracy.", + "authors": "Subba Reddy Oota, Adithya Avvaru, Naresh Manwani, Raju S. 
Bapi", + "published": "2018-11-26", + "updated": "2018-12-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.HC", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2008.09662v1", + "title": "Biased Mixtures Of Experts: Enabling Computer Vision Inference Under Data Transfer Limitations", + "abstract": "We propose a novel mixture-of-experts class to optimize computer vision\nmodels in accordance with data transfer limitations at test time. Our approach\npostulates that the minimum acceptable amount of data allowing for\nhighly-accurate results can vary for different input space partitions.\nTherefore, we consider mixtures where experts require different amounts of\ndata, and train a sparse gating function to divide the input space for each\nexpert. By appropriate hyperparameter selection, our approach is able to bias\nmixtures of experts towards selecting specific experts over others. In this\nway, we show that the data transfer optimization between visual sensing and\nprocessing can be solved as a convex optimization problem.To demonstrate the\nrelation between data availability and performance, we evaluate biased mixtures\non a range of mainstream computer vision problems, namely: (i) single shot\ndetection, (ii) image super resolution, and (iii) realtime video action\nclassification. For all cases, and when experts constitute modified baselines\nto meet different limits on allowed data utility, biased mixtures significantly\noutperform previous work optimized to meet the same constraints on available\ndata.", + "authors": "Alhabib Abbas, Yiannis Andreopoulos", + "published": "2020-08-21", + "updated": "2020-08-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.IV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2312.03292v1", + "title": "Enhancing Molecular Property Prediction via Mixture of Collaborative Experts", + "abstract": "Molecular Property Prediction (MPP) task involves predicting biochemical\nproperties based on molecular features, such as molecular graph structures,\ncontributing to the discovery of lead compounds in drug development. To address\ndata scarcity and imbalance in MPP, some studies have adopted Graph Neural\nNetworks (GNN) as an encoder to extract commonalities from molecular graphs.\nHowever, these approaches often use a separate predictor for each task,\nneglecting the shared characteristics among predictors corresponding to\ndifferent tasks. In response to this limitation, we introduce the GNN-MoCE\narchitecture. It employs the Mixture of Collaborative Experts (MoCE) as\npredictors, exploiting task commonalities while confronting the homogeneity\nissue in the expert pool and the decision dominance dilemma within the expert\ngroup. To enhance expert diversity for collaboration among all experts, the\nExpert-Specific Projection method is proposed to assign a unique projection\nperspective to each expert. To balance decision-making influence for\ncollaboration within the expert group, the Expert-Specific Loss is presented to\nintegrate individual expert loss into the weighted decision loss of the group\nfor more equitable training. 
Benefiting from the enhancements of MoCE in expert\ncreation, dynamic expert group formation, and experts' collaboration, our model\ndemonstrates superior performance over traditional methods on 24 MPP datasets,\nespecially in tasks with limited data or high imbalance.", + "authors": "Xu Yao, Shuang Liang, Songqiao Han, Hailiang Huang", + "published": "2023-12-06", + "updated": "2023-12-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.MA", + "q-bio.QM" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2004.03751v4", + "title": "Robust Fitting of Mixture Models using Weighted Complete Estimating Equations", + "abstract": "Mixture modeling, which considers the potential heterogeneity in data, is\nwidely adopted for classification and clustering problems. Mixture models can\nbe estimated using the Expectation-Maximization algorithm, which works with the\ncomplete estimating equations conditioned by the latent membership variables of\nthe cluster assignment based on the hierarchical expression of mixture models.\nHowever, when the mixture components have light tails such as a normal\ndistribution, the mixture model can be sensitive to outliers. This study\nproposes a method of weighted complete estimating equations (WCE) for the\nrobust fitting of mixture models. Our WCE introduces weights to complete\nestimating equations such that the weights can automatically downweight the\noutliers. The weights are constructed similarly to the density power divergence\nfor mixture models, but in our WCE, they depend only on the component\ndistributions and not on the whole mixture. A novel\nexpectation-estimating-equation (EEE) algorithm is also developed to solve the\nWCE. For illustrative purposes, a multivariate Gaussian mixture, a mixture of\nexperts, and a multivariate skew normal mixture are considered, and how our EEE\nalgorithm can be implemented for these specific models is described. The\nnumerical performance of the proposed robust estimation method was examined\nusing simulated and real datasets.", + "authors": "Shonosuke Sugasawa, Genya Kobayashi", + "published": "2020-04-08", + "updated": "2022-03-17", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.09179v1", + "title": "SiRA: Sparse Mixture of Low Rank Adaptation", + "abstract": "Parameter Efficient Tuning has been an prominent approach to adapt the Large\nLanguage Model to downstream tasks. Most previous works considers adding the\ndense trainable parameters, where all parameters are used to adapt certain\ntask. We found this less effective empirically using the example of LoRA that\nintroducing more trainable parameters does not help. Motivated by this we\ninvestigate the importance of leveraging \"sparse\" computation and propose SiRA:\nsparse mixture of low rank adaption. SiRA leverages the Sparse Mixture of\nExpert(SMoE) to boost the performance of LoRA. Specifically it enforces the top\n$k$ experts routing with a capacity limit restricting the maximum number of\ntokens each expert can process. We propose a novel and simple expert dropout on\ntop of gating network to reduce the over-fitting issue. 
Through extensive\nexperiments, we verify SiRA performs better than LoRA and other mixture of\nexpert approaches across different single tasks and multitask settings.", + "authors": "Yun Zhu, Nevan Wichers, Chu-Cheng Lin, Xinyi Wang, Tianlong Chen, Lei Shu, Han Lu, Canoee Liu, Liangchen Luo, Jindong Chen, Lei Meng", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.10768v1", + "title": "Memory Augmented Language Models through Mixture of Word Experts", + "abstract": "Scaling up the number of parameters of language models has proven to be an\neffective approach to improve performance. For dense models, increasing model\nsize proportionally increases the model's computation footprint. In this work,\nwe seek to aggressively decouple learning capacity and FLOPs through\nMixture-of-Experts (MoE) style models with large knowledge-rich vocabulary\nbased routing functions and experts. Our proposed approach, dubbed Mixture of\nWord Experts (MoWE), can be seen as a memory augmented model, where a large set\nof word-specific experts play the role of a sparse memory. We demonstrate that\nMoWE performs significantly better than the T5 family of models with similar\nnumber of FLOPs in a variety of NLP tasks. Additionally, MoWE outperforms\nregular MoE models on knowledge intensive tasks and has similar performance to\nmore complex memory augmented approaches that often require to invoke custom\nmechanisms to search the sparse memory.", + "authors": "Cicero Nogueira dos Santos, James Lee-Thorp, Isaac Noble, Chung-Ching Chang, David Uthus", + "published": "2023-11-15", + "updated": "2023-11-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08396v1", + "title": "StableMoE: Stable Routing Strategy for Mixture of Experts", + "abstract": "The Mixture-of-Experts (MoE) technique can scale up the model size of\nTransformers with an affordable computational overhead. We point out that\nexisting learning-to-route MoE methods suffer from the routing fluctuation\nissue, i.e., the target expert of the same input may change along with\ntraining, but only one expert will be activated for the input during inference.\nThe routing fluctuation tends to harm sample efficiency because the same input\nupdates different experts but only one is finally used. In this paper, we\npropose StableMoE with two training stages to address the routing fluctuation\nproblem. In the first training stage, we learn a balanced and cohesive routing\nstrategy and distill it into a lightweight router decoupled from the backbone\nmodel. In the second training stage, we utilize the distilled router to\ndetermine the token-to-expert assignment and freeze it for a stable routing\nstrategy. We validate our method on language modeling and multilingual machine\ntranslation. 
The results show that StableMoE outperforms existing MoE methods\nin terms of both convergence speed and performance.", + "authors": "Damai Dai, Li Dong, Shuming Ma, Bo Zheng, Zhifang Sui, Baobao Chang, Furu Wei", + "published": "2022-04-18", + "updated": "2022-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1312.4314v3", + "title": "Learning Factored Representations in a Deep Mixture of Experts", + "abstract": "Mixtures of Experts combine the outputs of several \"expert\" networks, each of\nwhich specializes in a different part of the input space. This is achieved by\ntraining a \"gating\" network that maps each input to a distribution over the\nexperts. Such models show promise for building larger networks that are still\ncheap to compute at test time, and more parallelizable at training time. In\nthis this work, we extend the Mixture of Experts to a stacked model, the Deep\nMixture of Experts, with multiple sets of gating and experts. This\nexponentially increases the number of effective experts by associating each\ninput with a combination of experts at each layer, yet maintains a modest model\nsize. On a randomly translated version of the MNIST dataset, we find that the\nDeep Mixture of Experts automatically learns to develop location-dependent\n(\"where\") experts at the first layer, and class-specific (\"what\") experts at\nthe second layer. In addition, we see that the different combinations are in\nuse when the model is applied to a dataset of speech monophones. These\ndemonstrate effective use of all expert combinations.", + "authors": "David Eigen, Marc'Aurelio Ranzato, Ilya Sutskever", + "published": "2013-12-16", + "updated": "2014-03-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2012.02130v4", + "title": "A similarity-based Bayesian mixture-of-experts model", + "abstract": "We present a new nonparametric mixture-of-experts model for multivariate\nregression problems, inspired by the probabilistic k-nearest neighbors\nalgorithm. Using a conditionally specified model, predictions for out-of-sample\ninputs are based on similarities to each observed data point, yielding\npredictive distributions represented by Gaussian mixtures. Posterior inference\nis performed on the parameters of the mixture components as well as the\ndistance metric using a mean-field variational Bayes algorithm accompanied with\na stochastic gradient-based optimization procedure. 
The proposed method is\nespecially advantageous in settings where inputs are of relatively high\ndimension in comparison to the data size, where input-output relationships are\ncomplex, and where predictive distributions may be skewed or multimodal.\nComputational studies on five datasets, of which two are synthetically\ngenerated, illustrate clear advantages of our mixture-of-experts method for\nhigh-dimensional inputs, outperforming competitor models both in terms of\nvalidation metrics and visual inspection.", + "authors": "Tianfang Zhang, Rasmus Bokrantz, Jimmy Olsson", + "published": "2020-12-03", + "updated": "2022-08-03", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "stat.ME" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2311.04894v1", + "title": "DAMEX: Dataset-aware Mixture-of-Experts for visual understanding of mixture-of-datasets", + "abstract": "Construction of a universal detector poses a crucial question: How can we\nmost effectively train a model on a large mixture of datasets? The answer lies\nin learning dataset-specific features and ensembling their knowledge but do all\nthis in a single model. Previous methods achieve this by having separate\ndetection heads on a common backbone but that results in a significant increase\nin parameters. In this work, we present Mixture-of-Experts as a solution,\nhighlighting that MoEs are much more than a scalability tool. We propose\nDataset-Aware Mixture-of-Experts, DAMEX where we train the experts to become an\n`expert' of a dataset by learning to route each dataset tokens to its mapped\nexpert. Experiments on Universal Object-Detection Benchmark show that we\noutperform the existing state-of-the-art by average +10.2 AP score and improve\nover our non-MoE baseline by average +2.0 AP score. We also observe consistent\ngains while mixing datasets with (1) limited availability, (2) disparate\ndomains and (3) divergent label sets. Further, we qualitatively show that DAMEX\nis robust against expert representation collapse.", + "authors": "Yash Jain, Harkirat Behl, Zsolt Kira, Vibhav Vineet", + "published": "2023-11-08", + "updated": "2023-11-08", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.10598v3", + "title": "Sparsely-gated Mixture-of-Expert Layers for CNN Interpretability", + "abstract": "Sparsely-gated Mixture of Expert (MoE) layers have been recently successfully\napplied for scaling large transformers, especially for language modeling tasks.\nAn intriguing side effect of sparse MoE layers is that they convey inherent\ninterpretability to a model via natural expert specialization. In this work, we\napply sparse MoE layers to CNNs for computer vision tasks and analyze the\nresulting effect on model interpretability. To stabilize MoE training, we\npresent both soft and hard constraint-based approaches. With hard constraints,\nthe weights of certain experts are allowed to become zero, while soft\nconstraints balance the contribution of experts with an additional auxiliary\nloss. As a result, soft constraints handle expert utilization better and\nsupport the expert specialization process, while hard constraints maintain more\ngeneralized experts and increase overall model performance. Our findings\ndemonstrate that experts can implicitly focus on individual sub-domains of the\ninput space. 
For example, experts trained for CIFAR-100 image classification\nspecialize in recognizing different domains such as flowers or animals without\nprevious data clustering. Experiments with RetinaNet and the COCO dataset\nfurther indicate that object detection experts can also specialize in detecting\nobjects of distinct sizes.", + "authors": "Svetlana Pavlitska, Christian Hubschneider, Lukas Struppek, J. Marius Z\u00f6llner", + "published": "2022-04-22", + "updated": "2023-04-27", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.14703v1", + "title": "Improving Expert Specialization in Mixture of Experts", + "abstract": "Mixture of experts (MoE), introduced over 20 years ago, is the simplest gated\nmodular neural network architecture. There is renewed interest in MoE because\nthe conditional computation allows only parts of the network to be used during\neach inference, as was recently demonstrated in large scale natural language\nprocessing models. MoE is also of potential interest for continual learning, as\nexperts may be reused for new tasks, and new experts introduced. The gate in\nthe MoE architecture learns task decompositions and individual experts learn\nsimpler functions appropriate to the gate's decomposition. In this paper: (1)\nwe show that the original MoE architecture and its training method do not\nguarantee intuitive task decompositions and good expert utilization, indeed\nthey can fail spectacularly even for simple data such as MNIST and\nFashionMNIST; (2) we introduce a novel gating architecture, similar to\nattention, that improves performance and results in a lower entropy task\ndecomposition; and (3) we introduce a novel data-driven regularization that\nimproves expert specialization. We empirically validate our methods on MNIST,\nFashionMNIST and CIFAR-100 datasets.", + "authors": "Yamuna Krishnamurthy, Chris Watkins, Thomas Gaertner", + "published": "2023-02-28", + "updated": "2023-02-28", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2302.02043v1", + "title": "mixdistreg: An R Package for Fitting Mixture of Experts Distributional Regression with Adaptive First-order Methods", + "abstract": "This paper presents a high-level description of the R software package\nmixdistreg to fit mixture of experts distributional regression models. The\nproposed framework is implemented in R using the deepregression software\ntemplate, which is based on TensorFlow and follows the neural structured\nadditive learning principle. The software comprises various approaches as\nspecial cases, including mixture density networks and mixture regression\napproaches. Various code examples are given to demonstrate the package's\nfunctionality.", + "authors": "David R\u00fcgamer", + "published": "2023-02-04", + "updated": "2023-02-04", + "primary_cat": "stat.CO", + "cats": [ + "stat.CO" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2202.09368v2", + "title": "Mixture-of-Experts with Expert Choice Routing", + "abstract": "Sparsely-activated Mixture-of-experts (MoE) models allow the number of\nparameters to greatly increase while keeping the amount of computation for a\ngiven token or a given sample unchanged. However, a poor expert routing\nstrategy (e.g. 
one resulting in load imbalance) can cause certain experts to be\nunder-trained, leading to an expert being under or over-specialized. Prior work\nallocates a fixed number of experts to each token using a top-k function\nregardless of the relative importance of different tokens. To address this, we\npropose a heterogeneous mixture-of-experts employing an expert choice method.\nInstead of letting tokens select the top-k experts, we have experts selecting\nthe top-k tokens. As a result, each token can be routed to a variable number of\nexperts and each expert can have a fixed bucket size. We systematically study\npre-training speedups using the same computational resources of the Switch\nTransformer top-1 and GShard top-2 gating of prior work and find that our\nmethod improves training convergence time by more than 2x. For the same\ncomputational cost, our method demonstrates higher performance in fine-tuning\n11 selected tasks in the GLUE and SuperGLUE benchmarks. For a smaller\nactivation cost, our method outperforms the T5 dense model in 7 out of the 11\ntasks.", + "authors": "Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew Dai, Zhifeng Chen, Quoc Le, James Laudon", + "published": "2022-02-18", + "updated": "2022-10-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2309.14976v4", + "title": "MoCaE: Mixture of Calibrated Experts Significantly Improves Object Detection", + "abstract": "Combining the strengths of many existing predictors to obtain a Mixture of\nExperts which is superior to its individual components is an effective way to\nimprove the performance without having to develop new architectures or train a\nmodel from scratch. However, surprisingly, we find that na\\\"ively combining\nexpert object detectors in a similar way to Deep Ensembles, can often lead to\ndegraded performance. We identify that the primary cause of this issue is that\nthe predictions of the experts do not match their performance, a term referred\nto as miscalibration. Consequently, the most confident detector dominates the\nfinal predictions, preventing the mixture from leveraging all the predictions\nfrom the experts appropriately. To address this, when constructing the Mixture\nof Experts, we propose to combine their predictions in a manner which reflects\nthe individual performance of the experts; an objective we achieve by first\ncalibrating the predictions before filtering and refining them. We term this\napproach the Mixture of Calibrated Experts and demonstrate its effectiveness\nthrough extensive experiments on 5 different detection tasks using a variety of\ndetectors, showing that it: (i) improves object detectors on COCO and instance\nsegmentation methods on LVIS by up to $\\sim 2.5$ AP; (ii) reaches\nstate-of-the-art on COCO test-dev with $65.1$ AP and on DOTA with $82.62$\n$\\mathrm{AP_{50}}$; (iii) outperforms single models consistently on recent\ndetection tasks such as Open Vocabulary Object Detection.", + "authors": "Kemal Oksuz, Selim Kuzucu, Tom Joy, Puneet K. 
Dokania", + "published": "2023-09-26", + "updated": "2024-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2010.14260v2", + "title": "Concentric mixtures of Mallows models for top-$k$ rankings: sampling and identifiability", + "abstract": "In this paper, we consider mixtures of two Mallows models for top-$k$\nrankings, both with the same location parameter but with different scale\nparameters, i.e., a mixture of concentric Mallows models. This situation arises\nwhen we have a heterogeneous population of voters formed by two homogeneous\npopulations, one of which is a subpopulation of expert voters while the other\nincludes the non-expert voters. We propose efficient sampling algorithms for\nMallows top-$k$ rankings. We show the identifiability of both components, and\nthe learnability of their respective parameters in this setting by, first,\nbounding the sample complexity for the Borda algorithm with top-$k$ rankings\nand second, proposing polynomial time algorithm for the separation of the\nrankings in each component. Finally, since the rank aggregation will suffer\nfrom a large amount of noise introduced by the non-expert voters, we adapt the\nBorda algorithm to be able to recover the ground truth consensus ranking which\nis especially consistent with the expert rankings.", + "authors": "Collas Fabien, Irurozki Ekhine", + "published": "2020-10-27", + "updated": "2020-11-05", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.AI", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1901.10668v2", + "title": "Doubly Sparse: Sparse Mixture of Sparse Experts for Efficient Softmax Inference", + "abstract": "Computations for the softmax function are significantly expensive when the\nnumber of output classes is large. In this paper, we present a novel softmax\ninference speedup method, Doubly Sparse Softmax (DS-Softmax), that leverages\nsparse mixture of sparse experts to efficiently retrieve top-k classes.\nDifferent from most existing methods that require and approximate a fixed\nsoftmax, our method is learning-based and can adapt softmax weights for a\nbetter inference speedup. In particular, our method learns a two-level\nhierarchy which divides entire output class space into several partially\noverlapping experts. Each expert is sparse and only contains a subset of output\nclasses. To find top-k classes, a sparse mixture enables us to find the most\nprobable expert quickly, and the sparse expert enables us to search within a\nsmall-scale softmax. We empirically conduct evaluation on several real-world\ntasks, including neural machine translation, language modeling and image\nclassification, and demonstrate that significant computation reductions can be\nachieved at no performance loss.", + "authors": "Shun Liao, Ting Chen, Tian Lin, Denny Zhou, Chong Wang", + "published": "2019-01-30", + "updated": "2019-07-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2304.02806v2", + "title": "Graph Mixture of Experts: Learning on Large-Scale Graphs with Explicit Diversity Modeling", + "abstract": "Graph neural networks (GNNs) have found extensive applications in learning\nfrom graph data. However, real-world graphs often possess diverse structures\nand comprise nodes and edges of varying types. 
To bolster the generalization\ncapacity of GNNs, it has become customary to augment training graph structures\nthrough techniques like graph augmentations and large-scale pre-training on a\nwider array of graphs. Balancing this diversity while avoiding increased\ncomputational costs and the notorious trainability issues of GNNs is crucial.\nThis study introduces the concept of Mixture-of-Experts (MoE) to GNNs, with the\naim of augmenting their capacity to adapt to a diverse range of training graph\nstructures, without incurring explosive computational overhead. The proposed\nGraph Mixture of Experts (GMoE) model empowers individual nodes in the graph to\ndynamically and adaptively select more general information aggregation experts.\nThese experts are trained to capture distinct subgroups of graph structures and\nto incorporate information with varying hop sizes, where those with larger hop\nsizes specialize in gathering information over longer distances. The\neffectiveness of GMoE is validated through a series of experiments on a diverse\nset of tasks, including graph, node, and link prediction, using the OGB\nbenchmark. Notably, it enhances ROC-AUC by $1.81\\%$ in ogbg-molhiv and by\n$1.40\\%$ in ogbg-molbbbp, when compared to the non-MoE baselines. Our code is\npublicly available at https://github.com/VITA-Group/Graph-Mixture-of-Experts.", + "authors": "Haotao Wang, Ziyu Jiang, Yuning You, Yan Han, Gaowen Liu, Jayanth Srinivasa, Ramana Rao Kompella, Zhangyang Wang", + "published": "2023-04-06", + "updated": "2023-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2310.09832v3", + "title": "Merging Experts into One: Improving Computational Efficiency of Mixture of Experts", + "abstract": "Scaling the size of language models usually leads to remarkable advancements\nin NLP tasks. But it often comes with a price of growing computational cost.\nAlthough a sparse Mixture of Experts (MoE) can reduce the cost by activating a\nsmall subset of parameters (e.g., one expert) for each input, its computation\nescalates significantly if increasing the number of activated experts, limiting\nits practical utility. Can we retain the advantages of adding more experts\nwithout substantially increasing the computational costs? In this paper, we\nfirst demonstrate the superiority of selecting multiple experts and then\npropose a computation-efficient approach called \\textbf{\\texttt{Merging Experts\ninto One}} (MEO), which reduces the computation cost to that of a single\nexpert. Extensive experiments show that MEO significantly improves\ncomputational efficiency, e.g., FLOPS drops from 72.0G of vanilla MoE to 28.6G\n(MEO). Moreover, we propose a token-level attention block that further enhances\nthe efficiency and performance of token-level MEO, e.g., 83.3\\% (MEO) vs.\n82.6\\% (vanilla MoE) average score on the GLUE benchmark. Our code will be\nreleased upon acceptance. 
Code will be released at:\n\\url{https://github.com/Shwai-He/MEO}.", + "authors": "Shwai He, Run-Ze Fan, Liang Ding, Li Shen, Tianyi Zhou, Dacheng Tao", + "published": "2023-10-15", + "updated": "2023-11-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2204.08753v1", + "title": "Table-based Fact Verification with Self-adaptive Mixture of Experts", + "abstract": "The table-based fact verification task has recently gained widespread\nattention and yet remains to be a very challenging problem. It inherently\nrequires informative reasoning over natural language together with different\nnumerical and logical reasoning on tables (e.g., count, superlative,\ncomparative). Considering that, we exploit mixture-of-experts and present in\nthis paper a new method: Self-adaptive Mixture-of-Experts Network (SaMoE).\nSpecifically, we have developed a mixture-of-experts neural network to\nrecognize and execute different types of reasoning -- the network is composed\nof multiple experts, each handling a specific part of the semantics for\nreasoning, whereas a management module is applied to decide the contribution of\neach expert network to the verification result. A self-adaptive method is\ndeveloped to teach the management module combining results of different experts\nmore efficiently without external knowledge. The experimental results\nillustrate that our framework achieves 85.1% accuracy on the benchmark dataset\nTabFact, comparable with the previous state-of-the-art models. We hope our\nframework can serve as a new baseline for table-based verification. Our code is\navailable at https://github.com/THUMLP/SaMoE.", + "authors": "Yuxuan Zhou, Xien Liu, Kaiyin Zhou, Ji Wu", + "published": "2022-04-19", + "updated": "2022-04-19", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.00372v1", + "title": "Visual Saliency Prediction Using a Mixture of Deep Neural Networks", + "abstract": "Visual saliency models have recently begun to incorporate deep learning to\nachieve predictive capacity much greater than previous unsupervised methods.\nHowever, most existing models predict saliency using local mechanisms limited\nto the receptive field of the network. We propose a model that incorporates\nglobal scene semantic information in addition to local information gathered by\na convolutional neural network. Our model is formulated as a mixture of\nexperts. Each expert network is trained to predict saliency for a set of\nclosely related images. The final saliency map is computed as a weighted\nmixture of the expert networks' output, with weights determined by a separate\ngating network. This gating network is guided by global scene information to\npredict weights. The expert networks and the gating network are trained\nsimultaneously in an end-to-end manner. 
We show that our mixture formulation\nleads to improvement in performance over an otherwise identical non-mixture\nmodel that does not incorporate global scene information.", + "authors": "Samuel Dodge, Lina Karam", + "published": "2017-02-01", + "updated": "2017-02-01", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2206.00277v2", + "title": "Task-Specific Expert Pruning for Sparse Mixture-of-Experts", + "abstract": "The sparse Mixture-of-Experts (MoE) model is powerful for large-scale\npre-training and has achieved promising results due to its model capacity.\nHowever, with trillions of parameters, MoE is hard to be deployed on cloud or\nmobile environment. The inference of MoE requires expert parallelism, which is\nnot hardware-friendly and communication expensive. Especially for\nresource-limited downstream tasks, such sparse structure has to sacrifice a lot\nof computing efficiency for limited performance gains. In this work, we observe\nmost experts contribute scarcely little to the MoE fine-tuning and inference.\nWe further propose a general method to progressively drop the non-professional\nexperts for the target downstream task, which preserves the benefits of MoE\nwhile reducing the MoE model into one single-expert dense model. Our\nexperiments reveal that the fine-tuned single-expert model could preserve 99.3%\nbenefits from MoE across six different types of tasks while enjoying 2x\ninference speed with free communication cost.", + "authors": "Tianyu Chen, Shaohan Huang, Yuan Xie, Binxing Jiao, Daxin Jiang, Haoyi Zhou, Jianxin Li, Furu Wei", + "published": "2022-06-01", + "updated": "2022-06-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2011.01613v1", + "title": "Towards a Universal Gating Network for Mixtures of Experts", + "abstract": "The combination and aggregation of knowledge from multiple neural networks\ncan be commonly seen in the form of mixtures of experts. However, such\ncombinations are usually done using networks trained on the same tasks, with\nlittle mention of the combination of heterogeneous pre-trained networks,\nespecially in the data-free regime. This paper proposes multiple data-free\nmethods for the combination of heterogeneous neural networks, ranging from the\nutilization of simple output logit statistics, to training specialized gating\nnetworks. The gating networks decide whether specific inputs belong to specific\nnetworks based on the nature of the expert activations generated. The\nexperiments revealed that the gating networks, including the universal gating\napproach, constituted the most accurate approach, and therefore represent a\npragmatic step towards applications with heterogeneous mixtures of experts in a\ndata-free regime. 
The code for this project is hosted on github at\nhttps://github.com/cwkang1998/network-merging.", + "authors": "Chen Wen Kang, Chua Meng Hong, Tomas Maul", + "published": "2020-11-03", + "updated": "2020-11-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2110.04260v3", + "title": "Taming Sparsely Activated Transformer with Stochastic Experts", + "abstract": "Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can\neasily scale to have outrageously large amounts of parameters without\nsignificant increase in computational cost. However, SAMs are reported to be\nparameter inefficient such that larger models do not always lead to better\nperformance. While most on-going research focuses on improving SAMs models by\nexploring methods of routing inputs to experts, our analysis reveals that such\nresearch might not lead to the solution we expect, i.e., the commonly-used\nrouting methods based on gating mechanisms do not work better than randomly\nrouting inputs to experts. In this paper, we propose a new expert-based model,\nTHOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models,\nsuch as the Switch Transformer, experts in THOR are randomly activated for each\ninput during training and inference. THOR models are trained using a\nconsistency regularized loss, where experts learn not only from training data\nbut also from other experts as teachers, such that all the experts make\nconsistent predictions. We validate the effectiveness of THOR on machine\ntranslation tasks. Results show that THOR models are more parameter efficient\nin that they significantly outperform the Transformer and MoE models across\nvarious settings. For example, in multilingual translation, THOR outperforms\nthe Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as\nthat of a state-of-the-art MoE model that is 18 times larger. Our code is\npublicly available at:\nhttps://github.com/microsoft/Stochastic-Mixture-of-Experts.", + "authors": "Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Tuo Zhao, Jianfeng Gao", + "published": "2021-10-08", + "updated": "2022-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.01750v1", + "title": "Modular Approach to Machine Reading Comprehension: Mixture of Task-Aware Experts", + "abstract": "In this work we present a Mixture of Task-Aware Experts Network for Machine\nReading Comprehension on a relatively small dataset. We particularly focus on\nthe issue of common-sense learning, enforcing the common ground knowledge by\nspecifically training different expert networks to capture different kinds of\nrelationships between each passage, question and choice triplet. Moreover, we\ntake inspi ration on the recent advancements of multitask and transfer learning\nby training each network a relevant focused task. 
By making the\nmixture-of-networks aware of a specific goal by enforcing a task and a\nrelationship, we achieve state-of-the-art results and reduce over-fitting.", + "authors": "Anirudha Rayasam, Anusha Kamath, Gabriel Bayomi Tinoco Kalejaiye", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2307.05956v2", + "title": "Language-Routing Mixture of Experts for Multilingual and Code-Switching Speech Recognition", + "abstract": "Multilingual speech recognition for both monolingual and code-switching\nspeech is a challenging task. Recently, based on the Mixture of Experts (MoE),\nmany works have made good progress in multilingual and code-switching ASR, but\npresent huge computational complexity with the increase of supported languages.\nIn this work, we propose a computation-efficient network named Language-Routing\nMixture of Experts (LR-MoE) for multilingual and code-switching ASR. LR-MoE\nextracts language-specific representations through the Mixture of Language\nExperts (MLE), which is guided to learn by a frame-wise language routing\nmechanism. The weight-shared frame-level language identification (LID) network\nis jointly trained as the shared pre-router of each MoE layer. Experiments show\nthat the proposed method significantly improves multilingual and code-switching\nspeech recognition performances over baseline with comparable computational\nefficiency.", + "authors": "Wenxuan Wang, Guodong Ma, Yuke Li, Binbin Du", + "published": "2023-07-12", + "updated": "2023-07-14", + "primary_cat": "cs.SD", + "cats": [ + "cs.SD", + "eess.AS" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1110.2058v2", + "title": "Convergence Rates for Mixture-of-Experts", + "abstract": "In mixtures-of-experts (ME) model, where a number of submodels (experts) are\ncombined, there have been two longstanding problems: (i) how many experts\nshould be chosen, given the size of the training data? (ii) given the total\nnumber of parameters, is it better to use a few very complex experts, or is it\nbetter to combine many simple experts? In this paper, we try to provide some\ninsights to these problems through a theoretic study on a ME structure where\n$m$ experts are mixed, with each expert being related to a polynomial\nregression model of order $k$. We study the convergence rate of the maximum\nlikelihood estimator (MLE), in terms of how fast the Kullback-Leibler\ndivergence of the estimated density converges to the true density, when the\nsample size $n$ increases. The convergence rate is found to be dependent on\nboth $m$ and $k$, and certain choices of $m$ and $k$ are found to produce\noptimal convergence rates. Therefore, these results shed light on the two\naforementioned important problems: on how to choose $m$, and on how $m$ and $k$\nshould be compromised, for achieving good convergence rates.", + "authors": "Eduardo F. Mendes, Wenxin Jiang", + "published": "2011-10-10", + "updated": "2011-11-01", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/1702.04832v1", + "title": "Dynamic Partition Models", + "abstract": "We present a new approach for learning compact and intuitive distributed\nrepresentations with binary encoding. 
Rather than summing up expert votes as in\nproducts of experts, we employ for each variable the opinion of the most\nreliable expert. Data points are hence explained through a partitioning of the\nvariables into expert supports. The partitions are dynamically adapted based on\nwhich experts are active. During the learning phase we adopt a smoothed version\nof this model that uses separate mixtures for each data dimension. In our\nexperiments we achieve accurate reconstructions of high-dimensional data points\nwith at most a dozen experts.", + "authors": "Marc Goessling, Yali Amit", + "published": "2017-02-16", + "updated": "2017-02-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Mixture AND of AND Experts" + }, + { + "url": "http://arxiv.org/abs/2210.16710v1", + "title": "Prediction Sets for High-Dimensional Mixture of Experts Models", + "abstract": "Large datasets make it possible to build predictive models that can capture\nheterogenous relationships between the response variable and features. The\nmixture of high-dimensional linear experts model posits that observations come\nfrom a mixture of high-dimensional linear regression models, where the mixture\nweights are themselves feature-dependent. In this paper, we show how to\nconstruct valid prediction sets for an $\\ell_1$-penalized mixture of experts\nmodel in the high-dimensional setting. We make use of a debiasing procedure to\naccount for the bias induced by the penalization and propose a novel strategy\nfor combining intervals to form a prediction set with coverage guarantees in\nthe mixture setting. Synthetic examples and an application to the prediction of\ncritical temperatures of superconducting materials show our method to have\nreliable practical performance.", + "authors": "Adel Javanmard, Simeng Shao, Jacob Bien", + "published": "2022-10-30", + "updated": "2022-10-30", + "primary_cat": "math.ST", + "cats": [ + "math.ST", + "stat.ME", + "stat.ML", + "stat.TH" + ], + "category": "Mixture AND of AND Experts" + } +] \ No newline at end of file