diff --git "a/related_34K/test_related_short_2404.16627v1.json" "b/related_34K/test_related_short_2404.16627v1.json" new file mode 100644--- /dev/null +++ "b/related_34K/test_related_short_2404.16627v1.json" @@ -0,0 +1,1417 @@ +[ + { + "url": "http://arxiv.org/abs/2404.16627v1", + "title": "Incorporating Lexical and Syntactic Knowledge for Unsupervised Cross-Lingual Transfer", + "abstract": "Unsupervised cross-lingual transfer involves transferring knowledge between\nlanguages without explicit supervision. Although numerous studies have been\nconducted to improve performance in such tasks by focusing on cross-lingual\nknowledge, particularly lexical and syntactic knowledge, current approaches are\nlimited as they only incorporate syntactic or lexical information. Since each\ntype of information offers unique advantages and no previous attempts have\ncombined both, we attempt to explore the potential of this approach. In this\npaper, we present a novel framework called \"Lexicon-Syntax Enhanced\nMultilingual BERT\" that combines both lexical and syntactic knowledge.\nSpecifically, we use Multilingual BERT (mBERT) as the base model and employ two\ntechniques to enhance its learning capabilities. The code-switching technique\nis used to implicitly teach the model lexical alignment information, while a\nsyntactic-based graph attention network is designed to help the model encode\nsyntactic structure. To integrate both types of knowledge, we input\ncode-switched sequences into both the syntactic module and the mBERT base model\nsimultaneously. Our extensive experimental results demonstrate this framework\ncan consistently outperform all baselines of zero-shot cross-lingual transfer,\nwith the gains of 1.0~3.7 points on text classification, named entity\nrecognition (ner), and semantic parsing tasks. Keywords:cross-lingual transfer,\nlexicon, syntax, code-switching, graph attention network", + "authors": "Jianyu Zheng, Fengfei Fan, Jianquan Li", + "published": "2024-04-25", + "updated": "2024-04-25", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Original Paper", + "paper_cat": "Graph AND Structure AND Learning", + "gt": "Cross-lingual transfer is crucial in the field of natural language processing (NLP) as it enables models trained on one language to be applied to another. To enhance performance in transfer tasks, numerous studies focus on addressing the characteristics of various languages and their relationships. 2.1. Incorporating Lexical Knowledge for Cross-lingual Transfer A group of studies aims to incorporate lexical alignment knowledge into cross-lingual transfer research (Zhang et al., 2021a; Wang et al., 2022; Qin et al., 2021; Lai et al., 2021). For example, Zhang et al. (2021a) and Wang et al. (2022) employ bilingual dictionaries to establish word alignments and subsequently train cross-lingual models by leveraging explicit lexical associations between languages. Other methods (Qin et al., 2021; Lai et al., 2021) involve substituting a portion of words in a sentence with their equivalents from different languages, a technique commonly known as \"codeswitching.\" By increasing the diversity of input text, these approaches promote implicit alignments of language representations. However, this group of studies mainly offers insights into lexical translation across languages, while neglecting the learning of language-specific structural rules. 2.2. 
Incorporating Syntactic Knowledge for Cross-lingual Transfer Another research category focuses on integrating syntactic knowledge for cross-lingual transfer (Ahmad et al., 2021; Yu et al., 2021; Zhang et al., 2021b; He et al., 2019; Cignarella et al., 2020; Xu et al., 2022; Shi et al., 2022; Wang et al., 2021). Many studies in this group (Ahmad et al., 2021; Wang et al., 2021) develop graph neural networks to encode syntactic structures, a category to which our work also belongs. Taking inspiration from Ahmad et al. (2021), we adopt a similar architecture, specifically using a graph attention network to encode syntactic knowledge. Other methods (Cignarella et al., 2020; Xu et al., 2022) extract sparse syntactic features from text and subsequently incorporate them into the overall model. Although these approaches consider the relationships between language elements, they frequently overlook the alignments across languages, which impedes the effective transfer of linguistic elements and rules between languages. Consequently, we combine the strengths of these two categories of approaches. First, we replace the input sequence with translated words from other languages, which aids in guiding the entire model to acquire implicit alignment information. Then, we introduce an additional module to assist the model in encoding syntax.", + "pre_questions": [], + "main_content": "Introduction Unsupervised cross-lingual transfer refers to the process of leveraging knowledge from one language, and applying it to another language without explicit supervision (Conneau et al., 2019). Due to the free requirement of the labeled data in target language, it is highly preferred for low-resource scenarios. Recently, unsupervised cross-lingual transfer has been widely applied in various natural language processing (NLP) tasks, such as part-ofspeech (POS) tagging (Kim et al., 2017; de Vries et al., 2022), named entity recognition (NER) (Fetahu et al., 2022; Xie et al., 2018), machine reading comprehension (Hsu et al., 2019; Chen et al., 2022), and question answering (QA) (Nooralahzadeh and Sennrich, 2023; Asai et al., 2021). The success of unsupervised cross-lingual transfer can be attributed to its ability to exploit connections across languages, which are reflected in various linguistic aspects such as lexicon, semantics, and syntactic structures. Consequently, many studies have sought to enhance models by encouraging them to learn these cross-lingual commonalities. For instance, in the lexical domain, Qin et al. (2021) utilize bilingual dictionaries to randomly replace certain words with their translations in other languages, thereby encouraging models to implicitly align representations between the source language and multiple target languages. In the area of syntax, several works have developed novel neural archi\u2217Equal Contribution \u2020 Jianquan Li is the corresponding author tectures to guide models in encoding the structural features of languages. Ahmad et al. (2021), for example, proposes a graph neural network (GNN) to encode the structural representation of input text and fine-tune the GNN along with the multilingual BERT (mBERT) for downstream tasks. Both lexical and syntactic approaches facilitate the alignment of linguistic elements across different languages, thereby enhancing the performance of cross-lingual transfer tasks. However, language is a highly intricate system (Ellis and Larsen-Freeman, 2009), with elements at various levels being interconnected. 
For example, sentences are composed of phrases, which in turn are composed of words. In cross-lingual transfer, we hypothesize that merely guiding models to focus on a single linguistic aspect is inadequate. Instead, by simultaneously directing models to learn linguistic knowledge across diverse levels, their performance can be further improved. Table 1 presents example sentences extracted from the XNLI dataset (Conneau et al., 2018). These parallel sentence pairs demonstrate that the multilingual model makes incorrect predictions for sentence pairs in the target languages (French and German) when only one aspect of linguistic knowledge, either lexical or syntactic, is incorporated. However, when both types of knowledge are integrated into the model, the correct prediction is obtained. Despite this, most previous studies have focused on either syntactic or lexical information alone, without considering the integration of both types of information.
Lang | Premise (P) / Hypothesis (H) | Label | +Lex | +Syn | Ours
fr | P: Votre société charitable fournit non seulement de les services sociaux communautaires efficaces à les animaux et les personnes, mais sert également de fourrière pour la Ville de Nashua. H: La société humaine est le refuge pour animaux de Nashua. | entail | contra | contra | entail
de | P: Ihre humane Gesellschaft erbringt nicht nur effektive gemeinschaftlich-soziale Dienstleistungen für Tiere und ihre Menschen, sondern dient auch als Zwinger der Stadt Nashua. H: Die Humane Society ist Nashuas Tierheim. | entail | contra | contra | entail
en | P: Your humane society provides not only effective community social services for animals and their people, but also serves as the pound for the City of Nashua. H: The humane society is Nashua's animal shelter. | - | - | - | -
Table 1: The parallel sentence pairs in French and German from XNLI (Conneau et al., 2018), which are translated from English. Each sentence pair consists of a Premise sentence (P) and a Hypothesis sentence (H). The "Label" column indicates the relationship between each sentence pair, which can be contradiction (contra), entailment (entail), or neutral. "+Lex" and "+Syn" represent the prediction results from the multilingual models infused with lexical and syntactic knowledge, respectively. The "Ours" column shows the results of integrating both types of knowledge into the model. Compared to the other two methods, our method can accurately predict the relationship between each sentence pair.
In this work, we aim to enhance unsupervised cross-lingual transfer by integrating knowledge from different linguistic levels. To achieve this, we propose a framework called "Lexicon-Syntax Enhanced Multilingual BERT" ("LS-mBERT"), based on a pre-trained multilingual BERT model. Specifically, we first preprocess the input source language sequences to obtain each word's part-of-speech information and dependency relationships between words in each sentence.
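To make this preprocessing step concrete, the following minimal Python sketch shows how per-word part-of-speech tags and dependency relations can be obtained. It uses Stanza for both tagging and parsing purely for brevity of illustration; the paper itself combines UDPipe for parsing with Stanza for POS tagging, as described in Section 3.1, and the example sentence is the one shown in Figure 1.

```python
# Minimal sketch: obtain POS tags and dependency relations for one sentence.
# Stanza handles both steps here only for brevity; the paper pairs UDPipe
# (dependency parsing) with Stanza (POS tagging).
import stanza

# stanza.download("en")  # one-time model download
nlp = stanza.Pipeline(lang="en", processors="tokenize,pos,lemma,depparse")

doc = nlp("The new iron guidelines mean more donors are needed")
for sentence in doc.sentences:
    for word in sentence.words:
        # word.head is the 1-based index of the head word (0 = root)
        print(word.id, word.text, word.upos, word.head, word.deprel)
```

The POS tags later feed the token representations of the syntactic module, and the head/relation pairs define the dependency graph it attends over.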
Then, we replace some words in the sentence with their translations from other languages while preserving the established dependency relationships. Furthermore, we employ a graph attention network(Veli\u010dkovi\u0107 et al., 2017) to construct a syntactic module, the output of which is integrated into the attention heads of the multilingual BERT. This integration guides the entire model to focus on syntactic structural relationships. Finally, during the fine-tuning process, we simultaneously train the multilingual BERT and the syntactic module with the pre-processed text. As a result, our framework enables the multilingual BERT to not only implicitly learn knowledge related to lexical alignment but also encode knowledge about syntactic structure. To validate the effectiveness of our framework, we conduct experiments on various tasks, including text classification, named entity recognition (ner), and semantic parsing. The experimental results show that our framework consistently outperforms all baseline models in zero-shot cross-lingual transfer across these tasks. For instance, our method achieves the improvement of 3.7 points for mTOP dataset. Our framework also demonstrates significant improvements in generalized cross-lingual transfer. Moreover, we examine the impact of important parameters, such as the replacement ratio of source words, and languages for replacement. To facilitate further research explorations, we release our code at https://github.com/ Tian14267/LS_mBert. In this section, we provide a detailed introduction to our framework \"LS-mBERT\", as illustrated in Figure 1. Our objective is to enhance the crosslingual transfer capabilities of multilingual BERT (mBERT) by incorporating both lexical and syntactic knowledge. Given an input sequence, we first pre-process it using a part-of-speech tagger and a universal parser(Section 3.1). This yields the part-of-speech tag for each word and dependency relationships among words in the sequence. To enable mBERT to implicitly encode word alignment information, we substitute some words with their translations from other languages using a code-switching technology (Section 3.2). Moreover, to guide mBERT in attending to syntactic relationships, we construct a graph attention network (GAT), introduced in Section 3.3. The output of the graph attention network is then used as input to the attention heads within BERT, effectively biasing attention information between words. Finally, to integrate both syntactic and lexical knowledge, we pass the code-switched text into both the GAT network and mBERT, which are trained simultaneously (Section 3.4). 3.1. Pre-processing Input Sequence The initial step involves pre-processing the input data to obtain prior knowledge for subsequent training. As our framework incorporates syntactic knowledge, we opt for an off-the-shelf parser with high accuracy to process the input text. In this case, we employ the UDPipe toolkit(Straka and Strakov\u00e1, 2017) to parse the inputs sentences, and Stanza(Qi et al., 2020) to annotate the part-of-speech information of each word. By utilizing both tools, given a sentence, we can obtain the dependency relationships between words and their part-of-speech information, which are then utilized to provide syntactic knowledge and enhance word representations, respectively. 3.2. Code-switching for Text (lexical knowledge) As our objective is to improve unsupervised crosslingual transfer, introducing explicit alignment signals would be inappropriate. 
Therefore, we employ an implicit strategy to guide the entire model to encode word alignment information. Inspired by the work of Qin et al. (2021), we opt for the code-switching strategy. Specifically, we first randomly select a proportion α of words within each source sentence. Then, for each selected word, we use a high-quality bilingual dictionary to substitute it with a corresponding translation from another target language. This method not only promotes the implicit alignment of representations across diverse languages within our model, but also enhances the model's robustness when processing input text. 3.3. Graph Attention Network (syntactic knowledge) To guide mBERT in acquiring syntactic knowledge better, we construct an external syntactic module by referring to the method introduced by Ahmad et al. (2021). The overview of this module is displayed in Figure 2. Given that there are n tokens in the input sequence, we first represent each token by combining its embedding representation with part-of-speech (POS) information. The representation of the i-th token can be calculated as: xi = ciWc + posiWpos, where ci and posi represent the token representation and the part-of-speech representation of the i-th token, respectively, while Wc and Wpos denote the token parameter matrix and the part-of-speech parameter matrix. Then, the encoded sequence s′ = [x1, x2, · · · , xn] is passed into the subsequent syntactic module, which is designed as a graph attention network (GAT) (Veličković et al., 2017). The GAT module comprises a total of L layers, each with m attention heads. These attention heads play a crucial role in generating representations for individual tokens by attending to neighboring tokens in the graph. Each attention head in GAT operates as follows: O = Attention(T, T, V, M), wherein T denotes the query and key matrices, and V represents the value matrix. Besides, M signifies the mask matrix, determining whether a pair of words in the dependency tree can attend to each other.
[Figure 1 depicts the example sentence "The new iron guidelines mean more donors are needed", its dependency parse, and its code-switched counterpart fed to both the GAT and mBERT.] Figure 1: An overview of lexicon-syntax enhanced multilingual BERT ("LS-mBERT"). An example sentence is provided to explain how this framework works. To introduce lexical alignment knowledge, we utilize bilingual dictionaries to randomly replace some words in the sentence with their equivalents from other languages (pink for German, green for Spanish, light blue for Chinese, and orange for French). Then, a graph attention network (GAT) is developed to encode the syntactic structure of this sentence. The output representation of GAT is sent to the attention heads in multilingual BERT to guide them to focus on language-specific structures.
Notably, the relationships between words in the attention matrix are modeled based on the distances between words in
Subsequently, the resulting representations produced by all attention heads are concatenated to form the output representations for each token. Finally, the output sequence from the final layer can be denoted as Y = [y1, y2, \u00b7 \u00b7 \u00b7 , yn], where yi represents the output representation for the i-th token. To maintain the lightweight nature of the architecture, certain elements in GAT have been excluded. Specifically, we do not employ feed-forward sub-layers, residual connections, or positional representations. We found that these modifications do not result in a significant performance gap. 3.4. Summary of the Framework: Lexicon-syntax Enhanced Multilingual BERT In this subsection, we provide an overview of our \"LS-mBERT\" framework, as illustrated in Figure 1. We first select multilingual BERT (mBERT) as the base model. Then, we process the input sequence using the code-switching strategy in Section 3.2, resulting in the code-switched sequence s\u2032. It is important to note that despite some words in each sentence being replaced with other languages, the original dependency relationships between words are still preserved in s\u2032. Next, we feed the codeswitched text into both mBERT and the syntactic module (GAT), facilitating the fusion of the two types of knowledge. Furthermore, this step guides the entire model to better align different languages within the high-dimensional vector space during training. After GAT processes the code-switched sequence, the output from the final layer is utilized to bias the attention heads of mBERT. The calculation process can be described as follows: O = Attention(Q + Y W Q l , K + Y W K l , V ), where Q, K, and V represent the query, key, and value matrices, respectively; While W Q l and W K l are new parameters to learn for biasing the query and key matrices. t1 c1 pos1 + x1 t2 c2 pos2 + x2 ... ... ... tn-1 cn-1 posn-1 + xn-1 tn cn posn + xn ... ... + + + + + m \u00d7 L layers y1 y2 yn-1 yn ... input seq token emb pos emb att layer Figure 2: The architecture of graph attention network (Ahmad et al., 2021; Veli\u010dkovi\u0107 et al., 2017). Each input token is represented by combining its token embedding and part-of-speech embedding. Each attention head within the graph attention network(GAT) generates a representation for each token embedding by attending to its neighboring tokens in the dependency graph. Next, the resulting representations are concatenated to form the output representation for each token. Finally, we can obtain the representations of the output sequence embeddings from the final layer of GAT. 4. Experiments 4.1. Experimental Settings As above mentioned, we use UDPipe (Straka and Strakov\u00e1, 2017) and Stanza (Qi et al., 2020) for parsing sentences and obtaining words\u2019 part-ofspeech information in all languages, and employ MUSE (Lample et al., 2018) as the bilingual dictionary for word substitution. For all tasks, we identify the optimal parameter combinations by searching within the candidate sets. The learning rate is set to 2e-5, utilizing AdamW as the optimizer. The batch size is 64, and the maximum length for input sequences is 128 tokens. For code-switching, we vary the replacement ratio (\u03b1) from 0.3 to 0.7 with a step of 0.1. For the GAT network, we adopt the identical parameter values as employed in the work of Ahmad et al. (2021). Specifically, we set L to 4 and k to 4. 4.2. Tasks Our framework is evaluated on the following tasks, using English as the source language. 
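Before describing the individual tasks, the integration step of Section 3.4 can be illustrated with a small PyTorch-style sketch: the GAT output Y is projected by the newly learned matrices W_l^Q and W_l^K and added to the query and key matrices of an mBERT attention head, while the value matrix is left unchanged. This is only a shape-level sketch under standard scaled dot-product attention; dimensions and variable names are illustrative, and the dependency-distance mask used inside the GAT itself is omitted.

```python
import torch
import torch.nn.functional as F

def syntax_biased_attention(Q, K, V, Y, Wq_l, Wk_l):
    """One attention head whose queries and keys are biased by GAT outputs.

    Q, K, V    : (batch, seq_len, d) projections inside mBERT
    Y          : (batch, seq_len, d_gat) output of the final GAT layer
    Wq_l, Wk_l : (d_gat, d) newly learned projection matrices
    """
    Qb = Q + Y @ Wq_l   # bias queries with syntactic representations
    Kb = K + Y @ Wk_l   # bias keys with syntactic representations
    scores = Qb @ Kb.transpose(-2, -1) / (Q.size(-1) ** 0.5)
    return F.softmax(scores, dim=-1) @ V

# shape check with random tensors (illustrative dimensions only)
B, T, d, d_gat = 2, 8, 64, 32
Q, K, V = torch.randn(B, T, d), torch.randn(B, T, d), torch.randn(B, T, d)
Y = torch.randn(B, T, d_gat)
Wq_l, Wk_l = torch.randn(d_gat, d), torch.randn(d_gat, d)
print(syntax_biased_attention(Q, K, V, Y, Wq_l, Wk_l).shape)  # torch.Size([2, 8, 64])
```

Because only the query and key matrices are shifted, the value vectors stay untouched; the syntactic module redistributes attention weights rather than replacing mBERT's content representations.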
Some statistics are summarized in Table 2, along with the detailed descriptions provided below. Text Classification. Text Classification is a task that assigns predefined categories to open-ended text. In our experiment, we utilize two publicly available dataset: XNLI and PAWS-X. In XNLI (Conneau et al., 2018), models need to predict whether a given pair of sentences is entailed, contradicted, or neutral; In PAWS-X (Yang et al., 2019), models are required to determine whether two given sentences or phrases convey the same meaning. When implementing the two tasks, to establish connections between the dependency trees of the two sentences, we introduce two edges from the [CLS] token to the root nodes. Subsequently, we apply the code-switching technique to randomly replace certain words in the sentence pairs. Named Entity Recognition. Named Entity Recognition (NER) is a task that involves the automatic identification and categorization of named entities. In our experiment, we employ the Wikiann (Pan et al., 2017) dataset. Wikiann consists of Wikipedia articles annotated with person, location, organization, and other tags in the IOB2 format. Our method is evaluated across 15 languages. To ensure that the models can obtain complete entity information, we exclusively substitute words that do not constitute named entities during the code-switching process. Task-oriented Semantic Parsing. In this task, the models are required to determine the intent of the utterance and then fill the relevant slots. The dataset for the experiment is mTOP (Li et al., 2021), which is an almost parallel corpus, containing 100k examples in total across 6 languages. Our experiments cover 5 languages. 4.3. Baselines We choose the following methods as baselines to compare: \u2022 mBERT. We exclusively utilize the multilingual BERT model to perform zero-shot crosslingual transfer for these tasks. \u2022 mBERT+Syn. A graph attention network (GAT) is integrated with multilingual BERT, and these two components are jointly trained for all tasks. \u2022 mBERT+Code-switch. The multilingual BERT model is fine-tuned with the codeswitched text across various languages. 5. Results and analysis 5.1. Cross-Lingual Transfer Results The main experimental results are displayed in Table 3. Our method consistently demonstrates superior performance across all tasks compared to other baselines. This indicates our method\u2019s effectiveness for cross-lingual transfer, achieved through the incorporation of lexical and syntactic knowledge. Especially for the tasks Wikiann and mTOP, our method exhibits a significant improvement, with an increase of 2.2 and 3.7 points, respectively, when compared to the baseline with the best performance. In addition, since code-switching technique blends words from various language, we calculate the results across the languages excluding English, as shown in the column \"AVG/en\" in Table 3. We find that the performance gap between our method and each baseline in most tasks becomes wider. This also indicates that our method can more effectively align non-English languages within the same vector space implicitly. For each task, we discover most of languages can gain improvement by using our method, as compared to the top-performing baseline. Specifically, 84.6% (11/13), 100.0% (7/7), 80.0% (12/15) and 100.0% (5/5) languages demonstrate improvement in XNLI, PAWS-X, Wikiann and mTOP respectively. 
Furthermore, our method also provides improvement for non-alphabetic languages in many tasks, such as Chinese, Japan and Korean. This reflects that our method can be effectively generalized into various target languages, even in cases where significant differences exist between the source and target languages. Task Dataset |Train| |Dev| |Test| |Lang| Metric Classification XNLI 392K 2.5K 5K 13 Accuracy Classification PAWS-X 49K 2K 2K 7 Accuracy NER Wikiann 20K 10K 1-10K 15 F1 Semantic Parsing mTOP 15.7K 2.2K 2.8-4.4K 5 Exact Match Table 2: Evaluation datasets. |Train|, |Dev| and |Test| delegate the numbers of examples in the training, validation and testing sets, respectively. |Lang| is the number of target languages we use in each task. Tasks Methods en ar bg de el es fr hi ru tr ur vi zh ko nl pt ja AVG / en AVG XNLI (Conneau et al., 2018) mBERT 80.8 64.3 68.0 70.0 65.3 73.5 73.4 58.9 67.8 60.9 57.2 69.3 67.8 66.4 67.5 mBERT+Syn 81.6 65.4 69.3 70.7 66.5 74.1 73.2 60.5 68.8 62.4 58.7 69.9 69.3 67.4 68.5 mBERT+code-switch 80.9 64.2 70.0 71.5 67.1 73.7 73.2 61.6 68.9 58.6 57.8 69.9 70.0 67.2 68.3 our method 81.3 65.8 71.3 71.8 68.3 75.2 74.2 62.8 70.7 61.1 58.8 71.8 70.8 68.6 69.5 PAWS-X (Yang et al., 2019) mBERT 94.0 85.7 87.4 87.0 77.0 69.6 73.0 80.2 81.7 mBERT+Syn 93.7 86.2 89.5 88.7 78.8 75.5 75.9 82.7 83.9 mBERT+code-switch 92.4 85.9 87.9 88.3 80.2 78.0 78.0 83.4 84.3 our method 93.8 87.2 89.6 89.4 81.8 79.0 80.0 84.6 85.6 Wikiann(Pan et al., 2017) mBERT 83.7 36.1 76.0 75.2 68.0 75.8 79.0 65.0 63.9 69.1 38.7 71.0 58.9 81.3 79.0 66.9 68.1 mBERT+Syn 84.1 34.6 76.9 75.4 68.2 76.0 79.1 64.0 64.2 68.7 38.0 73.1 58.0 81.7 79.5 67.0 68.1 mBERT+code-switch 82.4 39.2 77.1 75.2 68.2 71.0 78.0 66.1 64.2 72.4 41.3 69.2 59.9 81.3 78.9 67.3 68.3 our method 84.5 41.4 78.9 77.3 70.2 75.3 80.3 67.6 63.9 73.1 46.8 72.6 62.2 81.8 80.8 69.4 70.5 mTOP(Li et al., 2021) mBERT 81.0 28.1 40.2 38.8 9.8 29.2 39.6 mBERT+Syn 81.3 30.0 43.0 41.2 11.5 31.4 41.4 mBERT+code-switch 82.3 40.3 47.5 48.2 16.0 38.0 46.8 our method 83.5 44.5 54.2 51.7 18.8 47.3 50.5 Table 3: The experimental results on four tasks. The best results in each task are highlighted in bold. The baselines include \"mBERT\", \"mBERT+Syn\" and \"mBERT+codeswitch\". They delegate \"only using mBERT\", \"using mBERT with a syntactic module (GAT)\" and \"mBERT with the code-switching technique\" for cross-lingual transfer. The results of \"mBERT\" is from Hu et al. (2020). For \"mBERT+Syn\" and \"mBERT+code-switch\", we adopt open-source code of the work of Ahmad et al. (2021) and Qin et al. (2021) to reproduce these experiments, and report the results. The evaluation metrics are F1 value for the NER task, Accuracy for classification tasks, and Exact Match for semantic parsing. The \"AVG\" column means the average performance across all language for each method, while the \"AVG /en\" indicates the average performance on the languages excluding English. 5.2. Generalized Cross-Lingual Transfer Results In practical scenarios, cross-lingual transfer could involve any language pair. For example, in a crosslingual question-answering (QA) task, the context passage may be in German, while the multilingual model is required to answer the question in French. Considering on this, we conduct zero-shot cross-lingual transfer experiments within a generalized setting. Since PAWS-X and mTOP are completely parallel, we evaluate the performance of our method and \"mBERT\" baseline on generalized cross-lingual transfer tasks using the two dataset. 
The experimental results are illustrated in Figure 3. For both the classification and semantic parsing benchmarks, we observe improvements for most language pairs, which indicates that our method is effective for generalized cross-lingual transfer. Furthermore, when English is included in the language pair, the performance gain is substantial. Specifically, when English serves as the source language, the average performance across target languages increases by over 10% and 3% on the mTOP and PAWS-X datasets, respectively. This reflects the effectiveness of code-switching in aligning other languages with English. For the PAWS-X dataset, we find that some non-Indo-European languages such as Japanese, Korean, and Chinese also achieve improvements, even when the source language belongs to the Indo-European family (English, Spanish, French, or German). This suggests that syntactic knowledge can effectively narrow the structural gap between languages for this task, especially for language pairs without close linguistic relationships. 6. Analysis and Discussion 6.1. Impact on Languages We investigate whether our method improves the performance of specific languages or language groups. As shown in Figure 4, we display the performance improvement of our method over the "mBERT" baseline. We find that almost all languages benefit from our method. In particular, when the target language belongs to the Indo-European language family, such as German, Spanish and French, the improvement is very pronounced. Furthermore, the performance on the mTOP task is improved significantly by our method across all languages. This may be because our method considers both syntax and lexicon simultaneously, which is beneficial for the semantic parsing task.
[Two heatmaps, (a) mTOP and (b) PAWS-X, with source and target languages on the two axes.] Figure 3: Results for generalized zero-shot cross-lingual transfer on mTOP and PAWS-X. We report the performance differences between our method and the "mBERT" baseline across all languages.
[Bar chart; x-axis: language (en, de, es, fr, bg, ru, ar, vi, tr, ur, el, hi, zh, ko), y-axis: performance improvement (%), one bar per task.] Figure 4: Performance improvements for XNLI, PAWS-X, Wikiann, and mTOP across languages. The languages on the x-axis are grouped by language families: IE.Germanic (en, de), IE.Romance (es, fr), IE.Slavic (bg, ru), Afro-asiatic (ar), Austro-asiatic (vi), Altaic (tr, ur), IE.Greek (el), IE.Indic (hi), Sino-tibetan (zh), Korean (ko).
6.2. Representation Similarities across Languages To evaluate the effectiveness of our method in aligning different languages, we use the representation similarity between languages as the metric. Specifically, we use the test set of XNLI (Conneau et al., 2018), which consists of parallel sentences across multiple languages. We then take the vector of the [CLS] token from the final layer of our model, as well as from the two baselines ("mBERT+Syn" and "mBERT+code-switch"), for each sentence. Following Libovický et al. (2019), the centroid vector representing each language is calculated by averaging these sentence representations. Finally, we adopt cosine similarity to assess the degree of alignment between English and each target language. Figure 5 illustrates the similarities between languages for our method and the other two baselines.
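For concreteness, the centroid-and-cosine computation just described can be sketched as follows; it assumes the [CLS] vectors of the parallel XNLI test sentences have already been extracted for each language, and random vectors stand in for real embeddings here.

```python
import numpy as np

def language_similarities(cls_vectors, source="en"):
    """cls_vectors maps a language code -> array of shape (n_sentences, hidden_dim)
    holding the [CLS] vectors of parallel test sentences.
    Returns the cosine similarity between each target-language centroid
    and the source-language centroid."""
    centroids = {lang: vecs.mean(axis=0) for lang, vecs in cls_vectors.items()}
    src = centroids[source]
    sims = {}
    for lang, cen in centroids.items():
        if lang == source:
            continue
        sims[lang] = float(np.dot(src, cen) /
                           (np.linalg.norm(src) * np.linalg.norm(cen)))
    return sims

# illustrative call with random vectors in place of real [CLS] embeddings
rng = np.random.default_rng(0)
fake = {lang: rng.normal(size=(100, 768)) for lang in ["en", "de", "fr", "zh"]}
print(language_similarities(fake))
```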
It can be easily found that our method outperforms the other two baselines in aligning language representations. This suggests that infusing two types of knowledge is indeed effective in reducing the disparities in language typologies, which improve cross-lingual transfer performance. In addition, we observe that \"mBERT+code-switch\" performs better than \"mBERT+Syn\", which reflects that lexical knowledge is more useful than syntactic knowledge for this task. 6.3. Impact of Code-switching The replacement ratio \u03b1 for code-switching is an important hyper-parameter in our method. Hence, we explore its impact on mTOP and PAWS-X, by varying \u03b1 from 0 to 0.9 in increments of 0.1, shown in Figure 6. When \u03b1 is set to 0, it represents the results of the baseline \"mBERT+Syn\". As \u03b1 increases, more source words are substituted with their equivalent words from other languages. The performance improvement certificates the effectiveness of code-switching technique. Notably, when about half of the words are replaced (0.5 for PAWS80 85 90 95 100 ar bg de el es fr hi ru tr ur vi zh mBERT+Syn mBERT+code-switch LS-mBERT Figure 5: The similarities between languages. We first calculate the centroid representation for each language following Libovick` y et al. (2019). Then we adopt cosine similarity to evaluate the similarity between English and each target language. X and 0.4 for mTOP), the performance reaches their peaks. After that, both tasks experience a decline in performance. This decline might be because the expression of meaning and sentence structure are influenced severely as too many words are replaced. Therefore, it is a optimal choice to set \u03b1 between 0.4 to 0.5 for code-switching. Figure 6: Performance on mTOP and PAWS-X with different replacement ratio \u03b1 in code-switching. Furthermore, we investigate whether the choice of the replacement language in code-switching impacts our model\u2019s performance. We select mTOP and PAWS-X as the testing tasks. In codeswitching, we devise three different measures for language replacement: \"Exclusively replacing with the target language\", \"Replacing with languages from the same language family as the target language\"; and \"Replacing with languages selected randomly\". The experimental results are illustrated in Figure 7. We can easily observe that \"Exclusively replacing with the target language\" performs best, while \"Replacing with randomly selected languages\" yields the poorest results. Hence, this also underscores the importance of selecting languages closely related to each target language for substitution when employing the code-switching technique. 35 45 55 65 75 85 95 mTOP PAWS-X Performance(%) Type1 Type2 Type3 Figure 7: Performance on mTOP and PAWS-X with different replacement languages in code-switching. The source language for both tasks is English, and the results are averaged across all target languages excluding English. \u201cType1\u201d represents the replacement with the target language; \u201cType2\u201d represents the replacement with languages from the same language family as the target language; \u201cType3\u201d represents the replacement with randomly selected languages. 6.4. Performance with XLM-R To validate the universality of our method, we substitute multilingual BERT with XLM-R in our framework. XLM-R is a more robust multilingual pre-trained model known for its exceptional crosslingual transfer capabilities. 
Subsequently, we test its performance on the PAWX-S dataset, and the experimental results are displayed in Table 4. In Table 4, we also observe that our framework outperforms the other three baselines. This indicates that integrating lexical and syntactic knowledge is beneficial for enhancing performance, irrespective of the base model employed. Notably, our framework only achieves the slight performance improvement when utilizing XLM-R as the base model compared to employing multilingual BERT. It may be because that the base model, XLM-R, adopt larger corpus during pre-training, resulting in preserving richer language information. Consequently, XLM-R itself has possessed superior cross-lingual transfer capabilities. The assistance by incorporating external linguistic knowledge appears to be relatively minor in comparison. 6.5. Limitations and Challenges In our study, we adopt a bilingual dictionary, such as MUSE (Lample et al., 2018), to substitute words in other languages. However, we randomly choose a target language word when there exist multiple translations for a source language word. This approach, although convenient, neglect the context of the source language word, potentially leading to inaccurate translations. This also highlights us to explore more precise word alignment methods in Task Methods en ar bg de el es fr hi ru tr ur vi ko nl pt AVG PAWS-X XLM-R 84.2 48.5 80.5 77.0 77.8 76.1 79.8 67.5 70.4 76.0 54.2 78.5 59.1 83.3 79.3 72.8 XLM-R+Syn 83.5 46.4 80.1 76.0 78.9 77.6 79.1 72.1 70.6 76.1 55.3 77.6 59.0 83.1 79.2 73.0 XKLM-R+code-switch 83.4 46.8 81.7 78.2 79.2 71.1 78.6 72.9 70.6 77.2 57.9 76.0 58.2 83.6 80.0 73.0 our method 83.1 44.9 82.7 76.8 78.4 76.9 79.6 71.1 70.1 76.6 60.4 78.2 58.1 83.5 79.7 73.3 Table 4: Results for PAWS-X with XLM-R. the future. Furthermore, the tasks we have evaluated are quite limited, with some of them involving only a few languages. In the future, we will extend our method to more cross-lingual tasks. Meanwhile, we also develop dataset for these tasks to support more languages. 7. Conclusion In this paper, we present a framework called \"lexicon-syntax enhanced multilingual BERT\" (\"LSmBERT\"), which infuses lexical and syntactic knowledge to enhance cross-lingual transfer performance. Our method employs code-switching technology to generate input text mixed in various languages, enabling the entire model to capture lexical alignment information during training. Besides, a syntactic module consisting of a graph attention network (GAT) is introduced to guide mBERT in encoding language structures. The experimental results demonstrate that our proposed method outperforms all the baselines across different tasks, which certificates the effectiveness of integrating both types of knowledge into mBERT for improving cross-lingual transfer. In the future, we plan to incorporate different linguistic knowledge into large language models (LLMs) to further enhance cross-lingual transfer performance. 8. Acknowledgements The authors would like to thank the anonymous reviewers for their feedback and suggestions. Additionally, this work was supported by the Major Program of the National Social Science Fund of China (18ZDA238), the National Social Science Fund of China (No.21CYY032), Beihang University Sponsored Projects for Core Young Researchers in the Disciplines of Social Sciences and Humanities(KG16183801) and Tianjin Postgraduate Scientific Research Innovation Program (No.2022BKY024). 9. 
Bibliographical" + }, + { + "url": "http://arxiv.org/abs/2112.00503v5", + "title": "Zero-Shot Cross-Lingual Machine Reading Comprehension via Inter-sentence Dependency Graph", + "abstract": "We target the task of cross-lingual Machine Reading Comprehension (MRC) in\nthe direct zero-shot setting, by incorporating syntactic features from\nUniversal Dependencies (UD), and the key features we use are the syntactic\nrelations within each sentence. While previous work has demonstrated effective\nsyntax-guided MRC models, we propose to adopt the inter-sentence syntactic\nrelations, in addition to the rudimentary intra-sentence relations, to further\nutilize the syntactic dependencies in the multi-sentence input of the MRC task.\nIn our approach, we build the Inter-Sentence Dependency Graph (ISDG) connecting\ndependency trees to form global syntactic relations across sentences. We then\npropose the ISDG encoder that encodes the global dependency graph, addressing\nthe inter-sentence relations via both one-hop and multi-hop dependency paths\nexplicitly. Experiments on three multilingual MRC datasets (XQuAD, MLQA,\nTyDiQA-GoldP) show that our encoder that is only trained on English is able to\nimprove the zero-shot performance on all 14 test sets covering 8 languages,\nwith up to 3.8 F1 / 5.2 EM improvement on-average, and 5.2 F1 / 11.2 EM on\ncertain languages. Further analysis shows the improvement can be attributed to\nthe attention on the cross-linguistically consistent syntactic path.", + "authors": "Liyan Xu, Xuchao Zhang, Bo Zong, Yanchi Liu, Wei Cheng, Jingchao Ni, Haifeng Chen, Liang Zhao, Jinho D. Choi", + "published": "2021-12-01", + "updated": "2022-03-15", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2106.02134v1", + "title": "Syntax-augmented Multilingual BERT for Cross-lingual Transfer", + "abstract": "In recent years, we have seen a colossal effort in pre-training multilingual\ntext encoders using large-scale corpora in many languages to facilitate\ncross-lingual transfer learning. However, due to typological differences across\nlanguages, the cross-lingual transfer is challenging. Nevertheless, language\nsyntax, e.g., syntactic dependencies, can bridge the typological gap. Previous\nworks have shown that pre-trained multilingual encoders, such as mBERT\n\\cite{devlin-etal-2019-bert}, capture language syntax, helping cross-lingual\ntransfer. This work shows that explicitly providing language syntax and\ntraining mBERT using an auxiliary objective to encode the universal dependency\ntree structure helps cross-lingual transfer. We perform rigorous experiments on\nfour NLP tasks, including text classification, question answering, named entity\nrecognition, and task-oriented semantic parsing. The experiment results show\nthat syntax-augmented mBERT improves cross-lingual transfer on popular\nbenchmarks, such as PAWS-X and MLQA, by 1.4 and 1.6 points on average across\nall languages. 
In the \\emph{generalized} transfer setting, the performance\nboosted significantly, with 3.9 and 3.1 points on average in PAWS-X and MLQA.", + "authors": "Wasi Uddin Ahmad, Haoran Li, Kai-Wei Chang, Yashar Mehdad", + "published": "2021-06-03", + "updated": "2021-06-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1909.00310v3", + "title": "Syntax-aware Multilingual Semantic Role Labeling", + "abstract": "Recently, semantic role labeling (SRL) has earned a series of success with\neven higher performance improvements, which can be mainly attributed to\nsyntactic integration and enhanced word representation. However, most of these\nefforts focus on English, while SRL on multiple languages more than English has\nreceived relatively little attention so that is kept underdevelopment. Thus\nthis paper intends to fill the gap on multilingual SRL with special focus on\nthe impact of syntax and contextualized word representation. Unlike existing\nwork, we propose a novel method guided by syntactic rule to prune arguments,\nwhich enables us to integrate syntax into multilingual SRL model simply and\neffectively. We present a unified SRL model designed for multiple languages\ntogether with the proposed uniform syntax enhancement. Our model achieves new\nstate-of-the-art results on the CoNLL-2009 benchmarks of all seven languages.\nBesides, we pose a discussion on the syntactic role among different languages\nand verify the effectiveness of deep enhanced representation for multilingual\nSRL.", + "authors": "Shexia He, Zuchao Li, Hai Zhao", + "published": "2019-09-01", + "updated": "2019-09-10", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.09435v2", + "title": "Expanding Pretrained Models to Thousands More Languages via Lexicon-based Adaptation", + "abstract": "The performance of multilingual pretrained models is highly dependent on the\navailability of monolingual or parallel text present in a target language.\nThus, the majority of the world's languages cannot benefit from recent progress\nin NLP as they have no or limited textual data. To expand possibilities of\nusing NLP technology in these under-represented languages, we systematically\nstudy strategies that relax the reliance on conventional language resources\nthrough the use of bilingual lexicons, an alternative resource with much better\nlanguage coverage. We analyze different strategies to synthesize textual or\nlabeled data using lexicons, and how this data can be combined with monolingual\nor parallel text when available. For 19 under-represented languages across 3\ntasks, our methods lead to consistent improvements of up to 5 and 15 points\nwith and without extra monolingual text respectively. Overall, our study\nhighlights how NLP methods can be adapted to thousands more languages that are\nunder-served by current technology", + "authors": "Xinyi Wang, Sebastian Ruder, Graham Neubig", + "published": "2022-03-17", + "updated": "2022-04-06", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2105.11246v1", + "title": "Cross-lingual Text Classification with Heterogeneous Graph Neural Network", + "abstract": "Cross-lingual text classification aims at training a classifier on the source\nlanguage and transferring the knowledge to target languages, which is very\nuseful for low-resource languages. 
Recent multilingual pretrained language\nmodels (mPLM) achieve impressive results in cross-lingual classification tasks,\nbut rarely consider factors beyond semantic similarity, causing performance\ndegradation between some language pairs. In this paper we propose a simple yet\neffective method to incorporate heterogeneous information within and across\nlanguages for cross-lingual text classification using graph convolutional\nnetworks (GCN). In particular, we construct a heterogeneous graph by treating\ndocuments and words as nodes, and linking nodes with different relations, which\ninclude part-of-speech roles, semantic similarity, and document translations.\nExtensive experiments show that our graph-based method significantly\noutperforms state-of-the-art models on all tasks, and also achieves consistent\nperformance gain over baselines in low-resource settings where external tools\nlike translators are unavailable.", + "authors": "Ziyun Wang, Xuan Liu, Peiji Yang, Shixing Liu, Zhisheng Wang", + "published": "2021-05-24", + "updated": "2021-05-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2105.11246v1", + "title": "Cross-lingual Text Classification with Heterogeneous Graph Neural Network", + "abstract": "Cross-lingual text classification aims at training a classifier on the source\nlanguage and transferring the knowledge to target languages, which is very\nuseful for low-resource languages. Recent multilingual pretrained language\nmodels (mPLM) achieve impressive results in cross-lingual classification tasks,\nbut rarely consider factors beyond semantic similarity, causing performance\ndegradation between some language pairs. In this paper we propose a simple yet\neffective method to incorporate heterogeneous information within and across\nlanguages for cross-lingual text classification using graph convolutional\nnetworks (GCN). In particular, we construct a heterogeneous graph by treating\ndocuments and words as nodes, and linking nodes with different relations, which\ninclude part-of-speech roles, semantic similarity, and document translations.\nExtensive experiments show that our graph-based method significantly\noutperforms state-of-the-art models on all tasks, and also achieves consistent\nperformance gain over baselines in low-resource settings where external tools\nlike translators are unavailable.", + "authors": "Ziyun Wang, Xuan Liu, Peiji Yang, Shixing Liu, Zhisheng Wang", + "published": "2021-05-24", + "updated": "2021-05-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2006.06402v2", + "title": "CoSDA-ML: Multi-Lingual Code-Switching Data Augmentation for Zero-Shot Cross-Lingual NLP", + "abstract": "Multi-lingual contextualized embeddings, such as multilingual-BERT (mBERT),\nhave shown success in a variety of zero-shot cross-lingual tasks. However,\nthese models are limited by having inconsistent contextualized representations\nof subwords across different languages. Existing work addresses this issue by\nbilingual projection and fine-tuning technique. We propose a data augmentation\nframework to generate multi-lingual code-switching data to fine-tune mBERT,\nwhich encourages model to align representations from source and multiple target\nlanguages once by mixing their context information. 
Compared with the existing\nwork, our method does not rely on bilingual sentences for training, and\nrequires only one training process for multiple target languages. Experimental\nresults on five tasks with 19 languages show that our method leads to\nsignificantly improved performances for all the tasks compared with mBERT.", + "authors": "Libo Qin, Minheng Ni, Yue Zhang, Wanxiang Che", + "published": "2020-06-11", + "updated": "2020-07-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2110.08538v1", + "title": "Substructure Distribution Projection for Zero-Shot Cross-Lingual Dependency Parsing", + "abstract": "We present substructure distribution projection (SubDP), a technique that\nprojects a distribution over structures in one domain to another, by projecting\nsubstructure distributions separately. Models for the target domains can be\nthen trained, using the projected distributions as soft silver labels. We\nevaluate SubDP on zero-shot cross-lingual dependency parsing, taking dependency\narcs as substructures: we project the predicted dependency arc distributions in\nthe source language(s) to target language(s), and train a target language\nparser to fit the resulting distributions. When an English treebank is the only\nannotation that involves human effort, SubDP achieves better unlabeled\nattachment score than all prior work on the Universal Dependencies v2.2 (Nivre\net al., 2020) test set across eight diverse target languages, as well as the\nbest labeled attachment score on six out of eight languages. In addition, SubDP\nimproves zero-shot cross-lingual dependency parsing with very few (e.g., 50)\nsupervised bitext pairs, across a broader range of target languages.", + "authors": "Haoyue Shi, Kevin Gimpel, Karen Livescu", + "published": "2021-10-16", + "updated": "2021-10-16", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2203.09435v2", + "title": "Expanding Pretrained Models to Thousands More Languages via Lexicon-based Adaptation", + "abstract": "The performance of multilingual pretrained models is highly dependent on the\navailability of monolingual or parallel text present in a target language.\nThus, the majority of the world's languages cannot benefit from recent progress\nin NLP as they have no or limited textual data. To expand possibilities of\nusing NLP technology in these under-represented languages, we systematically\nstudy strategies that relax the reliance on conventional language resources\nthrough the use of bilingual lexicons, an alternative resource with much better\nlanguage coverage. We analyze different strategies to synthesize textual or\nlabeled data using lexicons, and how this data can be combined with monolingual\nor parallel text when available. For 19 under-represented languages across 3\ntasks, our methods lead to consistent improvements of up to 5 and 15 points\nwith and without extra monolingual text respectively. 
Overall, our study\nhighlights how NLP methods can be adapted to thousands more languages that are\nunder-served by current technology", + "authors": "Xinyi Wang, Sebastian Ruder, Graham Neubig", + "published": "2022-03-17", + "updated": "2022-04-06", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2106.02134v1", + "title": "Syntax-augmented Multilingual BERT for Cross-lingual Transfer", + "abstract": "In recent years, we have seen a colossal effort in pre-training multilingual\ntext encoders using large-scale corpora in many languages to facilitate\ncross-lingual transfer learning. However, due to typological differences across\nlanguages, the cross-lingual transfer is challenging. Nevertheless, language\nsyntax, e.g., syntactic dependencies, can bridge the typological gap. Previous\nworks have shown that pre-trained multilingual encoders, such as mBERT\n\\cite{devlin-etal-2019-bert}, capture language syntax, helping cross-lingual\ntransfer. This work shows that explicitly providing language syntax and\ntraining mBERT using an auxiliary objective to encode the universal dependency\ntree structure helps cross-lingual transfer. We perform rigorous experiments on\nfour NLP tasks, including text classification, question answering, named entity\nrecognition, and task-oriented semantic parsing. The experiment results show\nthat syntax-augmented mBERT improves cross-lingual transfer on popular\nbenchmarks, such as PAWS-X and MLQA, by 1.4 and 1.6 points on average across\nall languages. In the \\emph{generalized} transfer setting, the performance\nboosted significantly, with 3.9 and 3.1 points on average in PAWS-X and MLQA.", + "authors": "Wasi Uddin Ahmad, Haoran Li, Kai-Wei Chang, Yashar Mehdad", + "published": "2021-06-03", + "updated": "2021-06-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2011.05706v1", + "title": "Multilingual Irony Detection with Dependency Syntax and Neural Models", + "abstract": "This paper presents an in-depth investigation of the effectiveness of\ndependency-based syntactic features on the irony detection task in a\nmultilingual perspective (English, Spanish, French and Italian). It focuses on\nthe contribution from syntactic knowledge, exploiting linguistic resources\nwhere syntax is annotated according to the Universal Dependencies scheme. Three\ndistinct experimental settings are provided. In the first, a variety of\nsyntactic dependency-based features combined with classical machine learning\nclassifiers are explored. In the second scenario, two well-known types of word\nembeddings are trained on parsed data and tested against gold standard\ndatasets. In the third setting, dependency-based syntactic features are\ncombined into the Multilingual BERT architecture. 
The results suggest that\nfine-grained dependency-based syntactic information is informative for the\ndetection of irony.", + "authors": "Alessandra Teresa Cignarella, Valerio Basile, Manuela Sanguinetti, Cristina Bosco, Paolo Rosso, Farah Benamara", + "published": "2020-11-11", + "updated": "2020-11-11", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2212.01749v1", + "title": "Semantic Graph Neural Network with Multi-measure Learning for Semi-supervised Classification", + "abstract": "Graph Neural Networks (GNNs) have attracted increasing attention in recent\nyears and have achieved excellent performance in semi-supervised node\nclassification tasks. The success of most GNNs relies on one fundamental\nassumption, i.e., the original graph structure data is available. However,\nrecent studies have shown that GNNs are vulnerable to the complex underlying\nstructure of the graph, making it necessary to learn comprehensive and robust\ngraph structures for downstream tasks, rather than relying only on the raw\ngraph structure. In light of this, we seek to learn optimal graph structures\nfor downstream tasks and propose a novel framework for semi-supervised\nclassification. Specifically, based on the structural context information of\ngraph and node representations, we encode the complex interactions in semantics\nand generate semantic graphs to preserve the global structure. Moreover, we\ndevelop a novel multi-measure attention layer to optimize the similarity rather\nthan prescribing it a priori, so that the similarity can be adaptively\nevaluated by integrating measures. These graphs are fused and optimized\ntogether with GNN towards semi-supervised classification objective. Extensive\nexperiments and ablation studies on six real-world datasets clearly demonstrate\nthe effectiveness of our proposed model and the contribution of each component.", + "authors": "Junchao Lin, Yuan Wan, Jingwen Xu, Xingchen Qi", + "published": "2022-12-04", + "updated": "2022-12-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2210.06126v1", + "title": "Regularized Graph Structure Learning with Semantic Knowledge for Multi-variates Time-Series Forecasting", + "abstract": "Multivariate time-series forecasting is a critical task for many\napplications, and graph time-series network is widely studied due to its\ncapability to capture the spatial-temporal correlation simultaneously. However,\nmost existing works focus more on learning with the explicit prior graph\nstructure, while ignoring potential information from the implicit graph\nstructure, yielding incomplete structure modeling. Some recent works attempt to\nlearn the intrinsic or implicit graph structure directly while lacking a way to\ncombine explicit prior structure with implicit structure together. In this\npaper, we propose Regularized Graph Structure Learning (RGSL) model to\nincorporate both explicit prior structure and implicit structure together, and\nlearn the forecasting deep networks along with the graph structure. RGSL\nconsists of two innovative modules. First, we derive an implicit dense\nsimilarity matrix through node embedding, and learn the sparse graph structure\nusing the Regularized Graph Generation (RGG) based on the Gumbel Softmax trick.\nSecond, we propose a Laplacian Matrix Mixed-up Module (LM3) to fuse the\nexplicit graph and implicit graph together. 
We conduct experiments on three\nreal-word datasets. Results show that the proposed RGSL model outperforms\nexisting graph forecasting algorithms with a notable margin, while learning\nmeaningful graph structure simultaneously. Our code and models are made\npublicly available at https://github.com/alipay/RGSL.git.", + "authors": "Hongyuan Yu, Ting Li, Weichen Yu, Jianguo Li, Yan Huang, Liang Wang, Alex Liu", + "published": "2022-10-12", + "updated": "2022-10-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2006.13009v2", + "title": "Iterative Deep Graph Learning for Graph Neural Networks: Better and Robust Node Embeddings", + "abstract": "In this paper, we propose an end-to-end graph learning framework, namely\nIterative Deep Graph Learning (IDGL), for jointly and iteratively learning\ngraph structure and graph embedding. The key rationale of IDGL is to learn a\nbetter graph structure based on better node embeddings, and vice versa (i.e.,\nbetter node embeddings based on a better graph structure). Our iterative method\ndynamically stops when the learned graph structure approaches close enough to\nthe graph optimized for the downstream prediction task. In addition, we cast\nthe graph learning problem as a similarity metric learning problem and leverage\nadaptive graph regularization for controlling the quality of the learned graph.\nFinally, combining the anchor-based approximation technique, we further propose\na scalable version of IDGL, namely IDGL-Anch, which significantly reduces the\ntime and space complexity of IDGL without compromising the performance. Our\nextensive experiments on nine benchmarks show that our proposed IDGL models can\nconsistently outperform or match the state-of-the-art baselines. Furthermore,\nIDGL can be more robust to adversarial graphs and cope with both transductive\nand inductive learning.", + "authors": "Yu Chen, Lingfei Wu, Mohammed J. Zaki", + "published": "2020-06-21", + "updated": "2020-10-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2105.00696v1", + "title": "Graph Learning: A Survey", + "abstract": "Graphs are widely used as a popular representation of the network structure\nof connected data. Graph data can be found in a broad spectrum of application\ndomains such as social systems, ecosystems, biological networks, knowledge\ngraphs, and information systems. With the continuous penetration of artificial\nintelligence technologies, graph learning (i.e., machine learning on graphs) is\ngaining attention from both researchers and practitioners. Graph learning\nproves effective for many tasks, such as classification, link prediction, and\nmatching. Generally, graph learning methods extract relevant features of graphs\nby taking advantage of machine learning algorithms. In this survey, we present\na comprehensive overview on the state-of-the-art of graph learning. Special\nattention is paid to four categories of existing graph learning methods,\nincluding graph signal processing, matrix factorization, random walk, and deep\nlearning. Major models and algorithms under these categories are reviewed\nrespectively. We examine graph learning applications in areas such as text,\nimages, science, knowledge graphs, and combinatorial optimization. 
In addition,\nwe discuss several promising research directions in this field.", + "authors": "Feng Xia, Ke Sun, Shuo Yu, Abdul Aziz, Liangtian Wan, Shirui Pan, Huan Liu", + "published": "2021-05-03", + "updated": "2021-05-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.SI", + "68T07", + "I.2.6" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1910.08057v1", + "title": "Graph Embedding VAE: A Permutation Invariant Model of Graph Structure", + "abstract": "Generative models of graph structure have applications in biology and social\nsciences. The state of the art is GraphRNN, which decomposes the graph\ngeneration process into a series of sequential steps. While effective for\nmodest sizes, it loses its permutation invariance for larger graphs. Instead,\nwe present a permutation invariant latent-variable generative model relying on\ngraph embeddings to encode structure. Using tools from the random graph\nliterature, our model is highly scalable to large graphs with likelihood\nevaluation and generation in $O(|V | + |E|)$.", + "authors": "Tony Duan, Juho Lee", + "published": "2019-10-17", + "updated": "2019-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1801.03226v1", + "title": "Adaptive Graph Convolutional Neural Networks", + "abstract": "Graph Convolutional Neural Networks (Graph CNNs) are generalizations of\nclassical CNNs to handle graph data such as molecular data, point could and\nsocial networks. Current filters in graph CNNs are built for fixed and shared\ngraph structure. However, for most real data, the graph structures varies in\nboth size and connectivity. The paper proposes a generalized and flexible graph\nCNN taking data of arbitrary graph structure as input. In that way a\ntask-driven adaptive graph is learned for each graph data while training. To\nefficiently learn the graph, a distance metric learning is proposed. Extensive\nexperiments on nine graph-structured datasets have demonstrated the superior\nperformance improvement on both convergence speed and predictive accuracy.", + "authors": "Ruoyu Li, Sheng Wang, Feiyun Zhu, Junzhou Huang", + "published": "2018-01-10", + "updated": "2018-01-10", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.11307v3", + "title": "Transforming Graphs for Enhanced Attribute Clustering: An Innovative Graph Transformer-Based Method", + "abstract": "Graph Representation Learning (GRL) is an influential methodology, enabling a\nmore profound understanding of graph-structured data and aiding graph\nclustering, a critical task across various domains. The recent incursion of\nattention mechanisms, originally an artifact of Natural Language Processing\n(NLP), into the realm of graph learning has spearheaded a notable shift in\nresearch trends. Consequently, Graph Attention Networks (GATs) and Graph\nAttention Auto-Encoders have emerged as preferred tools for graph clustering\ntasks. Yet, these methods primarily employ a local attention mechanism, thereby\ncurbing their capacity to apprehend the intricate global dependencies between\nnodes within graphs. Addressing these impediments, this study introduces an\ninnovative method known as the Graph Transformer Auto-Encoder for Graph\nClustering (GTAGC). 
By melding the Graph Auto-Encoder with the Graph\nTransformer, GTAGC is adept at capturing global dependencies between nodes.\nThis integration amplifies the graph representation and surmounts the\nconstraints posed by the local attention mechanism. The architecture of GTAGC\nencompasses graph embedding, integration of the Graph Transformer within the\nautoencoder structure, and a clustering component. It strategically alternates\nbetween graph embedding and clustering, thereby tailoring the Graph Transformer\nfor clustering tasks, whilst preserving the graph's global structural\ninformation. Through extensive experimentation on diverse benchmark datasets,\nGTAGC has exhibited superior performance against existing state-of-the-art\ngraph clustering methodologies.", + "authors": "Shuo Han, Jiacheng Liu, Jiayun Wu, Yinan Chen, Li Tao", + "published": "2023-06-20", + "updated": "2023-08-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2311.11821v1", + "title": "Cross-View Graph Consistency Learning for Invariant Graph Representations", + "abstract": "Graph representation learning is fundamental for analyzing graph-structured\ndata. Exploring invariant graph representations remains a challenge for most\nexisting graph representation learning methods. In this paper, we propose a\ncross-view graph consistency learning (CGCL) method that learns invariant graph\nrepresentations for link prediction. First, two complementary augmented views\nare derived from an incomplete graph structure through a bidirectional graph\nstructure augmentation scheme. This augmentation scheme mitigates the potential\ninformation loss that is commonly associated with various data augmentation\ntechniques involving raw graph data, such as edge perturbation, node removal,\nand attribute masking. Second, we propose a CGCL model that can learn invariant\ngraph representations. A cross-view training scheme is proposed to train the\nproposed CGCL model. This scheme attempts to maximize the consistency\ninformation between one augmented view and the graph structure reconstructed\nfrom the other augmented view. Furthermore, we offer a comprehensive\ntheoretical CGCL analysis. This paper empirically and experimentally\ndemonstrates the effectiveness of the proposed CGCL method, achieving\ncompetitive results on graph datasets in comparisons with several\nstate-of-the-art algorithms.", + "authors": "Jie Chen, Zhiming Li, Hua Mao, Wai Lok Woo, Xi Peng", + "published": "2023-11-20", + "updated": "2023-11-20", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1912.07832v1", + "title": "Deep Iterative and Adaptive Learning for Graph Neural Networks", + "abstract": "In this paper, we propose an end-to-end graph learning framework, namely Deep\nIterative and Adaptive Learning for Graph Neural Networks (DIAL-GNN), for\njointly learning the graph structure and graph embeddings simultaneously. We\nfirst cast the graph structure learning problem as a similarity metric learning\nproblem and leverage an adapted graph regularization for controlling\nsmoothness, connectivity and sparsity of the generated graph. We further\npropose a novel iterative method for searching for a hidden graph structure\nthat augments the initial graph structure. 
Our iterative method dynamically\nstops when the learned graph structure approaches close enough to the optimal\ngraph. Our extensive experiments demonstrate that the proposed DIAL-GNN model\ncan consistently outperform or match state-of-the-art baselines in terms of\nboth downstream task performance and computational time. The proposed approach\ncan cope with both transductive learning and inductive learning.", + "authors": "Yu Chen, Lingfei Wu, Mohammed J. Zaki", + "published": "2019-12-17", + "updated": "2019-12-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.10146v2", + "title": "Exploring Structure-Adaptive Graph Learning for Robust Semi-Supervised Classification", + "abstract": "Graph Convolutional Neural Networks (GCNNs) are generalizations of CNNs to\ngraph-structured data, in which convolution is guided by the graph topology. In\nmany cases where graphs are unavailable, existing methods manually construct\ngraphs or learn task-driven adaptive graphs. In this paper, we propose Graph\nLearning Neural Networks (GLNNs), which exploit the optimization of graphs (the\nadjacency matrix in particular) from both data and tasks. Leveraging on\nspectral graph theory, we propose the objective of graph learning from a\nsparsity constraint, properties of a valid adjacency matrix as well as a graph\nLaplacian regularizer via maximum a posteriori estimation. The optimization\nobjective is then integrated into the loss function of the GCNN, which adapts\nthe graph topology to not only labels of a specific task but also the input\ndata. Experimental results show that our proposed GLNN outperforms\nstate-of-the-art approaches over widely adopted social network datasets and\ncitation network datasets for semi-supervised classification.", + "authors": "Xiang Gao, Wei Hu, Zongming Guo", + "published": "2019-04-23", + "updated": "2019-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2209.00793v2", + "title": "Structure-Preserving Graph Representation Learning", + "abstract": "Though graph representation learning (GRL) has made significant progress, it\nis still a challenge to extract and embed the rich topological structure and\nfeature information in an adequate way. Most existing methods focus on local\nstructure and fail to fully incorporate the global topological structure. To\nthis end, we propose a novel Structure-Preserving Graph Representation Learning\n(SPGRL) method, to fully capture the structure information of graphs.\nSpecifically, to reduce the uncertainty and misinformation of the original\ngraph, we construct a feature graph as a complementary view via k-Nearest\nNeighbor method. The feature graph can be used to contrast at node-level to\ncapture the local relation. 
Besides, we retain the global topological structure\ninformation by maximizing the mutual information (MI) of the whole graph and\nfeature embeddings, which is theoretically reduced to exchanging the feature\nembeddings of the feature and the original graphs to reconstruct themselves.\nExtensive experiments show that our method has quite superior performance on\nsemi-supervised node classification task and excellent robustness under noise\nperturbation on graph structure or node features.", + "authors": "Ruiyi Fang, Liangjian Wen, Zhao Kang, Jianzhuang Liu", + "published": "2022-09-02", + "updated": "2022-12-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2302.02909v1", + "title": "Spectral Augmentations for Graph Contrastive Learning", + "abstract": "Contrastive learning has emerged as a premier method for learning\nrepresentations with or without supervision. Recent studies have shown its\nutility in graph representation learning for pre-training. Despite successes,\nthe understanding of how to design effective graph augmentations that can\ncapture structural properties common to many different types of downstream\ngraphs remains incomplete. We propose a set of well-motivated graph\ntransformation operations derived via graph spectral analysis to provide a bank\nof candidates when constructing augmentations for a graph contrastive\nobjective, enabling contrastive learning to capture useful structural\nrepresentation from pre-training graph datasets. We first present a spectral\ngraph cropping augmentation that involves filtering nodes by applying\nthresholds to the eigenvalues of the leading Laplacian eigenvectors. Our second\nnovel augmentation reorders the graph frequency components in a structural\nLaplacian-derived position graph embedding. Further, we introduce a method that\nleads to improved views of local subgraphs by performing alignment via global\nrandom walk embeddings. Our experimental results indicate consistent\nimprovements in out-of-domain graph data transfer compared to state-of-the-art\ngraph contrastive learning methods, shedding light on how to design a graph\nlearner that is able to learn structural properties common to diverse graph\ntypes.", + "authors": "Amur Ghose, Yingxue Zhang, Jianye Hao, Mark Coates", + "published": "2023-02-06", + "updated": "2023-02-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2206.08561v1", + "title": "Boosting Graph Structure Learning with Dummy Nodes", + "abstract": "With the development of graph kernels and graph representation learning, many\nsuperior methods have been proposed to handle scalability and oversmoothing\nissues on graph structure learning. However, most of those strategies are\ndesigned based on practical experience rather than theoretical analysis. In\nthis paper, we use a particular dummy node connecting to all existing vertices\nwithout affecting original vertex and edge properties. We further prove that\nsuch the dummy node can help build an efficient monomorphic edge-to-vertex\ntransform and an epimorphic inverse to recover the original graph back. It also\nindicates that adding dummy nodes can preserve local and global structures for\nbetter graph representation learning. 
We extend graph kernels and graph neural\nnetworks with dummy nodes and conduct experiments on graph classification and\nsubgraph isomorphism matching tasks. Empirical results demonstrate that taking\ngraphs with dummy nodes as input significantly boosts graph structure learning,\nand using their edge-to-vertex graphs can also achieve similar results. We also\ndiscuss the gain of expressive power from the dummy in neural networks.", + "authors": "Xin Liu, Jiayang Cheng, Yangqiu Song, Xin Jiang", + "published": "2022-06-17", + "updated": "2022-06-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1104.5256v1", + "title": "Learning Undirected Graphical Models with Structure Penalty", + "abstract": "In undirected graphical models, learning the graph structure and learning the\nfunctions that relate the predictive variables (features) to the responses\ngiven the structure are two topics that have been widely investigated in\nmachine learning and statistics. Learning graphical models in two stages will\nhave problems because graph structure may change after considering the\nfeatures. The main contribution of this paper is the proposed method that\nlearns the graph structure and functions on the graph at the same time. General\ngraphical models with binary outcomes conditioned on predictive variables are\nproved to be equivalent to multivariate Bernoulli model. The reparameterization\nof the potential functions in graphical model by conditional log odds ratios in\nmultivariate Bernoulli model offers advantage in the representation of the\nconditional independence structure in the model. Additionally, we impose a\nstructure penalty on groups of conditional log odds ratios to learn the graph\nstructure. These groups of functions are designed with overlaps to enforce\nhierarchical function selection. In this way, we are able to shrink higher\norder interactions to obtain a sparse graph structure. Simulation studies show\nthat the method is able to recover the graph structure. The analysis of county\ndata from Census Bureau gives interesting relations between unemployment rate,\ncrime and others discovered by the model.", + "authors": "Shilin Ding", + "published": "2011-04-27", + "updated": "2011-04-27", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2006.02879v1", + "title": "Auto-decoding Graphs", + "abstract": "We present an approach to synthesizing new graph structures from empirically\nspecified distributions. The generative model is an auto-decoder that learns to\nsynthesize graphs from latent codes. The graph synthesis model is learned\njointly with an empirical distribution over the latent codes. Graphs are\nsynthesized using self-attention modules that are trained to identify likely\nconnectivity patterns. Graph-based normalizing flows are used to sample latent\ncodes from the distribution learned by the auto-decoder. The resulting model\ncombines accuracy and scalability. 
On benchmark datasets of large graphs, the\npresented model outperforms the state of the art by a factor of 1.5 in mean\naccuracy and average rank across at least three different graph statistics,\nwith a 2x speedup during inference.", + "authors": "Sohil Atul Shah, Vladlen Koltun", + "published": "2020-06-04", + "updated": "2020-06-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.16176v1", + "title": "A Survey on Structure-Preserving Graph Transformers", + "abstract": "The transformer architecture has shown remarkable success in various domains,\nsuch as natural language processing and computer vision. When it comes to graph\nlearning, transformers are required not only to capture the interactions\nbetween pairs of nodes but also to preserve graph structures connoting the\nunderlying relations and proximity between them, showing the expressive power\nto capture different graph structures. Accordingly, various\nstructure-preserving graph transformers have been proposed and widely used for\nvarious tasks, such as graph-level tasks in bioinformatics and\nchemoinformatics. However, strategies related to graph structure preservation\nhave not been well organized and systematized in the literature. In this paper,\nwe provide a comprehensive overview of structure-preserving graph transformers\nand generalize these methods from the perspective of their design objective.\nFirst, we divide strategies into four main groups: node feature modulation,\ncontext node sampling, graph rewriting, and transformer architecture\nimprovements. We then further divide the strategies according to the coverage\nand goals of graph structure preservation. Furthermore, we also discuss\nchallenges and future directions for graph transformer models to preserve the\ngraph structure and understand the nature of graphs.", + "authors": "Van Thuy Hoang, O-Joun Lee", + "published": "2024-01-29", + "updated": "2024-01-29", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.13769v1", + "title": "Multiview Graph Learning with Consensus Graph", + "abstract": "Graph topology inference, i.e., learning graphs from a given set of nodal\nobservations, is a significant task in many application domains. Existing\napproaches are mostly limited to learning a single graph assuming that the\nobserved data is homogeneous. This is problematic because many modern datasets\nare heterogeneous or mixed and involve multiple related graphs, i.e., multiview\ngraphs. Recent work proposing to learn multiview graphs ensures the similarity\nof learned view graphs through pairwise regularization, where each pair of\nviews is encouraged to have similar structures. However, this approach cannot\ninfer the shared structure across views. In this work, we propose an\nalternative method based on consensus regularization, where views are ensured\nto be similar through a learned consensus graph representing the common\nstructure of the views. In particular, we propose an optimization problem,\nwhere graph data is assumed to be smooth over the multiview graph and the\ntopology of the individual views and that of the consensus graph are learned,\nsimultaneously. Our optimization problem is designed to be general in the sense\nthat different regularization functions can be used depending on what the\nshared structure across views is. 
Moreover, we propose two regularization\nfunctions that extend fused and group graphical lasso to consensus based\nregularization. Proposed multiview graph learning is evaluated on simulated\ndata and shown to have better performance than existing methods. It is also\nemployed to infer the functional brain connectivity networks of multiple\nsubjects from their electroencephalogram (EEG) recordings. The proposed method\nreveals the structure shared by subjects as well as the characteristics unique\nto each subject.", + "authors": "Abdullah Karaaslanli, Selin Aviyente", + "published": "2024-01-24", + "updated": "2024-01-24", + "primary_cat": "eess.SP", + "cats": [ + "eess.SP", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2004.06846v1", + "title": "MxPool: Multiplex Pooling for Hierarchical Graph Representation Learning", + "abstract": "How to utilize deep learning methods for graph classification tasks has\nattracted considerable research attention in the past few years. Regarding\ngraph classification tasks, the graphs to be classified may have various graph\nsizes (i.e., different number of nodes and edges) and have various graph\nproperties (e.g., average node degree, diameter, and clustering coefficient).\nThe diverse property of graphs has imposed significant challenges on existing\ngraph learning techniques since diverse graphs have different best-fit\nhyperparameters. It is difficult to learn graph features from a set of diverse\ngraphs by a unified graph neural network. This motivates us to use a multiplex\nstructure in a diverse way and utilize a priori properties of graphs to guide\nthe learning. In this paper, we propose MxPool, which concurrently uses\nmultiple graph convolution/pooling networks to build a hierarchical learning\nstructure for graph representation learning tasks. Our experiments on numerous\ngraph classification benchmarks show that our MxPool has superiority over other\nstate-of-the-art graph representation learning methods.", + "authors": "Yanyan Liang, Yanfeng Zhang, Dechao Gao, Qian Xu", + "published": "2020-04-15", + "updated": "2020-04-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1905.10715v1", + "title": "Graph Attention Auto-Encoders", + "abstract": "Auto-encoders have emerged as a successful framework for unsupervised\nlearning. However, conventional auto-encoders are incapable of utilizing\nexplicit relations in structured data. To take advantage of relations in\ngraph-structured data, several graph auto-encoders have recently been proposed,\nbut they neglect to reconstruct either the graph structure or node attributes.\nIn this paper, we present the graph attention auto-encoder (GATE), a neural\nnetwork architecture for unsupervised representation learning on\ngraph-structured data. Our architecture is able to reconstruct graph-structured\ninputs, including both node attributes and the graph structure, through stacked\nencoder/decoder layers equipped with self-attention mechanisms. In the encoder,\nby considering node attributes as initial node representations, each layer\ngenerates new representations of nodes by attending over their neighbors'\nrepresentations. In the decoder, we attempt to reverse the encoding process to\nreconstruct node attributes. Moreover, node representations are regularized to\nreconstruct the graph structure. 
Our proposed architecture does not need to\nknow the graph structure upfront, and thus it can be applied to inductive\nlearning. Our experiments demonstrate competitive performance on several node\nclassification benchmark datasets for transductive and inductive tasks, even\nexceeding the performance of supervised learning baselines in most cases.", + "authors": "Amin Salehi, Hasan Davulcu", + "published": "2019-05-26", + "updated": "2019-05-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2302.03596v3", + "title": "Graph Generation with Diffusion Mixture", + "abstract": "Generation of graphs is a major challenge for real-world tasks that require\nunderstanding the complex nature of their non-Euclidean structures. Although\ndiffusion models have achieved notable success in graph generation recently,\nthey are ill-suited for modeling the topological properties of graphs since\nlearning to denoise the noisy samples does not explicitly learn the graph\nstructures to be generated. To tackle this limitation, we propose a generative\nframework that models the topology of graphs by explicitly learning the final\ngraph structures of the diffusion process. Specifically, we design the\ngenerative process as a mixture of endpoint-conditioned diffusion processes\nwhich is driven toward the predicted graph that results in rapid convergence.\nWe further introduce a simple parameterization of the mixture process and\ndevelop an objective for learning the final graph structure, which enables\nmaximum likelihood training. Through extensive experimental validation on\ngeneral graph and 2D/3D molecule generation tasks, we show that our method\noutperforms previous generative models, generating graphs with correct topology\nwith both continuous (e.g. 3D coordinates) and discrete (e.g. atom types)\nfeatures. Our code is available at https://github.com/harryjo97/DruM.", + "authors": "Jaehyeong Jo, Dongki Kim, Sung Ju Hwang", + "published": "2023-02-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1901.07439v1", + "title": "Multiple Graph Adversarial Learning", + "abstract": "Recently, Graph Convolutional Networks (GCNs) have been widely studied for\ngraph-structured data representation and learning. However, in many real\napplications, data are coming with multiple graphs, and it is non-trivial to\nadapt GCNs to deal with data representation with multiple graph structures. One\nmain challenge for multi-graph representation is how to exploit both structure\ninformation of each individual graph and correlation information across\nmultiple graphs simultaneously. In this paper, we propose a novel Multiple\nGraph Adversarial Learning (MGAL) framework for multi-graph representation and\nlearning. MGAL aims to learn an optimal structure-invariant and consistent\nrepresentation for multiple graphs in a common subspace via a novel adversarial\nlearning framework, which thus incorporates both structure information of\nintra-graph and correlation information of inter-graphs simultaneously. 
Based\non MGAL, we then provide a unified network for semi-supervised learning task.\nPromising experimental results demonstrate the effectiveness of MGAL model.", + "authors": "Bo Jiang, Ziyan Zhang, Jin Tang, Bin Luo", + "published": "2019-01-22", + "updated": "2019-01-22", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1611.05181v3", + "title": "Graph Learning from Data under Structural and Laplacian Constraints", + "abstract": "Graphs are fundamental mathematical structures used in various fields to\nrepresent data, signals and processes. In this paper, we propose a novel\nframework for learning/estimating graphs from data. The proposed framework\nincludes (i) formulation of various graph learning problems, (ii) their\nprobabilistic interpretations and (iii) associated algorithms. Specifically,\ngraph learning problems are posed as estimation of graph Laplacian matrices\nfrom some observed data under given structural constraints (e.g., graph\nconnectivity and sparsity level). From a probabilistic perspective, the\nproblems of interest correspond to maximum a posteriori (MAP) parameter\nestimation of Gaussian-Markov random field (GMRF) models, whose precision\n(inverse covariance) is a graph Laplacian matrix. For the proposed graph\nlearning problems, specialized algorithms are developed by incorporating the\ngraph Laplacian and structural constraints. The experimental results\ndemonstrate that the proposed algorithms outperform the current\nstate-of-the-art methods in terms of accuracy and computational efficiency.", + "authors": "Hilmi E. Egilmez, Eduardo Pavez, Antonio Ortega", + "published": "2016-11-16", + "updated": "2017-07-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.02664v2", + "title": "Structure-free Graph Condensation: From Large-scale Graphs to Condensed Graph-free Data", + "abstract": "Graph condensation, which reduces the size of a large-scale graph by\nsynthesizing a small-scale condensed graph as its substitution, has immediate\nbenefits for various graph learning tasks. However, existing graph condensation\nmethods rely on the joint optimization of nodes and structures in the condensed\ngraph, and overlook critical issues in effectiveness and generalization\nability. In this paper, we advocate a new Structure-Free Graph Condensation\nparadigm, named SFGC, to distill a large-scale graph into a small-scale graph\nnode set without explicit graph structures, i.e., graph-free data. Our idea is\nto implicitly encode topology structure information into the node attributes in\nthe synthesized graph-free data, whose topology is reduced to an identity\nmatrix. Specifically, SFGC contains two collaborative components: (1) a\ntraining trajectory meta-matching scheme for effectively synthesizing\nsmall-scale graph-free data; (2) a graph neural feature score metric for\ndynamically evaluating the quality of the condensed data. Through training\ntrajectory meta-matching, SFGC aligns the long-term GNN learning behaviors\nbetween the large-scale graph and the condensed small-scale graph-free data,\nensuring comprehensive and compact transfer of informative knowledge to the\ngraph-free data. 
Afterward, the underlying condensed graph-free data would be\ndynamically evaluated with the graph neural feature score, which is a\nclosed-form metric for ensuring the excellent expressiveness of the condensed\ngraph-free data. Extensive experiments verify the superiority of SFGC across\ndifferent condensation ratios.", + "authors": "Xin Zheng, Miao Zhang, Chunyang Chen, Quoc Viet Hung Nguyen, Xingquan Zhu, Shirui Pan", + "published": "2023-06-05", + "updated": "2023-10-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2111.03262v2", + "title": "CGCL: Collaborative Graph Contrastive Learning without Handcrafted Graph Data Augmentations", + "abstract": "Unsupervised graph representation learning is a non-trivial topic. The\nsuccess of contrastive methods in the unsupervised representation learning on\nstructured data inspires similar attempts on the graph. Existing graph\ncontrastive learning (GCL) aims to learn the invariance across multiple\naugmentation views, which renders it heavily reliant on the handcrafted graph\naugmentations. However, inappropriate graph data augmentations can potentially\njeopardize such invariance. In this paper, we show the potential hazards of\ninappropriate augmentations and then propose a novel Collaborative Graph\nContrastive Learning framework (CGCL). This framework harnesses multiple graph\nencoders to observe the graph. Features observed from different encoders serve\nas the contrastive views in contrastive learning, which avoids inducing\nunstable perturbation and guarantees the invariance. To ensure the\ncollaboration among diverse graph encoders, we propose the concepts of\nasymmetric architecture and complementary encoders as the design principle. To\nfurther prove the rationality, we utilize two quantitative metrics to measure\nthe assembly of CGCL respectively. Extensive experiments demonstrate the\nadvantages of CGCL in unsupervised graph-level representation learning and the\npotential of collaborative framework. The source code for reproducibility is\navailable at https://github.com/zhangtia16/CGCL", + "authors": "Tianyu Zhang, Yuxiang Ren, Wenzheng Feng, Weitao Du, Xuecang Zhang", + "published": "2021-11-05", + "updated": "2024-04-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.07699v2", + "title": "Time-aware Graph Structure Learning via Sequence Prediction on Temporal Graphs", + "abstract": "Temporal Graph Learning, which aims to model the time-evolving nature of\ngraphs, has gained increasing attention and achieved remarkable performance\nrecently. However, in reality, graph structures are often incomplete and noisy,\nwhich hinders temporal graph networks (TGNs) from learning informative\nrepresentations. Graph contrastive learning uses data augmentation to generate\nplausible variations of existing data and learn robust representations.\nHowever, rule-based augmentation approaches may be suboptimal as they lack\nlearnability and fail to leverage rich information from downstream tasks. To\naddress these issues, we propose a Time-aware Graph Structure Learning (TGSL)\napproach via sequence prediction on temporal graphs, which learns better graph\nstructures for downstream tasks through adding potential temporal edges. 
In\nparticular, it predicts time-aware context embedding based on previously\nobserved interactions and uses the Gumble-Top-K to select the closest candidate\nedges to this context embedding. Additionally, several candidate sampling\nstrategies are proposed to ensure both efficiency and diversity. Furthermore,\nwe jointly learn the graph structure and TGNs in an end-to-end manner and\nperform inference on the refined graph. Extensive experiments on temporal link\nprediction benchmarks demonstrate that TGSL yields significant gains for the\npopular TGNs such as TGAT and GraphMixer, and it outperforms other contrastive\nlearning methods on temporal graphs. We release the code at\nhttps://github.com/ViktorAxelsen/TGSL.", + "authors": "Haozhen Zhang, Xueting Han, Xi Xiao, Jing Bai", + "published": "2023-06-13", + "updated": "2023-08-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1909.11594v1", + "title": "Structured Graph Learning Via Laplacian Spectral Constraints", + "abstract": "Learning a graph with a specific structure is essential for interpretability\nand identification of the relationships among data. It is well known that\nstructured graph learning from observed samples is an NP-hard combinatorial\nproblem. In this paper, we first show that for a set of important graph\nfamilies it is possible to convert the structural constraints of structure into\neigenvalue constraints of the graph Laplacian matrix. Then we introduce a\nunified graph learning framework, lying at the integration of the spectral\nproperties of the Laplacian matrix with Gaussian graphical modeling that is\ncapable of learning structures of a large class of graph families. The proposed\nalgorithms are provably convergent and practically amenable for large-scale\nsemi-supervised and unsupervised graph-based learning tasks. Extensive\nnumerical experiments with both synthetic and real data sets demonstrate the\neffectiveness of the proposed methods. An R package containing code for all the\nexperimental results is available at\nhttps://cran.r-project.org/package=spectralGraphTopology.", + "authors": "Sandeep Kumar, Jiaxi Ying, Jos'e Vin'icius de M. Cardoso, Daniel P. Palomar", + "published": "2019-09-24", + "updated": "2019-09-24", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "cs.SI", + "math.OC", + "stat.AP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2201.07409v2", + "title": "Dual Space Graph Contrastive Learning", + "abstract": "Unsupervised graph representation learning has emerged as a powerful tool to\naddress real-world problems and achieves huge success in the graph learning\ndomain. Graph contrastive learning is one of the unsupervised graph\nrepresentation learning methods, which recently attracts attention from\nresearchers and has achieved state-of-the-art performances on various tasks.\nThe key to the success of graph contrastive learning is to construct proper\ncontrasting pairs to acquire the underlying structural semantics of the graph.\nHowever, this key part is not fully explored currently, most of the ways\ngenerating contrasting pairs focus on augmenting or perturbating graph\nstructures to obtain different views of the input graph. But such strategies\ncould degrade the performances via adding noise into the graph, which may\nnarrow down the field of the applications of graph contrastive learning. 
In\nthis paper, we propose a novel graph contrastive learning method, namely\n\\textbf{D}ual \\textbf{S}pace \\textbf{G}raph \\textbf{C}ontrastive (DSGC)\nLearning, to conduct graph contrastive learning among views generated in\ndifferent spaces including the hyperbolic space and the Euclidean space. Since\nboth spaces have their own advantages to represent graph data in the embedding\nspaces, we hope to utilize graph contrastive learning to bridge the spaces and\nleverage advantages from both sides. The comparison experiment results show\nthat DSGC achieves competitive or better performances among all the datasets.\nIn addition, we conduct extensive experiments to analyze the impact of\ndifferent graph encoders on DSGC, giving insights about how to better leverage\nthe advantages of contrastive learning between different spaces.", + "authors": "Haoran Yang, Hongxu Chen, Shirui Pan, Lin Li, Philip S. Yu, Guandong Xu", + "published": "2022-01-19", + "updated": "2022-03-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1802.04407v2", + "title": "Adversarially Regularized Graph Autoencoder for Graph Embedding", + "abstract": "Graph embedding is an effective method to represent graph data in a low\ndimensional space for graph analytics. Most existing embedding algorithms\ntypically focus on preserving the topological structure or minimizing the\nreconstruction errors of graph data, but they have mostly ignored the data\ndistribution of the latent codes from the graphs, which often results in\ninferior embedding in real-world graph data. In this paper, we propose a novel\nadversarial graph embedding framework for graph data. The framework encodes the\ntopological structure and node content in a graph to a compact representation,\non which a decoder is trained to reconstruct the graph structure. Furthermore,\nthe latent representation is enforced to match a prior distribution via an\nadversarial training scheme. To learn a robust embedding, two variants of\nadversarial approaches, adversarially regularized graph autoencoder (ARGA) and\nadversarially regularized variational graph autoencoder (ARVGA), are developed.\nExperimental studies on real-world graphs validate our design and demonstrate\nthat our algorithms outperform baselines by a wide margin in link prediction,\ngraph clustering, and graph visualization tasks.", + "authors": "Shirui Pan, Ruiqi Hu, Guodong Long, Jing Jiang, Lina Yao, Chengqi Zhang", + "published": "2018-02-13", + "updated": "2019-01-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2106.10124v1", + "title": "Graph Context Encoder: Graph Feature Inpainting for Graph Generation and Self-supervised Pretraining", + "abstract": "We propose the Graph Context Encoder (GCE), a simple but efficient approach\nfor graph representation learning based on graph feature masking and\nreconstruction.\n GCE models are trained to efficiently reconstruct input graphs similarly to a\ngraph autoencoder where node and edge labels are masked. In particular, our\nmodel is also allowed to change graph structures by masking and reconstructing\ngraphs augmented by random pseudo-edges.\n We show that GCE can be used for novel graph generation, with applications\nfor molecule generation. 
Used as a pretraining method, we also show that GCE\nimproves baseline performances in supervised classification tasks tested on\nmultiple standard benchmark graph datasets.", + "authors": "Oriel Frigo, R\u00e9my Brossard, David Dehaene", + "published": "2021-06-18", + "updated": "2021-06-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "68T07" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1903.00614v1", + "title": "GAP: Generalizable Approximate Graph Partitioning Framework", + "abstract": "Graph partitioning is the problem of dividing the nodes of a graph into\nbalanced partitions while minimizing the edge cut across the partitions. Due to\nits combinatorial nature, many approximate solutions have been developed,\nincluding variants of multi-level methods and spectral clustering. We propose\nGAP, a Generalizable Approximate Partitioning framework that takes a deep\nlearning approach to graph partitioning. We define a differentiable loss\nfunction that represents the partitioning objective and use backpropagation to\noptimize the network parameters. Unlike baselines that redo the optimization\nper graph, GAP is capable of generalization, allowing us to train models that\nproduce performant partitions at inference time, even on unseen graphs.\nFurthermore, because we learn the representation of the graph while jointly\noptimizing for the partitioning loss function, GAP can be easily tuned for a\nvariety of graph structures. We evaluate the performance of GAP on graphs of\nvarying sizes and structures, including graphs of widely used machine learning\nmodels (e.g., ResNet, VGG, and Inception-V3), scale-free graphs, and random\ngraphs. We show that GAP achieves competitive partitions while being up to 100\ntimes faster than the baseline and generalizes to unseen graphs.", + "authors": "Azade Nazi, Will Hang, Anna Goldie, Sujith Ravi, Azalia Mirhoseini", + "published": "2019-03-02", + "updated": "2019-03-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2104.09304v1", + "title": "A Tunable Model for Graph Generation Using LSTM and Conditional VAE", + "abstract": "With the development of graph applications, generative models for graphs have\nbeen more crucial. Classically, stochastic models that generate graphs with a\npre-defined probability of edges and nodes have been studied. Recently, some\nmodels that reproduce the structural features of graphs by learning from actual\ngraph data using machine learning have been studied. However, in these\nconventional studies based on machine learning, structural features of graphs\ncan be learned from data, but it is not possible to tune features and generate\ngraphs with specific features. In this paper, we propose a generative model\nthat can tune specific features, while learning structural features of a graph\nfrom data. 
With a dataset of graphs with various features generated by a\nstochastic model, we confirm that our model can generate a graph with specific\nfeatures.", + "authors": "Shohei Nakazawa, Yoshiki Sato, Kenji Nakagawa, Sho Tsugawa, Kohei Watabe", + "published": "2021-04-15", + "updated": "2021-04-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NI", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1911.05954v3", + "title": "Hierarchical Graph Pooling with Structure Learning", + "abstract": "Graph Neural Networks (GNNs), which generalize deep neural networks to\ngraph-structured data, have drawn considerable attention and achieved\nstate-of-the-art performance in numerous graph related tasks. However, existing\nGNN models mainly focus on designing graph convolution operations. The graph\npooling (or downsampling) operations, that play an important role in learning\nhierarchical representations, are usually overlooked. In this paper, we propose\na novel graph pooling operator, called Hierarchical Graph Pooling with\nStructure Learning (HGP-SL), which can be integrated into various graph neural\nnetwork architectures. HGP-SL incorporates graph pooling and structure learning\ninto a unified module to generate hierarchical representations of graphs. More\nspecifically, the graph pooling operation adaptively selects a subset of nodes\nto form an induced subgraph for the subsequent layers. To preserve the\nintegrity of graph's topological information, we further introduce a structure\nlearning mechanism to learn a refined graph structure for the pooled graph at\neach layer. By combining HGP-SL operator with graph neural networks, we perform\ngraph level representation learning with focus on graph classification task.\nExperimental results on six widely used benchmarks demonstrate the\neffectiveness of our proposed model.", + "authors": "Zhen Zhang, Jiajun Bu, Martin Ester, Jianfeng Zhang, Chengwei Yao, Zhi Yu, Can Wang", + "published": "2019-11-14", + "updated": "2019-12-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2312.04762v1", + "title": "The Graph Lottery Ticket Hypothesis: Finding Sparse, Informative Graph Structure", + "abstract": "Graph learning methods help utilize implicit relationships among data items,\nthereby reducing training label requirements and improving task performance.\nHowever, determining the optimal graph structure for a particular learning task\nremains a challenging research problem.\n In this work, we introduce the Graph Lottery Ticket (GLT) Hypothesis - that\nthere is an extremely sparse backbone for every graph, and that graph learning\nalgorithms attain comparable performance when trained on that subgraph as on\nthe full graph. We identify and systematically study 8 key metrics of interest\nthat directly influence the performance of graph learning algorithms.\nSubsequently, we define the notion of a \"winning ticket\" for graph structure -\nan extremely sparse subset of edges that can deliver a robust approximation of\nthe entire graph's performance. We propose a straightforward and efficient\nalgorithm for finding these GLTs in arbitrary graphs. 
Empirically, we observe\nthat performance of different graph learning algorithms can be matched or even\nexceeded on graphs with the average degree as low as 5.", + "authors": "Anton Tsitsulin, Bryan Perozzi", + "published": "2023-12-08", + "updated": "2023-12-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2006.14002v1", + "title": "Bi-Level Graph Neural Networks for Drug-Drug Interaction Prediction", + "abstract": "We introduce Bi-GNN for modeling biological link prediction tasks such as\ndrug-drug interaction (DDI) and protein-protein interaction (PPI). Taking\ndrug-drug interaction as an example, existing methods using machine learning\neither only utilize the link structure between drugs without using the graph\nrepresentation of each drug molecule, or only leverage the individual drug\ncompound structures without using graph structure for the higher-level DDI\ngraph. The key idea of our method is to fundamentally view the data as a\nbi-level graph, where the highest level graph represents the interaction\nbetween biological entities (interaction graph), and each biological entity\nitself is further expanded to its intrinsic graph representation\n(representation graphs), where the graph is either flat like a drug compound or\nhierarchical like a protein with amino acid level graph, secondary structure,\ntertiary structure, etc. Our model not only allows the usage of information\nfrom both the high-level interaction graph and the low-level representation\ngraphs, but also offers a baseline for future research opportunities to address\nthe bi-level nature of the data.", + "authors": "Yunsheng Bai, Ken Gu, Yizhou Sun, Wei Wang", + "published": "2020-06-11", + "updated": "2020-06-11", + "primary_cat": "cs.CE", + "cats": [ + "cs.CE", + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1905.06393v1", + "title": "IPC: A Benchmark Data Set for Learning with Graph-Structured Data", + "abstract": "Benchmark data sets are an indispensable ingredient of the evaluation of\ngraph-based machine learning methods. We release a new data set, compiled from\nInternational Planning Competitions (IPC), for benchmarking graph\nclassification, regression, and related tasks. Apart from the graph\nconstruction (based on AI planning problems) that is interesting in its own\nright, the data set possesses distinctly different characteristics from\npopularly used benchmarks. The data set, named IPC, consists of two\nself-contained versions, grounded and lifted, both including graphs of large\nand skewedly distributed sizes, posing substantial challenges for the\ncomputation of graph models such as graph kernels and graph neural networks.\nThe graphs in this data set are directed and the lifted version is acyclic,\noffering the opportunity of benchmarking specialized models for directed\n(acyclic) structures. Moreover, the graph generator and the labeling are\ncomputer programmed; thus, the data set may be extended easily if a larger\nscale is desired. 
The data set is accessible from\n\\url{https://github.com/IBM/IPC-graph-data}.", + "authors": "Patrick Ferber, Tengfei Ma, Siyu Huo, Jie Chen, Michael Katz", + "published": "2019-05-15", + "updated": "2019-05-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2202.10688v2", + "title": "Graph Lifelong Learning: A Survey", + "abstract": "Graph learning is a popular approach for performing machine learning on\ngraph-structured data. It has revolutionized the machine learning ability to\nmodel graph data to address downstream tasks. Its application is wide due to\nthe availability of graph data ranging from all types of networks to\ninformation systems. Most graph learning methods assume that the graph is\nstatic and its complete structure is known during training. This limits their\napplicability since they cannot be applied to problems where the underlying\ngraph grows over time and/or new tasks emerge incrementally. Such applications\nrequire a lifelong learning approach that can learn the graph continuously and\naccommodate new information whilst retaining previously learned knowledge.\nLifelong learning methods that enable continuous learning in regular domains\nlike images and text cannot be directly applied to continuously evolving graph\ndata, due to its irregular structure. As a result, graph lifelong learning is\ngaining attention from the research community. This survey paper provides a\ncomprehensive overview of recent advancements in graph lifelong learning,\nincluding the categorization of existing methods, and the discussions of\npotential applications and open research problems.", + "authors": "Falih Gozi Febrinanto, Feng Xia, Kristen Moore, Chandra Thapa, Charu Aggarwal", + "published": "2022-02-22", + "updated": "2022-11-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "68T07, 68T05", + "I.2.6" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2402.16374v2", + "title": "Graph Learning under Distribution Shifts: A Comprehensive Survey on Domain Adaptation, Out-of-distribution, and Continual Learning", + "abstract": "Graph learning plays a pivotal role and has gained significant attention in\nvarious application scenarios, from social network analysis to recommendation\nsystems, for its effectiveness in modeling complex data relations represented\nby graph structural data. In reality, the real-world graph data typically show\ndynamics over time, with changing node attributes and edge structure, leading\nto the severe graph data distribution shift issue. This issue is compounded by\nthe diverse and complex nature of distribution shifts, which can significantly\nimpact the performance of graph learning methods in degraded generalization and\nadaptation capabilities, posing a substantial challenge to their effectiveness.\nIn this survey, we provide a comprehensive review and summary of the latest\napproaches, strategies, and insights that address distribution shifts within\nthe context of graph learning. Concretely, according to the observability of\ndistributions in the inference stage and the availability of sufficient\nsupervision information in the training stage, we categorize existing graph\nlearning methods into several essential scenarios, including graph domain\nadaptation learning, graph out-of-distribution learning, and graph continual\nlearning. 
For each scenario, a detailed taxonomy is proposed, with specific\ndescriptions and discussions of existing progress made in distribution-shifted\ngraph learning. Additionally, we discuss the potential applications and future\ndirections for graph learning under distribution shifts with a systematic\nanalysis of the current state in this field. The survey is positioned to\nprovide general guidance for the development of effective graph learning\nalgorithms in handling graph distribution shifts, and to stimulate future\nresearch and advancements in this area.", + "authors": "Man Wu, Xin Zheng, Qin Zhang, Xiao Shen, Xiong Luo, Xingquan Zhu, Shirui Pan", + "published": "2024-02-26", + "updated": "2024-03-07", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1912.10206v1", + "title": "How Robust Are Graph Neural Networks to Structural Noise?", + "abstract": "Graph neural networks (GNNs) are an emerging model for learning graph\nembeddings and making predictions on graph structured data. However, robustness\nof graph neural networks is not yet well-understood. In this work, we focus on\nnode structural identity predictions, where a representative GNN model is able\nto achieve near-perfect accuracy. We also show that the same GNN model is not\nrobust to addition of structural noise, through a controlled dataset and set of\nexperiments. Finally, we show that under the right conditions, graph-augmented\ntraining is capable of significantly improving robustness to structural noise.", + "authors": "James Fox, Sivasankaran Rajamanickam", + "published": "2019-12-21", + "updated": "2019-12-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2204.01855v2", + "title": "A Survey on Graph Representation Learning Methods", + "abstract": "Graphs representation learning has been a very active research area in recent\nyears. The goal of graph representation learning is to generate graph\nrepresentation vectors that capture the structure and features of large graphs\naccurately. This is especially important because the quality of the graph\nrepresentation vectors will affect the performance of these vectors in\ndownstream tasks such as node classification, link prediction and anomaly\ndetection. Many techniques are proposed for generating effective graph\nrepresentation vectors. Two of the most prevalent categories of graph\nrepresentation learning are graph embedding methods without using graph neural\nnets (GNN), which we denote as non-GNN based graph embedding methods, and graph\nneural nets (GNN) based methods. Non-GNN graph embedding methods are based on\ntechniques such as random walks, temporal point processes and neural network\nlearning methods. GNN-based methods, on the other hand, are the application of\ndeep learning on graph data. In this survey, we provide an overview of these\ntwo categories and cover the current state-of-the-art methods for both static\nand dynamic graphs. 
Finally, we explore some open and ongoing research\ndirections for future work.", + "authors": "Shima Khoshraftar, Aijun An", + "published": "2022-04-04", + "updated": "2022-06-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2111.06679v2", + "title": "deepstruct -- linking deep learning and graph theory", + "abstract": "deepstruct connects deep learning models and graph theory such that different\ngraph structures can be imposed on neural networks or graph structures can be\nextracted from trained neural network models. For this, deepstruct provides\ndeep neural network models with different restrictions which can be created\nbased on an initial graph. Further, tools to extract graph structures from\ntrained models are available. This step of extracting graphs can be\ncomputationally expensive even for models of just a few dozen thousand\nparameters and poses a challenging problem. deepstruct supports research in\npruning, neural architecture search, automated network design and structure\nanalysis of neural networks.", + "authors": "Julian Stier, Michael Granitzer", + "published": "2021-11-12", + "updated": "2021-12-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE", + "I.2.0; F.0" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2203.09205v1", + "title": "SoK: Differential Privacy on Graph-Structured Data", + "abstract": "In this work, we study the applications of differential privacy (DP) in the\ncontext of graph-structured data. We discuss the formulations of DP applicable\nto the publication of graphs and their associated statistics as well as machine\nlearning on graph-based data, including graph neural networks (GNNs). The\nformulation of DP in the context of graph-structured data is difficult, as\nindividual data points are interconnected (often non-linearly or sparsely).\nThis connectivity complicates the computation of individual privacy loss in\ndifferentially private learning. The problem is exacerbated by an absence of a\nsingle, well-established formulation of DP in graph settings. This issue\nextends to the domain of GNNs, rendering private machine learning on\ngraph-structured data a challenging task. A lack of prior systematisation work\nmotivated us to study graph-based learning from a privacy perspective. In this\nwork, we systematise different formulations of DP on graphs, discuss challenges\nand promising applications, including the GNN domain. We compare and separate\nworks into graph analysis tasks and graph learning tasks with GNNs. Finally, we\nconclude our work with a discussion of open questions and potential directions\nfor further research in this area.", + "authors": "Tamara T. Mueller, Dmitrii Usynin, Johannes C. Paetzold, Daniel Rueckert, Georgios Kaissis", + "published": "2022-03-17", + "updated": "2022-03-17", + "primary_cat": "cs.CR", + "cats": [ + "cs.CR", + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2209.07817v2", + "title": "SPGP: Structure Prototype Guided Graph Pooling", + "abstract": "While graph neural networks (GNNs) have been successful for node\nclassification tasks and link prediction tasks in graph, learning graph-level\nrepresentations still remains a challenge. 
For the graph-level representation,\nit is important to learn both the representation of neighboring nodes, i.e.,\naggregation, and graph structural information. A number of graph pooling\nmethods have been developed for this goal. However, most of the existing\npooling methods utilize the k-hop neighborhood without considering explicit\nstructural information in a graph. In this paper, we propose Structure\nPrototype Guided Pooling (SPGP) that utilizes prior graph structures to\novercome the limitation. SPGP formulates graph structures as learnable\nprototype vectors and computes the affinity between nodes and prototype\nvectors. This leads to a novel node scoring scheme that prioritizes informative\nnodes while encapsulating the useful structures of the graph. Our experimental\nresults show that SPGP outperforms state-of-the-art graph pooling methods on\ngraph classification benchmark datasets in both accuracy and scalability.", + "authors": "Sangseon Lee, Dohoon Lee, Yinhua Piao, Sun Kim", + "published": "2022-09-16", + "updated": "2023-03-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2101.06861v3", + "title": "Discrete Graph Structure Learning for Forecasting Multiple Time Series", + "abstract": "Time series forecasting is an extensively studied subject in statistics,\neconomics, and computer science. Exploration of the correlation and causation\namong the variables in a multivariate time series shows promise in enhancing\nthe performance of a time series model. When using deep neural networks as\nforecasting models, we hypothesize that exploiting the pairwise information\namong multiple (multivariate) time series also improves their forecast. If an\nexplicit graph structure is known, graph neural networks (GNNs) have been\ndemonstrated as powerful tools to exploit the structure. In this work, we\npropose learning the structure simultaneously with the GNN if the graph is\nunknown. We cast the problem as learning a probabilistic graph model through\noptimizing the mean performance over the graph distribution. The distribution\nis parameterized by a neural network so that discrete graphs can be sampled\ndifferentiably through reparameterization. Empirical evaluations show that our\nmethod is simpler, more efficient, and better performing than a recently\nproposed bilevel learning approach for graph structure learning, as well as a\nbroad array of forecasting models, either deep or non-deep learning based, and\ngraph or non-graph based.", + "authors": "Chao Shang, Jie Chen, Jinbo Bi", + "published": "2021-01-18", + "updated": "2021-04-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2003.04508v3", + "title": "Unsupervised Graph Embedding via Adaptive Graph Learning", + "abstract": "Graph autoencoders (GAEs) are powerful tools in representation learning for\ngraph embedding. However, the performance of GAEs is very dependent on the\nquality of the graph structure, i.e., of the adjacency matrix. In other words,\nGAEs would perform poorly when the adjacency matrix is incomplete or\ndisturbed. In this paper, two novel unsupervised graph embedding methods,\nunsupervised graph embedding via adaptive graph learning (BAGE) and\nunsupervised graph embedding via variational adaptive graph learning (VBAGE),\nare proposed.
The proposed methods expand the application range of GAEs on\ngraph embedding, i.e., on general datasets without a graph structure.\nMeanwhile, the adaptive learning mechanism can initialize the adjacency matrix\nwithout being affected by the parameter. Besides that, the latent representations\nare embedded in the Laplacian graph structure to preserve the topology\nstructure of the graph in the vector space. Moreover, the adjacency matrix can\nbe self-learned for better embedding performance when the original graph\nstructure is incomplete. With adaptive learning, the proposed methods are much\nmore robust to the graph structure. Experimental studies on several datasets\nvalidate our design and demonstrate that our methods outperform baselines by a\nwide margin in node clustering, node classification, and graph visualization\ntasks.", + "authors": "Rui Zhang, Yunxing Zhang, Xuelong Li", + "published": "2020-03-10", + "updated": "2021-03-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.00876v1", + "title": "Balanced Graph Structure Information for Brain Disease Detection", + "abstract": "Analyzing connections between brain regions of interest (ROI) is vital to\ndetect neurological disorders such as autism or schizophrenia. Recent\nadvancements employ graph neural networks (GNNs) to utilize graph structures in\nbrains, improving detection performance. Current methods use correlation\nmeasures between ROI's blood-oxygen-level-dependent (BOLD) signals to generate\nthe graph structure. Other methods use the training samples to learn the\noptimal graph structure through end-to-end learning. However, implementing\nthose methods independently leads to some issues with noisy data for the\ncorrelation graphs and overfitting problems for the optimal graph. In this\nwork, we propose Bargrain (balanced graph structure for brains), which models\ntwo graph structures: a filtered correlation matrix and an optimal sample graph\nusing graph convolution networks (GCNs). This approach aims to get advantages\nfrom both graphs and address the limitations of only relying on a single type\nof structure. Based on our extensive experiments, Bargrain outperforms\nstate-of-the-art methods in classification tasks on brain disease datasets, as\nmeasured by average F1 scores.", + "authors": "Falih Gozi Febrinanto, Mujie Liu, Feng Xia", + "published": "2023-12-30", + "updated": "2023-12-30", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "q-bio.NC" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2007.16002v1", + "title": "Graph Convolutional Networks using Heat Kernel for Semi-supervised Learning", + "abstract": "Graph convolutional networks have gained remarkable success in semi-supervised\nlearning on graph-structured data. The key to graph-based semi-supervised\nlearning is capturing the smoothness of labels or features over nodes exerted\nby the graph structure. Previous methods, both spectral and spatial,\ndefine graph convolution as a weighted average over neighboring\nnodes, and then learn graph convolution kernels to leverage the smoothness to\nimprove the performance of graph-based semi-supervised learning. One open\nchallenge is how to determine an appropriate neighborhood that reflects relevant\ninformation of smoothness manifested in the graph structure.
In this paper, we\npropose GraphHeat, leveraging heat kernel to enhance low-frequency filters and\nenforce smoothness in the signal variation on the graph. GraphHeat leverages\nthe local structure of target node under heat diffusion to determine its\nneighboring nodes flexibly, without the constraint of order suffered by\nprevious methods. GraphHeat achieves state-of-the-art results in the task of\ngraph-based semi-supervised classification across three benchmark datasets:\nCora, Citeseer and Pubmed.", + "authors": "Bingbing Xu, Huawei Shen, Qi Cao, Keting Cen, Xueqi Cheng", + "published": "2020-07-27", + "updated": "2020-07-27", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2212.08966v4", + "title": "Graph Learning and Its Advancements on Large Language Models: A Holistic Survey", + "abstract": "Graph learning is a prevalent domain that endeavors to learn the intricate\nrelationships among nodes and the topological structure of graphs. Over the\nyears, graph learning has transcended from graph theory to graph data mining.\nWith the advent of representation learning, it has attained remarkable\nperformance in diverse scenarios. Owing to its extensive application prospects,\ngraph learning attracts copious attention. While some researchers have\naccomplished impressive surveys on graph learning, they failed to connect\nrelated objectives, methods, and applications in a more coherent way. As a\nresult, they did not encompass current ample scenarios and challenging problems\ndue to the rapid expansion of graph learning. Particularly, large language\nmodels have recently had a disruptive effect on human life, but they also show\nrelative weakness in structured scenarios. The question of how to make these\nmodels more powerful with graph learning remains open. Our survey focuses on\nthe most recent advancements in integrating graph learning with pre-trained\nlanguage models, specifically emphasizing their application within the domain\nof large language models. Different from previous surveys on graph learning, we\nprovide a holistic review that analyzes current works from the perspective of\ngraph structure, and discusses the latest applications, trends, and challenges\nin graph learning. Specifically, we commence by proposing a taxonomy and then\nsummarize the methods employed in graph learning. We then provide a detailed\nelucidation of mainstream applications. Finally, we propose future directions.", + "authors": "Shaopeng Wei, Yu Zhao, Xingyan Chen, Qing Li, Fuzhen Zhuang, Ji Liu, Fuji Ren, Gang Kou", + "published": "2022-12-17", + "updated": "2023-11-18", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.11264v1", + "title": "GraphGLOW: Universal and Generalizable Structure Learning for Graph Neural Networks", + "abstract": "Graph structure learning is a well-established problem that aims at\noptimizing graph structures adaptive to specific graph datasets to help message\npassing neural networks (i.e., GNNs) to yield effective and robust node\nembeddings. However, the common limitation of existing models lies in the\nunderlying \\textit{closed-world assumption}: the testing graph is the same as\nthe training graph. 
This premise requires independently training the structure\nlearning model from scratch for each graph dataset, which leads to prohibitive\ncomputation costs and potential risks for serious over-fitting. To mitigate\nthese issues, this paper explores a new direction that moves forward to learn a\nuniversal structure learning model that can generalize across graph datasets in\nan open world. We first introduce the mathematical definition of this novel\nproblem setting, and describe the model formulation from a probabilistic\ndata-generative aspect. Then we devise a general framework that coordinates a\nsingle graph-shared structure learner and multiple graph-specific GNNs to\ncapture the generalizable patterns of optimal message-passing topology across\ndatasets. The well-trained structure learner can directly produce adaptive\nstructures for unseen target graphs without any fine-tuning. Across diverse\ndatasets and various challenging cross-graph generalization protocols, our\nexperiments show that even without training on target graphs, the proposed\nmodel i) significantly outperforms expressive GNNs trained on input\n(non-optimized) topology, and ii) surprisingly performs on par with\nstate-of-the-art models that independently optimize adaptive structures for\nspecific target graphs, with notably orders-of-magnitude acceleration for\ntraining on the target graph.", + "authors": "Wentao Zhao, Qitian Wu, Chenxiao Yang, Junchi Yan", + "published": "2023-06-20", + "updated": "2023-06-20", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2202.08235v3", + "title": "Data Augmentation for Deep Graph Learning: A Survey", + "abstract": "Graph neural networks, a powerful deep learning tool to model\ngraph-structured data, have demonstrated remarkable performance on numerous\ngraph learning tasks. To address the data noise and data scarcity issues in\ndeep graph learning, the research on graph data augmentation has intensified\nlately. However, conventional data augmentation methods can hardly handle\ngraph-structured data which is defined in non-Euclidean space with\nmulti-modality. In this survey, we formally formulate the problem of graph data\naugmentation and further review the representative techniques and their\napplications in different deep graph learning problems. Specifically, we first\npropose a taxonomy for graph data augmentation techniques and then provide a\nstructured review by categorizing the related work based on the augmented\ninformation modalities. Moreover, we summarize the applications of graph data\naugmentation in two representative problems in data-centric deep graph\nlearning: (1) reliable graph learning which focuses on enhancing the utility of\ninput graph as well as the model capacity via graph data augmentation; and (2)\nlow-resource graph learning which targets on enlarging the labeled training\ndata scale through graph data augmentation. For each problem, we also provide a\nhierarchical problem taxonomy and review the existing literature related to\ngraph data augmentation. 
Finally, we point out promising research directions\nand the challenges in future research.", + "authors": "Kaize Ding, Zhe Xu, Hanghang Tong, Huan Liu", + "published": "2022-02-16", + "updated": "2022-11-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1803.03324v1", + "title": "Learning Deep Generative Models of Graphs", + "abstract": "Graphs are fundamental data structures which concisely capture the relational\nstructure in many important real-world domains, such as knowledge graphs,\nphysical and social interactions, language, and chemistry. Here we introduce a\npowerful new approach for learning generative models over graphs, which can\ncapture both their structure and attributes. Our approach uses graph neural\nnetworks to express probabilistic dependencies among a graph's nodes and edges,\nand can, in principle, learn distributions over any arbitrary graph. In a\nseries of experiments our results show that once trained, our models can\ngenerate good quality samples of both synthetic graphs as well as real\nmolecular graphs, both unconditionally and conditioned on data. Compared to\nbaselines that do not use graph-structured representations, our models often\nperform far better. We also explore key challenges of learning generative\nmodels of graphs, such as how to handle symmetries and ordering of elements\nduring the graph generation process, and offer possible solutions. Our work is\nthe first and most general approach for learning generative models over\narbitrary graphs, and opens new directions for moving away from restrictions of\nvector- and sequence-like knowledge representations, toward more expressive and\nflexible relational data structures.", + "authors": "Yujia Li, Oriol Vinyals, Chris Dyer, Razvan Pascanu, Peter Battaglia", + "published": "2018-03-08", + "updated": "2018-03-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2210.01489v1", + "title": "Generative Models and Learning Algorithms for Core-Periphery Structured Graphs", + "abstract": "We consider core-periphery structured graphs, which are graphs with a group\nof densely and sparsely connected nodes, respectively, referred to as core and\nperiphery nodes. The so-called core score of a node is related to the\nlikelihood of it being a core node. In this paper, we focus on learning the\ncore scores of a graph from its node attributes and connectivity structure. To\nthis end, we propose two classes of probabilistic graphical models: affine and\nnonlinear. First, we describe affine generative models to model the dependence\nof node attributes on its core scores, which determine the graph structure.\nNext, we discuss nonlinear generative models in which the partial correlations\nof node attributes influence the graph structure through latent core scores. We\ndevelop algorithms for inferring the model parameters and core scores of a\ngraph when both the graph structure and node attributes are available. When\nonly the node attributes of graphs are available, we jointly learn a\ncore-periphery structured graph and its core scores. 
We provide results from\nnumerical experiments on several synthetic and real-world datasets to\ndemonstrate the efficacy of the developed models and algorithms.", + "authors": "Sravanthi Gurugubelli, Sundeep Prabhakar Chepuri", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1911.08776v2", + "title": "Joint Embedding Learning of Educational Knowledge Graphs", + "abstract": "As an efficient model for knowledge organization, the knowledge graph has\nbeen widely adopted in several fields, e.g., biomedicine, sociology, and\neducation. And there is a steady trend of learning embedding representations of\nknowledge graphs to facilitate knowledge graph construction and downstream\ntasks. In general, knowledge graph embedding techniques aim to learn vectorized\nrepresentations which preserve the structural information of the graph. And\nconventional embedding learning models rely on structural relationships among\nentities and relations. However, in educational knowledge graphs, structural\nrelationships are not the focus. Instead, rich literals of the graphs are more\nvaluable. In this paper, we focus on this problem and propose a novel model for\nembedding learning of educational knowledge graphs. Our model considers both\nstructural and literal information and jointly learns embedding\nrepresentations. Three experimental graphs were constructed based on an\neducational knowledge graph which has been applied in real-world teaching. We\nconducted two experiments on the three graphs and other common benchmark\ngraphs. The experimental results proved the effectiveness of our model and its\nsuperiority over other baselines when processing educational knowledge graphs.", + "authors": "Siyu Yao, Ruijie Wang, Shen Sun, Derui Bu, Jun Liu", + "published": "2019-11-20", + "updated": "2019-12-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2103.10837v1", + "title": "Quantum machine learning of graph-structured data", + "abstract": "Graph structures are ubiquitous throughout the natural sciences. Here we\nconsider graph-structured quantum data and describe how to carry out its\nquantum machine learning via quantum neural networks. In particular, we\nconsider training data in the form of pairs of input and output quantum states\nassociated with the vertices of a graph, together with edges encoding\ncorrelations between the vertices. We explain how to systematically exploit\nthis additional graph structure to improve quantum learning algorithms. These\nalgorithms are numerically simulated and exhibit excellent learning behavior.\nScalable quantum implementations of the learning procedures are likely feasible\non the next generation of quantum computing devices.", + "authors": "Kerstin Beer, Megha Khosla, Julius K\u00f6hler, Tobias J. Osborne", + "published": "2021-03-19", + "updated": "2021-03-19", + "primary_cat": "quant-ph", + "cats": [ + "quant-ph" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2111.04286v1", + "title": "Deep Unsupervised Active Learning on Learnable Graphs", + "abstract": "Recently deep learning has been successfully applied to unsupervised active\nlearning. 
However, current methods attempt to learn a nonlinear transformation\nvia an auto-encoder while ignoring the sample relations, leaving huge room to\ndesign more effective representation learning mechanisms for unsupervised\nactive learning. In this paper, we propose a novel deep unsupervised Active\nLearning model via Learnable Graphs, named ALLG. ALLG benefits from learning\noptimal graph structures to acquire better sample representations and select\nrepresentative samples. To make the learnt graph structure more stable and\neffective, we take the $k$-nearest neighbor graph into account as a prior, and\nlearn a relation propagation graph structure. We also incorporate shortcut\nconnections among different layers, which can alleviate the well-known\nover-smoothing problem to some extent. To the best of our knowledge, this is\nthe first attempt to leverage graph structure learning for unsupervised active\nlearning. Extensive experiments performed on six datasets demonstrate the\nefficacy of our method.", + "authors": "Handong Ma, Changsheng Li, Xinchu Shi, Ye Yuan, Guoren Wang", + "published": "2021-11-08", + "updated": "2021-11-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1910.11390v2", + "title": "Deep Learning for Molecular Graphs with Tiered Graph Autoencoders and Graph Prediction", + "abstract": "Tiered graph autoencoders provide the architecture and mechanisms for\nlearning tiered latent representations and latent spaces for molecular graphs\nthat explicitly represent and utilize groups (e.g., functional groups). This\nenables the utilization and exploration of tiered molecular latent spaces,\neither individually - the node (atom) tier, the group tier, or the graph\n(molecule) tier - or jointly, as well as navigation across the tiers. In this\npaper, we discuss the use of tiered graph autoencoders together with graph\nprediction for molecular graphs. We show features of molecular graphs used, and\ngroups in molecular graphs identified for some sample molecules. We briefly\nreview graph prediction and the QM9 dataset for background information, and\ndiscuss the use of tiered graph embeddings for graph prediction, particularly\nweighted group pooling. We find that functional groups and ring groups\neffectively capture and represent the chemical essence of molecular graphs\n(structures). Further, tiered graph autoencoders and graph prediction together\nprovide effective, efficient and interpretable deep learning for molecular\ngraphs, with the former providing unsupervised, transferable learning and the\nlatter providing supervised, task-optimized learning.", + "authors": "Daniel T. Chang", + "published": "2019-10-24", + "updated": "2021-07-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "q-bio.BM" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2101.00082v1", + "title": "Bosonic Random Walk Networks for Graph Learning", + "abstract": "The development of Graph Neural Networks (GNNs) has led to great progress in\nmachine learning on graph-structured data. These networks operate via diffusing\ninformation across the graph nodes while capturing the structure of the graph.\nRecently, there has also been tremendous progress in quantum computing\ntechniques. In this work, we explore applications of multi-particle quantum\nwalks on diffusing information across graphs.
Our model is based on learning\nthe operators that govern the dynamics of quantum random walkers on graphs. We\ndemonstrate the effectiveness of our method on classification and regression\ntasks.", + "authors": "Shiv Shankar, Don Towsley", + "published": "2020-12-31", + "updated": "2020-12-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1611.07308v1", + "title": "Variational Graph Auto-Encoders", + "abstract": "We introduce the variational graph auto-encoder (VGAE), a framework for\nunsupervised learning on graph-structured data based on the variational\nauto-encoder (VAE). This model makes use of latent variables and is capable of\nlearning interpretable latent representations for undirected graphs. We\ndemonstrate this model using a graph convolutional network (GCN) encoder and a\nsimple inner product decoder. Our model achieves competitive results on a link\nprediction task in citation networks. In contrast to most existing models for\nunsupervised learning on graph-structured data and link prediction, our model\ncan naturally incorporate node features, which significantly improves\npredictive performance on a number of benchmark datasets.", + "authors": "Thomas N. Kipf, Max Welling", + "published": "2016-11-21", + "updated": "2016-11-21", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2005.03675v3", + "title": "Machine Learning on Graphs: A Model and Comprehensive Taxonomy", + "abstract": "There has been a surge of recent interest in learning representations for\ngraph-structured data. Graph representation learning methods have generally\nfallen into three main categories, based on the availability of labeled data.\nThe first, network embedding (such as shallow graph embedding or graph\nauto-encoders), focuses on learning unsupervised representations of relational\nstructure. The second, graph regularized neural networks, leverages graphs to\naugment neural network losses with a regularization objective for\nsemi-supervised learning. The third, graph neural networks, aims to learn\ndifferentiable functions over discrete topologies with arbitrary structure.\nHowever, despite the popularity of these areas there has been surprisingly\nlittle work on unifying the three paradigms. Here, we aim to bridge the gap\nbetween graph neural networks, network embedding and graph regularization\nmodels. We propose a comprehensive taxonomy of representation learning methods\nfor graph-structured data, aiming to unify several disparate bodies of work.\nSpecifically, we propose a Graph Encoder Decoder Model (GRAPHEDM), which\ngeneralizes popular algorithms for semi-supervised learning on graphs (e.g.\nGraphSage, Graph Convolutional Networks, Graph Attention Networks), and\nunsupervised learning of graph representations (e.g. DeepWalk, node2vec, etc)\ninto a single consistent approach. To illustrate the generality of this\napproach, we fit over thirty existing methods into this framework. 
We believe\nthat this unifying view both provides a solid foundation for understanding the\nintuition behind these methods, and enables future research in the area.", + "authors": "Ines Chami, Sami Abu-El-Haija, Bryan Perozzi, Christopher R\u00e9, Kevin Murphy", + "published": "2020-05-07", + "updated": "2022-04-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE", + "cs.SI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.15665v1", + "title": "Learnability of a hybrid quantum-classical neural network for graph-structured quantum data", + "abstract": "Classical data with graph structure always exists when dealing with many\nreal-world problems. In parallel, quantum data with graph structure also need\nto be investigated since they are always produced by structured quantum data\nsources. In this paper, we make use of a hybrid quantum-classical neural network\nwith deep residual learning (Res-HQCNN) to learn graph-structured quantum data.\nSpecifically, based on the special definition of graph-structured quantum data,\nwe first find suitable cost functions so that Res-HQCNN can learn\nsemi-supervised quantum data both with and without graphs. Moreover, the training\nalgorithm of Res-HQCNN for graph-structured training data is given in detail.\nNext, in order to show the learning ability of Res-HQCNN, we perform extensive\nexperiments to show that using information about graph structures for\nquantum data can lead to better learning efficiency compared with the state of\nthe art. At the same time, we also design comparable experiments to explain\nthat using residual learning can also bring better performance when\ntraining deep quantum neural networks.", + "authors": "Yan-Ying Liang, Si-Le Tang, Zhe-Hao Yi, Hao-Zhen Si-Tu, Zhu-Jun Zheng", + "published": "2024-01-28", + "updated": "2024-01-28", + "primary_cat": "quant-ph", + "cats": [ + "quant-ph" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1902.10042v2", + "title": "Graph Neural Processes: Towards Bayesian Graph Neural Networks", + "abstract": "We introduce Graph Neural Processes (GNP), inspired by the recent work in\nconditional and latent neural processes. A Graph Neural Process is defined as a\nConditional Neural Process that operates on arbitrary graph data. It takes\nfeatures of sparsely observed context points as input, and outputs a\ndistribution over target points. We demonstrate graph neural processes in edge\nimputation and discuss benefits and drawbacks of the method for other\napplication areas. One major benefit of GNPs is the ability to quantify\nuncertainty in deep learning on graph structures. An additional benefit of this\nmethod is the ability to extend graph neural networks to inputs of dynamically\nsized graphs.", + "authors": "Andrew Carr, David Wingate", + "published": "2019-02-26", + "updated": "2019-10-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2204.05258v1", + "title": "Multi-view graph structure learning using subspace merging on Grassmann manifold", + "abstract": "Many successful learning algorithms have been recently developed to represent\ngraph-structured data. For example, Graph Neural Networks (GNNs) have achieved\nconsiderable successes in various tasks such as node classification, graph\nclassification, and link prediction.
However, these methods are highly\ndependent on the quality of the input graph structure. One approach used to\nalleviate this problem is to learn the graph structure instead of relying on a\nmanually designed graph. In this paper, we introduce a new graph structure\nlearning approach using multi-view learning, named MV-GSL (Multi-View Graph\nStructure Learning), in which we aggregate different graph structure learning\nmethods using subspace merging on the Grassmann manifold to improve the quality of\nthe learned graph structures. Extensive experiments are performed to evaluate\nthe effectiveness of the proposed method on two benchmark datasets, Cora and\nCiteseer. Our experiments show that the proposed method has promising\nperformance compared to single and other combined graph structure learning\nmethods.", + "authors": "Razieh Ghiasi, Hossein Amirkhani, Alireza Bosaghzadeh", + "published": "2022-04-11", + "updated": "2022-04-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.09671v1", + "title": "DDGK: Learning Graph Representations for Deep Divergence Graph Kernels", + "abstract": "Can neural networks learn to compare graphs without feature engineering? In\nthis paper, we show that it is possible to learn representations for graph\nsimilarity with neither domain knowledge nor supervision (i.e.\\ feature\nengineering or labeled graphs). We propose Deep Divergence Graph Kernels, an\nunsupervised method for learning representations over graphs that encodes a\nrelaxed notion of graph isomorphism. Our method consists of three parts. First,\nwe learn an encoder for each anchor graph to capture its structure. Second, for\neach pair of graphs, we train a cross-graph attention network which uses the\nnode representations of an anchor graph to reconstruct another graph. This\napproach, which we call isomorphism attention, captures how well the\nrepresentations of one graph can encode another. We use the attention-augmented\nencoder's predictions to define a divergence score for each pair of graphs.\nFinally, we construct an embedding space for all graphs using these pair-wise\ndivergence scores.\n Unlike previous work, much of which relies on 1) supervision, 2) domain\nspecific knowledge (e.g. a reliance on Weisfeiler-Lehman kernels), and 3) known\nnode alignment, our unsupervised method jointly learns node representations,\ngraph representations, and an attention-based alignment between graphs.\n Our experimental results show that Deep Divergence Graph Kernels can learn an\nunsupervised alignment between graphs, and that the learned representations\nachieve competitive results when used as features on a number of challenging\ngraph classification tasks. Furthermore, we illustrate how the learned\nattention allows insight into the alignment of sub-structures across\ngraphs.", + "authors": "Rami Al-Rfou, Dustin Zelle, Bryan Perozzi", + "published": "2019-04-21", + "updated": "2019-04-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.IR", + "cs.SI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2309.10134v1", + "title": "GDM: Dual Mixup for Graph Classification with Limited Supervision", + "abstract": "Graph Neural Networks (GNNs) require a large number of labeled graph samples\nto obtain good performance on the graph classification task.
The performance of\nGNNs degrades significantly as the number of labeled graph samples decreases.\nTo reduce the annotation cost, it is therefore important to develop graph\naugmentation methods that can generate new graph instances to increase the size\nand diversity of the limited set of available labeled graph samples. In this\nwork, we propose a novel mixup-based graph augmentation method, Graph Dual\nMixup (GDM), that leverages both functional and structural information of the\ngraph instances to generate new labeled graph samples. GDM employs a graph\nstructural auto-encoder to learn structural embeddings of the graph samples,\nand then applies mixup to the structural information of the graphs in the\nlearned structural embedding space and generates new graph structures from the\nmixup structural embeddings. As for the functional information, GDM applies\nmixup directly to the input node features of the graph samples to generate\nfunctional node feature information for new mixup graph instances. Jointly, the\ngenerated input node features and graph structures yield new graph samples\nwhich can supplement the set of original labeled graphs. Furthermore, we\npropose two novel Balanced Graph Sampling methods to enhance the balanced\ndifficulty and diversity for the generated graph samples. Experimental results\non the benchmark datasets demonstrate that our proposed method substantially\noutperforms the state-of-the-art graph augmentation methods when the labeled\ngraphs are scarce.", + "authors": "Abdullah Alchihabi, Yuhong Guo", + "published": "2023-09-18", + "updated": "2023-09-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2403.07294v1", + "title": "Graph Data Condensation via Self-expressive Graph Structure Reconstruction", + "abstract": "With the increasing demands of training graph neural networks (GNNs) on\nlarge-scale graphs, graph data condensation has emerged as a critical technique\nto relieve the storage and time costs during the training phase. It aims to\ncondense the original large-scale graph to a much smaller synthetic graph while\npreserving the essential information necessary for efficiently training a\ndownstream GNN. However, existing methods concentrate either on optimizing node\nfeatures exclusively or endeavor to independently learn node features and the\ngraph structure generator. They could not explicitly leverage the information\nof the original graph structure and failed to construct an interpretable graph\nstructure for the synthetic dataset. To address these issues, we introduce a\nnovel framework named \\textbf{G}raph Data \\textbf{C}ondensation via\n\\textbf{S}elf-expressive Graph Structure \\textbf{R}econstruction\n(\\textbf{GCSR}). Our method stands out by (1) explicitly incorporating the\noriginal graph structure into the condensing process and (2) capturing the\nnuanced interdependencies between the condensed nodes by reconstructing an\ninterpretable self-expressive graph structure. Extensive experiments and\ncomprehensive analysis validate the efficacy of the proposed method across\ndiverse GNN models and datasets. 
Our code is available at\nhttps://www.dropbox.com/scl/fi/2aonyp5ln5gisdqtjimu8/GCSR.zip?rlkey=11cuwfpsf54wxiiktu0klud0x&dl=0", + "authors": "Zhanyu Liu, Chaolv Zeng, Guanjie Zheng", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2003.03892v2", + "title": "COPT: Coordinated Optimal Transport for Graph Sketching", + "abstract": "We introduce COPT, a novel distance metric between graphs defined via an\noptimization routine, computing a coordinated pair of optimal transport maps\nsimultaneously. This gives an unsupervised way to learn general-purpose graph\nrepresentation, applicable to both graph sketching and graph comparison. COPT\ninvolves simultaneously optimizing dual transport plans, one between the\nvertices of two graphs, and another between graph signal probability\ndistributions. We show theoretically that our method preserves important global\nstructural information on graphs, in particular spectral information, and\nanalyze connections to existing studies. Empirically, COPT outperforms state of\nthe art methods in graph classification on both synthetic and real datasets.", + "authors": "Yihe Dong, Will Sawin", + "published": "2020-03-09", + "updated": "2020-06-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DS", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2108.01660v3", + "title": "Graph Neural Networks With Lifting-based Adaptive Graph Wavelets", + "abstract": "Spectral-based graph neural networks (SGNNs) have been attracting increasing\nattention in graph representation learning. However, existing SGNNs are limited\nin implementing graph filters with rigid transforms (e.g., graph Fourier or\npredefined graph wavelet transforms) and cannot adapt to signals residing on\ngraphs and tasks at hand. In this paper, we propose a novel class of graph\nneural networks that realizes graph filters with adaptive graph wavelets.\nSpecifically, the adaptive graph wavelets are learned with neural\nnetwork-parameterized lifting structures, where structure-aware attention-based\nlifting operations (i.e., prediction and update operations) are developed to\njointly consider graph structures and node features. We propose to lift based\non diffusion wavelets to alleviate the structural information loss induced by\npartitioning non-bipartite graphs. By design, the locality and sparsity of the\nresulting wavelet transform as well as the scalability of the lifting structure\nare guaranteed. We further derive a soft-thresholding filtering operation by\nlearning sparse graph representations in terms of the learned wavelets,\nyielding a localized, efficient, and scalable wavelet-based graph filters. To\nensure that the learned graph representations are invariant to node\npermutations, a layer is employed at the input of the networks to reorder the\nnodes according to their local topology information. We evaluate the proposed\nnetworks in both node-level and graph-level representation learning tasks on\nbenchmark citation and bioinformatics graph datasets. 
Extensive experiments\ndemonstrate the superiority of the proposed networks over existing SGNNs in\nterms of accuracy, efficiency, and scalability.", + "authors": "Mingxing Xu, Wenrui Dai, Chenglin Li, Junni Zou, Hongkai Xiong, Pascal Frossard", + "published": "2021-08-03", + "updated": "2022-01-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2305.15843v1", + "title": "TabGSL: Graph Structure Learning for Tabular Data Prediction", + "abstract": "This work presents a novel approach to tabular data prediction leveraging\ngraph structure learning and graph neural networks. Despite the prevalence of\ntabular data in real-world applications, traditional deep learning methods\noften overlook the potentially valuable associations between data instances.\nSuch associations can offer beneficial insights for classification tasks, as\ninstances may exhibit similar patterns of correlations among features and\ntarget labels. This information can be exploited by graph neural networks,\nnecessitating robust graph structures. However, existing studies primarily\nfocus on improving graph structure from noisy data, largely neglecting the\npossibility of deriving graph structures from tabular data. We present a novel\nsolution, Tabular Graph Structure Learning (TabGSL), to enhance tabular data\nprediction by simultaneously learning instance correlation and feature\ninteraction within a unified framework. This is achieved through a proposed\ngraph contrastive learning module, along with transformer-based feature\nextractor and graph neural network. Comprehensive experiments conducted on 30\nbenchmark tabular datasets demonstrate that TabGSL markedly outperforms both\ntree-based models and recent deep learning-based tabular models. Visualizations\nof the learned instance embeddings further substantiate the effectiveness of\nTabGSL.", + "authors": "Jay Chiehen Liao, Cheng-Te Li", + "published": "2023-05-25", + "updated": "2023-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2304.13195v1", + "title": "Connector 0.5: A unified framework for graph representation learning", + "abstract": "Graph representation learning models aim to represent the graph structure and\nits features into low-dimensional vectors in a latent space, which can benefit\nvarious downstream tasks, such as node classification and link prediction. Due\nto its powerful graph data modelling capabilities, various graph embedding\nmodels and libraries have been proposed to learn embeddings and help\nresearchers ease conducting experiments. In this paper, we introduce a novel\ngraph representation framework covering various graph embedding models, ranging\nfrom shallow to state-of-the-art models, namely Connector. First, we consider\ngraph generation by constructing various types of graphs with different\nstructural relations, including homogeneous, signed, heterogeneous, and\nknowledge graphs. Second, we introduce various graph representation learning\nmodels, ranging from shallow to deep graph embedding models. Finally, we plan\nto build an efficient open-source framework that can provide deep graph\nembedding models to represent structural relations in graphs. 
The framework is\navailable at https://github.com/NSLab-CUK/Connector.", + "authors": "Thanh Sang Nguyen, Jooho Lee, Van Thuy Hoang, O-Joun Lee", + "published": "2023-04-25", + "updated": "2023-04-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.08201v1", + "title": "Graph Laplacian Learning with Exponential Family Noise", + "abstract": "A common challenge in applying graph machine learning methods is that the\nunderlying graph of a system is often unknown. Although different graph\ninference methods have been proposed for continuous graph signals, inferring\nthe graph structure underlying other types of data, such as discrete counts, is\nunder-explored. In this paper, we generalize a graph signal processing (GSP)\nframework for learning a graph from smooth graph signals to the exponential\nfamily noise distribution to model various data types. We propose an\nalternating algorithm that estimates the graph Laplacian as well as the\nunobserved smooth representation from the noisy signals. We demonstrate in\nsynthetic and real-world data that our new algorithm outperforms competing\nLaplacian estimation methods under noise model mismatch.", + "authors": "Changhao Shi, Gal Mishne", + "published": "2023-06-14", + "updated": "2023-06-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.SP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2307.02126v1", + "title": "Robust Graph Structure Learning with the Alignment of Features and Adjacency Matrix", + "abstract": "To improve the robustness of graph neural networks (GNN), graph structure\nlearning (GSL) has attracted great interest due to the pervasiveness of noise\nin graph data. Many approaches have been proposed for GSL to jointly learn a\nclean graph structure and corresponding representations. To extend the previous\nwork, this paper proposes a novel regularized GSL approach, particularly with\nan alignment of feature information and graph information, which is motivated\nmainly by our derived lower bound of node-level Rademacher complexity for GNNs.\nAdditionally, our proposed approach incorporates sparse dimensional reduction\nto leverage low-dimensional node features that are relevant to the graph\nstructure. To evaluate the effectiveness of our approach, we conduct\nexperiments on real-world graphs. The results demonstrate that our proposed GSL\nmethod outperforms several competitive baselines, especially in scenarios where\nthe graph structures are heavily affected by noise. 
Overall, our research\nhighlights the importance of integrating feature and graph information\nalignment in GSL, as inspired by our derived theoretical result, and showcases\nthe superiority of our approach in handling noisy graph structures through\ncomprehensive experiments on real-world datasets.", + "authors": "Shaogao Lv, Gang Wen, Shiyu Liu, Linsen Wei, Ming Li", + "published": "2023-07-05", + "updated": "2023-07-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2106.15239v1", + "title": "Generating the Graph Gestalt: Kernel-Regularized Graph Representation Learning", + "abstract": "Recent work on graph generative models has made remarkable progress towards\ngenerating increasingly realistic graphs, as measured by global graph features\nsuch as degree distribution, density, and clustering coefficients. Deep\ngenerative models have also made significant advances through better modelling\nof the local correlations in the graph topology, which have been very useful\nfor predicting unobserved graph components, such as the existence of a link or\nthe class of a node, from nearby observed graph components. A complete\nscientific understanding of graph data should address both global and local\nstructure. In this paper, we propose a joint model for both as complementary\nobjectives in a graph VAE framework. Global structure is captured by\nincorporating graph kernels in a probabilistic model whose loss function is\nclosely related to the maximum mean discrepancy(MMD) between the global\nstructures of the reconstructed and the input graphs. The ELBO objective\nderived from the model regularizes a standard local link reconstruction term\nwith an MMD term. Our experiments demonstrate a significant improvement in the\nrealism of the generated graph structures, typically by 1-2 orders of magnitude\nof graph structure metrics, compared to leading graph VAEand GAN models. Local\nlink reconstruction improves as well in many cases.", + "authors": "Kiarash Zahirnia, Ankita Sakhuja, Oliver Schulte, Parmis Nadaf, Ke Li, Xia Hu", + "published": "2021-06-29", + "updated": "2021-06-29", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1811.09971v1", + "title": "Graph Learning-Convolutional Networks", + "abstract": "Recently, graph Convolutional Neural Networks (graph CNNs) have been widely\nused for graph data representation and semi-supervised learning tasks. However,\nexisting graph CNNs generally use a fixed graph which may be not optimal for\nsemi-supervised learning tasks. In this paper, we propose a novel Graph\nLearning-Convolutional Network (GLCN) for graph data representation and\nsemi-supervised learning. The aim of GLCN is to learn an optimal graph\nstructure that best serves graph CNNs for semi-supervised learning by\nintegrating both graph learning and graph convolution together in a unified\nnetwork architecture. The main advantage is that in GLCN, both given labels and\nthe estimated labels are incorporated and thus can provide useful 'weakly'\nsupervised information to refine (or learn) the graph construction and also to\nfacilitate the graph convolution operation in GLCN for unknown label\nestimation. 
Experimental results on seven benchmarks demonstrate that GLCN\nsignificantly outperforms state-of-the-art traditional fixed structure based\ngraph CNNs.", + "authors": "Bo Jiang, Ziyan Zhang, Doudou Lin, Jin Tang", + "published": "2018-11-25", + "updated": "2018-11-25", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1910.01743v1", + "title": "Graph Generation with Variational Recurrent Neural Network", + "abstract": "Generating graph structures is a challenging problem due to the diverse\nrepresentations and complex dependencies among nodes. In this paper, we\nintroduce Graph Variational Recurrent Neural Network (GraphVRNN), a\nprobabilistic autoregressive model for graph generation. Through modeling the\nlatent variables of graph data, GraphVRNN can capture the joint distributions\nof graph structures and the underlying node attributes. We conduct experiments\non the proposed GraphVRNN in both graph structure learning and attribute\ngeneration tasks. The evaluation results show that the variational component\nallows our network to model complicated distributions, as well as generate\nplausible structures and node attributes.", + "authors": "Shih-Yang Su, Hossein Hajimirsadeghi, Greg Mori", + "published": "2019-10-02", + "updated": "2019-10-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1609.04350v2", + "title": "Time-Variant Graph Classification", + "abstract": "Graphs are commonly used to represent objects, such as images and text, for\npattern classification. In a dynamic world, an object may continuously evolve\nover time, and so does the graph extracted from the underlying object. These\nchanges in graph structure with respect to the temporal order present a new\nrepresentation of the graph, in which an object corresponds to a set of\ntime-variant graphs. In this paper, we formulate a novel time-variant graph\nclassification task and propose a new graph feature, called a graph-shapelet\npattern, for learning and classifying time-variant graphs. Graph-shapelet\npatterns are compact and discriminative graph transformation subsequences. A\ngraph-shapelet pattern can be regarded as a graphical extension of a shapelet\n-- a class of discriminative features designed for vector-based temporal data\nclassification. To discover graph-shapelet patterns, we propose to convert a\ntime-variant graph sequence into time-series data and use the discovered\nshapelets to find graph transformation subsequences as graph-shapelet patterns.\nBy converting each graph-shapelet pattern into a unique tokenized graph\ntransformation sequence, we can measure the similarity between two\ngraph-shapelet patterns and therefore classify time-variant graphs. Experiments\non both synthetic and real-world data demonstrate the superior performance of\nthe proposed algorithms.", + "authors": "Haishuai Wang", + "published": "2016-09-14", + "updated": "2017-06-12", + "primary_cat": "cs.DS", + "cats": [ + "cs.DS" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2404.11869v1", + "title": "Multi-view Graph Structural Representation Learning via Graph Coarsening", + "abstract": "Graph Transformers (GTs) have made remarkable achievements in graph-level\ntasks. 
However, most existing works regard graph structures as a form of\nguidance or bias for enhancing node representations, which focuses on\nnode-central perspectives and lacks explicit representations of edges and\nstructures. One natural question is, can we treat graph structures, node-like, as\na whole to learn high-level features? Through experimental analysis, we explore\nthe feasibility of this assumption. Based on our findings, we propose a novel\nmulti-view graph structural representation learning model via graph coarsening\n(MSLgo) on GT architecture for graph classification. Specifically, we build\nthree unique views, original, coarsening, and conversion, to learn a thorough\nstructural representation. We compress loops and cliques via hierarchical\nheuristic graph coarsening and restrict them with well-designed constraints,\nwhich builds the coarsening view to learn high-level interactions between\nstructures. We also introduce line graphs for edge embeddings and switch to\nedge-central perspective to construct the conversion view. Experiments on six\nreal-world datasets demonstrate the improvements of MSLgo over 14 baselines\nfrom various architectures.", + "authors": "Xiaorui Qi, Qijie Bai, Yanlong Wen, Haiwei Zhang, Xiaojie Yuan", + "published": "2024-04-18", + "updated": "2024-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2201.06367v1", + "title": "Towards Unsupervised Deep Graph Structure Learning", + "abstract": "In recent years, graph neural networks (GNNs) have emerged as a successful\ntool in a variety of graph-related applications. However, the performance of\nGNNs can deteriorate when noisy connections occur in the original graph\nstructures; besides, the dependence on explicit structures prevents GNNs from\nbeing applied to general unstructured scenarios. To address these issues,\nrecently emerged deep graph structure learning (GSL) methods propose to jointly\noptimize the graph structure along with GNN under the supervision of a node\nclassification task. Nonetheless, these methods focus on a supervised learning\nscenario, which leads to several problems, i.e., the reliance on labels, the\nbias of edge distribution, and the limitation on application tasks. In this\npaper, we propose a more practical GSL paradigm, unsupervised graph structure\nlearning, where the learned graph topology is optimized by data itself without\nany external guidance (i.e., labels). To solve the unsupervised GSL problem, we\npropose a novel StrUcture Bootstrapping contrastive LearnIng fraMEwork (SUBLIME\nfor abbreviation) with the aid of self-supervised contrastive learning.\nSpecifically, we generate a learning target from the original data as an\n\"anchor graph\", and use a contrastive loss to maximize the agreement between\nthe anchor graph and the learned graph. To provide persistent guidance, we\ndesign a novel bootstrapping mechanism that upgrades the anchor graph with\nlearned structures during model learning.
We also design a series of graph\nlearners and post-processing schemes to model the structures to learn.\nExtensive experiments on eight benchmark datasets demonstrate the significant\neffectiveness of our proposed SUBLIME and high quality of the optimized graphs.", + "authors": "Yixin Liu, Yu Zheng, Daokun Zhang, Hongxu Chen, Hao Peng, Shirui Pan", + "published": "2022-01-17", + "updated": "2022-01-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2206.01152v2", + "title": "Causal Structure Learning: a Combinatorial Perspective", + "abstract": "In this review, we discuss approaches for learning causal structure from\ndata, also called causal discovery. In particular, we focus on approaches for\nlearning directed acyclic graphs (DAGs) and various generalizations which allow\nfor some variables to be unobserved in the available data. We devote special\nattention to two fundamental combinatorial aspects of causal structure\nlearning. First, we discuss the structure of the search space over causal\ngraphs. Second, we discuss the structure of equivalence classes over causal\ngraphs, i.e., sets of graphs which represent what can be learned from\nobservational data alone, and how these equivalence classes can be refined by\nadding interventional data.", + "authors": "Chandler Squires, Caroline Uhler", + "published": "2022-06-02", + "updated": "2022-12-19", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.11883v2", + "title": "Robust Graph Data Learning via Latent Graph Convolutional Representation", + "abstract": "Graph Convolutional Representation (GCR) has achieved impressive performance\nfor graph data representation. However, existing GCR is generally defined on\nthe input fixed graph which may restrict the representation capacity and also\nbe vulnerable to the structural attacks and noises. To address this issue, we\npropose a novel Latent Graph Convolutional Representation (LatGCR) for robust\ngraph data representation and learning. Our LatGCR is derived based on\nreformulating graph convolutional representation from the aspect of graph\nneighborhood reconstruction. Given an input graph $\\textbf{A}$, LatGCR aims to\ngenerate a flexible latent graph $\\widetilde{\\textbf{A}}$ for graph\nconvolutional representation which obviously enhances the representation\ncapacity and also performs robustly w.r.t graph structural attacks and noises.\nMoreover, LatGCR is implemented in a self-supervised manner and thus provides a\nbasic block for both supervised and unsupervised graph learning tasks.\nExperiments on several datasets demonstrate the effectiveness and robustness of\nLatGCR.", + "authors": "Bo Jiang, Ziyan Zhang, Bin Luo", + "published": "2019-04-26", + "updated": "2021-10-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2403.04923v2", + "title": "Control-based Graph Embeddings with Data Augmentation for Contrastive Learning", + "abstract": "In this paper, we study the problem of unsupervised graph representation\nlearning by harnessing the control properties of dynamical networks defined on\ngraphs. Our approach introduces a novel framework for contrastive learning, a\nwidely prevalent technique for unsupervised representation learning. 
A crucial\nstep in contrastive learning is the creation of 'augmented' graphs from the\ninput graphs. Though different from the original graphs, these augmented graphs\nretain the original graph's structural characteristics. Here, we propose a\nunique method for generating these augmented graphs by leveraging the control\nproperties of networks. The core concept revolves around perturbing the\noriginal graph to create a new one while preserving the controllability\nproperties specific to networks and graphs. Compared to the existing methods,\nwe demonstrate that this innovative approach enhances the effectiveness of\ncontrastive learning frameworks, leading to superior results regarding the\naccuracy of the classification tasks. The key innovation lies in our ability to\ndecode the network structure using these control properties, opening new\navenues for unsupervised graph representation learning.", + "authors": "Obaid Ullah Ahmad, Anwar Said, Mudassir Shabbir, Waseem Abbas, Xenofon Koutsoukos", + "published": "2024-03-07", + "updated": "2024-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.MA", + "cs.SY", + "eess.SY" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2402.02321v1", + "title": "Active Learning for Graphs with Noisy Structures", + "abstract": "Graph Neural Networks (GNNs) have seen significant success in tasks such as\nnode classification, largely contingent upon the availability of sufficient\nlabeled nodes. Yet, the excessive cost of labeling large-scale graphs led to a\nfocus on active learning on graphs, which aims for effective data selection to\nmaximize downstream model performance. Notably, most existing methods assume\nreliable graph topology, while real-world scenarios often present noisy graphs.\nGiven this, designing a successful active learning framework for noisy graphs\nis highly needed but challenging, as selecting data for labeling and obtaining\na clean graph are two tasks naturally interdependent: selecting high-quality\ndata requires clean graph structure while cleaning noisy graph structure\nrequires sufficient labeled data. Considering the complexity mentioned above,\nwe propose an active learning framework, GALClean, which has been specifically\ndesigned to adopt an iterative approach for conducting both data selection and\ngraph purification simultaneously with best information learned from the prior\niteration. Importantly, we summarize GALClean as an instance of the\nExpectation-Maximization algorithm, which provides a theoretical understanding\nof its design and mechanisms. This theory naturally leads to an enhanced\nversion, GALClean+. Extensive experiments have demonstrated the effectiveness\nand robustness of our proposed method across various types and levels of noisy\ngraphs.", + "authors": "Hongliang Chi, Cong Qi, Suhang Wang, Yao Ma", + "published": "2024-02-04", + "updated": "2024-02-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2109.11796v1", + "title": "Edge but not Least: Cross-View Graph Pooling", + "abstract": "Graph neural networks have emerged as a powerful model for graph\nrepresentation learning to undertake graph-level prediction tasks. Various\ngraph pooling methods have been developed to coarsen an input graph into a\nsuccinct graph-level representation through aggregating node embeddings\nobtained via graph convolution. 
However, most graph pooling methods are heavily\nnode-centric and are unable to fully leverage the crucial information contained\nin global graph structure. This paper presents a cross-view graph pooling\n(Co-Pooling) method to better exploit crucial graph structure information. The\nproposed Co-Pooling fuses pooled representations learnt from both node view and\nedge view. Through cross-view interaction, edge-view pooling and node-view\npooling seamlessly reinforce each other to learn more informative graph-level\nrepresentations. Co-Pooling has the advantage of handling various graphs with\ndifferent types of node attributes. Extensive experiments on a total of 15\ngraph benchmark datasets validate the effectiveness of our proposed method,\ndemonstrating its superior performance over state-of-the-art pooling methods on\nboth graph classification and graph regression tasks.", + "authors": "Xiaowei Zhou, Jie Yin, Ivor W. Tsang", + "published": "2021-09-24", + "updated": "2021-09-24", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2403.03659v1", + "title": "Robust Graph Structure Learning under Heterophily", + "abstract": "A graph is a fundamental mathematical structure for characterizing relations\nbetween different objects and has been widely used in various learning tasks.\nMost methods implicitly assume a given graph to be accurate and complete.\nHowever, real data is inevitably noisy and sparse, which will lead to inferior\nresults. Despite the remarkable success of recent graph representation learning\nmethods, they inherently presume that the graph is homophilic, and largely\noverlook heterophily, where most connected nodes are from different classes. In\nthis regard, we propose a novel robust graph structure learning method to\nachieve a high-quality graph from heterophilic data for downstream tasks. We\nfirst apply a high-pass filter to make each node more distinctive from its\nneighbors by encoding structure information into the node features. Then, we\nlearn a robust graph with an adaptive norm characterizing different levels of\nnoise. Afterwards, we propose a novel regularizer to further refine the graph\nstructure. Clustering and semi-supervised classification experiments on\nheterophilic graphs verify the effectiveness of our method.", + "authors": "Xuanting Xie, Zhao Kang, Wenyu Chen", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2212.04934v1", + "title": "Learning Graph Algorithms With Recurrent Graph Neural Networks", + "abstract": "Classical graph algorithms work well for combinatorial problems that can be\nthoroughly formalized and abstracted. Once the algorithm is derived, it\ngeneralizes to instances of any size. However, developing an algorithm that\nhandles complex structures and interactions in the real world can be\nchallenging. Rather than specifying the algorithm, we can try to learn it from\nthe graph-structured data. Graph Neural Networks (GNNs) are inherently capable\nof working on graph structures; however, they struggle to generalize well, and\nlearning on larger instances is challenging. In order to scale, we focus on a\nrecurrent architecture design that can learn simple graph problems end to end\non smaller graphs and then extrapolate to larger instances.
As our main\ncontribution, we identify three essential techniques for recurrent GNNs to\nscale. By using (i) skip connections, (ii) state regularization, and (iii) edge\nconvolutions, we can guide GNNs toward extrapolation. This allows us to train\non small graphs and apply the same model to much larger graphs during\ninference. Moreover, we empirically validate the extrapolation capabilities of\nour GNNs on algorithmic datasets.", + "authors": "Florian Gr\u00f6tschla, Jo\u00ebl Mathys, Roger Wattenhofer", + "published": "2022-12-09", + "updated": "2022-12-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2110.05018v2", + "title": "Time-varying Graph Learning Under Structured Temporal Priors", + "abstract": "This paper endeavors to learn time-varying graphs by using structured\ntemporal priors that assume underlying relations between any two graphs\nin the graph sequence. Different from many existing chain structure based\nmethods in which the priors like temporal homogeneity can only describe the\nvariations of two consecutive graphs, we propose a structure named\n\\emph{temporal graph} to characterize the underlying real temporal relations.\nUnder this framework, the chain structure is actually a special case of our\ntemporal graph. We further propose the Alternating Direction Method of Multipliers\n(ADMM), a distributed algorithm, to solve the induced optimization problem.\nNumerical experiments demonstrate the superiority of our method.", + "authors": "Xiang Zhang, Qiao Wang", + "published": "2021-10-11", + "updated": "2022-02-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.SP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2109.11898v1", + "title": "Graph Learning Augmented Heterogeneous Graph Neural Network for Social Recommendation", + "abstract": "Social recommendation based on social networks has achieved great success in\nimproving the performance of recommendation systems. Since social networks\n(user-user relations) and user-item interactions are both naturally represented\nas graph-structured data, Graph Neural Networks (GNNs) have thus been widely\napplied for social recommendation. In this work, we propose an end-to-end\nheterogeneous global graph learning framework, namely Graph Learning Augmented\nHeterogeneous Graph Neural Network (GL-HGNN) for social recommendation. GL-HGNN\naims to learn a heterogeneous global graph that makes full use of user-user\nrelations, user-item interactions and item-item similarities in a unified\nperspective. To this end, we design a Graph Learner (GL) method to learn and\noptimize user-user and item-item connections separately. Moreover, we employ a\nHeterogeneous Graph Neural Network (HGNN) to capture the high-order complex\nsemantic relations from our learned heterogeneous global graph. To scale up the\ncomputation of graph learning, we further present the Anchor-based Graph\nLearner (AGL) to reduce computational complexity.
Extensive experiments on four\nreal-world datasets demonstrate the effectiveness of our model.", + "authors": "Yiming Zhang, Lingfei Wu, Qi Shen, Yitong Pang, Zhihua Wei, Fangli Xu, Ethan Chang, Bo Long", + "published": "2021-09-24", + "updated": "2021-09-24", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1906.02319v1", + "title": "DEMO-Net: Degree-specific Graph Neural Networks for Node and Graph Classification", + "abstract": "Graph data widely exist in many high-impact applications. Inspired by the\nsuccess of deep learning in grid-structured data, graph neural network models\nhave been proposed to learn powerful node-level or graph-level representation.\nHowever, most of the existing graph neural networks suffer from the following\nlimitations: (1) there is limited analysis regarding the graph convolution\nproperties, such as seed-oriented, degree-aware and order-free; (2) the node's\ndegree-specific graph structure is not explicitly expressed in graph\nconvolution for distinguishing structure-aware node neighborhoods; (3) the\ntheoretical explanation regarding the graph-level pooling schemes is unclear.\n To address these problems, we propose a generic degree-specific graph neural\nnetwork named DEMO-Net, motivated by the Weisfeiler-Lehman graph isomorphism test\nthat recursively identifies 1-hop neighborhood structures. In order to\nexplicitly capture the graph topology integrated with node attributes, we argue\nthat graph convolution should have three properties: seed-oriented,\ndegree-aware, order-free. To this end, we propose multi-task graph convolution\nwhere each task represents node representation learning for nodes with a\nspecific degree value, thus leading to preserving the degree-specific graph\nstructure. In particular, we design two multi-task learning methods:\ndegree-specific weight and hashing functions for graph convolution. In\naddition, we propose a novel graph-level pooling/readout scheme for learning\ngraph representation provably lying in a degree-specific Hilbert kernel space.\nThe experimental results on several node and graph classification benchmark\ndata sets demonstrate the effectiveness and efficiency of our proposed DEMO-Net\nover state-of-the-art graph neural network models.", + "authors": "Jun Wu, Jingrui He, Jiejun Xu", + "published": "2019-06-05", + "updated": "2019-06-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1905.11691v1", + "title": "Triple2Vec: Learning Triple Embeddings from Knowledge Graphs", + "abstract": "Graph embedding techniques allow one to learn high-quality feature vectors from\ngraph structures and are useful in a variety of tasks, from node classification\nto clustering. Existing approaches have only focused on learning feature\nvectors for the nodes in a (knowledge) graph. To the best of our knowledge,\nnone of them has tackled the problem of embedding graph edges, that is,\nknowledge graph triples. The approaches that are closer to this task have\nfocused on homogeneous graphs involving only one type of edge and obtain edge\nembeddings by applying some operation (e.g., average) on the embeddings of the\nendpoint nodes. The goal of this paper is to introduce Triple2Vec, a new\ntechnique to directly embed edges in (knowledge) graphs. Triple2Vec builds upon\nthree main ingredients. The first is the notion of line graph.
The line graph\nof a graph is another graph representing the adjacency between edges of the\noriginal graph. In particular, the nodes of the line graph are the edges of the\noriginal graph. We show that directly applying existing embedding techniques on\nthe nodes of the line graph to learn edge embeddings is not enough in the\ncontext of knowledge graphs. Thus, we introduce the notion of triple line\ngraph. The second is an edge weighting mechanism both for line graphs derived\nfrom knowledge graphs and homogeneous graphs. The third is a strategy based on\ngraph walks on the weighted triple line graph that can preserve proximity\nbetween nodes. Embeddings are finally generated by adopting the SkipGram model,\nwhere sentences are replaced with graph walks. We evaluate our approach on\ndifferent real-world (knowledge) graphs and compare it with related work.", + "authors": "Valeria Fionda, Giuseppe Pirr\u00f3", + "published": "2019-05-28", + "updated": "2019-05-28", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2106.03236v1", + "title": "Graph2Graph Learning with Conditional Autoregressive Models", + "abstract": "We present a graph neural network model for solving graph-to-graph learning\nproblems. Most deep learning on graphs considers ``simple'' problems such as\ngraph classification or regressing real-valued graph properties. For such\ntasks, the main requirement for intermediate representations of the data is to\nmaintain the structure needed for output, i.e., keeping classes separated or\nmaintaining the order indicated by the regressor. However, a number of learning\ntasks, such as regressing graph-valued output, generative models, or graph\nautoencoders, aim to predict a graph-structured output. In order to\nsuccessfully do this, the learned representations need to preserve far more\nstructure. We present a conditional auto-regressive model for graph-to-graph\nlearning and illustrate its representational capabilities via experiments on\nchallenging subgraph predictions from graph algorithmics; as a graph\nautoencoder for reconstruction and visualization; and on pretraining\nrepresentations that allow graph classification with limited labeled data.", + "authors": "Guan Wang, Francois Bernard Lauze, Aasa Feragen", + "published": "2021-06-06", + "updated": "2021-06-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2012.05980v1", + "title": "CommPOOL: An Interpretable Graph Pooling Framework for Hierarchical Graph Representation Learning", + "abstract": "Recent years have witnessed the emergence and flourishing of hierarchical\ngraph pooling neural networks (HGPNNs) which are effective graph representation\nlearning approaches for graph level tasks such as graph classification.\nHowever, current HGPNNs do not take full advantage of the graph's intrinsic\nstructures (e.g., community structure). Moreover, the pooling operations in\nexisting HGPNNs are difficult to interpret. In this paper, we propose a\nnew interpretable graph pooling framework - CommPOOL, that can capture and\npreserve the hierarchical community structure of graphs in the graph\nrepresentation learning process. Specifically, the proposed community pooling\nmechanism in CommPOOL utilizes an unsupervised approach for capturing the\ninherent community structure of graphs in an interpretable manner.
CommPOOL is\na general and flexible framework for hierarchical graph representation learning\nthat can further facilitate various graph-level tasks. Evaluations on five\npublic benchmark datasets and one synthetic dataset demonstrate the superior\nperformance of CommPOOL in graph representation learning for graph\nclassification compared to the state-of-the-art baseline methods, and its\neffectiveness in capturing and preserving the community structure of graphs.", + "authors": "Haoteng Tang, Guixiang Ma, Lifang He, Heng Huang, Liang Zhan", + "published": "2020-12-10", + "updated": "2020-12-10", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + } +] \ No newline at end of file