diff --git "a/related_34K/test_related_short_2404.17723v1.json" "b/related_34K/test_related_short_2404.17723v1.json" new file mode 100644--- /dev/null +++ "b/related_34K/test_related_short_2404.17723v1.json" @@ -0,0 +1,1435 @@ +[ + { + "url": "http://arxiv.org/abs/2404.17723v1", + "title": "Retrieval-Augmented Generation with Knowledge Graphs for Customer Service Question Answering", + "abstract": "In customer service technical support, swiftly and accurately retrieving\nrelevant past issues is critical for efficiently resolving customer inquiries.\nThe conventional retrieval methods in retrieval-augmented generation (RAG) for\nlarge language models (LLMs) treat a large corpus of past issue tracking\ntickets as plain text, ignoring the crucial intra-issue structure and\ninter-issue relations, which limits performance. We introduce a novel customer\nservice question-answering method that amalgamates RAG with a knowledge graph\n(KG). Our method constructs a KG from historical issues for use in retrieval,\nretaining the intra-issue structure and inter-issue relations. During the\nquestion-answering phase, our method parses consumer queries and retrieves\nrelated sub-graphs from the KG to generate answers. This integration of a KG\nnot only improves retrieval accuracy by preserving customer service structure\ninformation but also enhances answering quality by mitigating the effects of\ntext segmentation. Empirical assessments on our benchmark datasets, utilizing\nkey retrieval (MRR, Recall@K, NDCG@K) and text generation (BLEU, ROUGE, METEOR)\nmetrics, reveal that our method outperforms the baseline by 77.6% in MRR and by\n0.32 in BLEU. Our method has been deployed within LinkedIn's customer service\nteam for approximately six months and has reduced the median per-issue\nresolution time by 28.6%.", + "authors": "Zhentao Xu, Mark Jerome Cruz, Matthew Guevara, Tie Wang, Manasi Deshpande, Xiaofeng Wang, Zheng Li", + "published": "2024-04-26", + "updated": "2024-04-26", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR", + "cs.AI", + "cs.CL", + "cs.LG", + "I.2" + ], + "label": "Original Paper", + "paper_cat": "Graph AND Structure AND Learning", + "gt": "Question answering (QA) with knowledge graphs (KGs) can be broadly classified into retrieval-based, template-based, and semantic parsing-based methods. Retrieval-based approaches utilize relation extraction [19] or distributed representations [5] to derive answers from KGs, but they face difficulties with questions involving multiple entities. Template-based strategies depend on manually-created templates for encoding complex queries, yet are limited by the scope of available templates [16]. Semantic parsing methods map text to logical forms containing predicates from KGs [4] [14] [21]. Recent advancements in large language models (LLMs) integration with Knowledge Graphs (KGs) have demonstrated notable progress. Jin et al. [7] provide a comprehensive review of this integration, categorizing the roles of LLMs as Predictors, Encoders, and Aligners. For graph-based reasoning, Think-on-Graph [15] and Reasoning-on-Graph [10] enhance LLMs\u2019 reasoning abilities by integrating KGs. Yang et al. [20] propose augmenting LLMs\u2019 factual reasoning across various training phases using KGs. For LLM-based question answering, Wen et al.\u2019s Mindmap [18] and Qi et al. [13] employ KGs to boost LLM inference capabilities in specialized domains such as medicine and food. 
These contributions underscore the increasing efficacy of LLM and KG combinations in enhancing information retrieval and reasoning tasks.", + "pre_questions": [], + "main_content": "INTRODUCTION Effective technical support in customer service underpins product success, directly influencing customer satisfaction and loyalty. Given the frequent similarity of customer inquiries to previously resolved issues, the rapid and accurate retrieval of relevant past instances is crucial for the efficient resolution of such inquiries. Recent advancements in embedding-based retrieval (EBR), large language models (LLMs), and retrieval-augmented generation (RAG) [8] have significantly enhanced retrieval performance and question-answering capabilities for the technical support of customer service. This process typically unfolds in two stages: first, historical issue tickets are treated as plain text, segmented into smaller chunks to accommodate the context length constraints of embedding models; each chunk is then converted into an embedding vector for retrieval. Second, during the question-answering phase, the system retrieves the most relevant chunks and feeds them as contexts for LLMs to generate answers in response to queries. Despite its straightforward approach, this method encounters several limitations: \u2022 Limitation 1 Compromised Retrieval Accuracy from Ignoring Structures: Issue tracking documents such as Jira [2] possess inherent structure and are interconnected, with references such as \"issue A is related to/copied from/caused by issue B.\" The conventional approach of compressing documents into text chunks leads to the loss of vital information. Our approach parses issue tickets into trees and further connects individual issue tickets to form an interconnected graph, which maintains this intrinsic relationship among entities, achieving high retrieval performance. \u2022 Limitation 2 Reduced Answer Quality from Segmentation: Segmenting extensive issue tickets into fixed-length segments to accommodate the context length constraints of embedding models can result in the disconnection of related content, leading to incomplete answers. For example, an issue ticket describing an issue at its beginning and its solution at the end may be split during the text segmentation process, resulting in the omission of critical parts of the solution. Our graph-based parsing method overcomes this by preserving the logical coherence of ticket sections, ensuring the delivery of complete and high-quality responses. We introduce an LLM-based customer service question answering system that seamlessly integrates retrieval-augmented generation (RAG) with a knowledge graph (KG). Our system (Figure 1) comprises two phases: First, during the KG construction phase, our system constructs a comprehensive knowledge graph from historical customer service issue tickets. It integrates a tree-structured representation of each issue and interlinks them based on relational context. It also generates embeddings for each node to facilitate later semantic searching. Second, during the question-answering phase, our method parses consumer queries to identify named entities and intents. It then navigates within the KG to identify related sub-graphs for generating answers. 
3.1 Knowledge Graph Construction 3.1.1 Graph Structure Definition. In defining the knowledge graph structure for historical issue representation, we employ a dual-level architecture that segregates intra-issue and inter-issue relations, as illustrated in Figure 1. The Intra-issue Tree T \ud835\udc56(N, E, R) models each ticket \ud835\udc61\ud835\udc56 as a tree, where each node \ud835\udc5b \u2208 N, identified by a unique combination (\ud835\udc56,\ud835\udc60), corresponds to a distinct section \ud835\udc60 of ticket \ud835\udc61\ud835\udc56, and each edge \ud835\udc52 \u2208 E and \ud835\udc5f \u2208 R signifies the hierarchical connection and type of relations between these sections. The Inter-issue Graph G(T, E, R) represents the network of connections across different tickets, incorporating both explicit links E_exp, defined in issue tracking tickets, and implicit connections E_imp, derived from semantic similarity between tickets. For implicit connections, we leverage cosine similarity between the embedding vectors of ticket titles, a method adaptable to specific use cases. For instance, Figure 1 portrays ticket ENT-22970 as a tree structure with nodes representing sections such as Summary, Description, and Priority. It exhibits a direct clone linkage to PORT-133061, indicating an explicit clone relationship. Additionally, it\u2019s implicitly connected with ENT-1744 and ENT-3547 due to their semantic similarities. 3.1.2 Knowledge Graph Construction. Graph construction is delineated into two phases: intra-ticket parsing and inter-ticket connection. 1) Intra-Ticket Parsing Phase: This phase transforms each text-based ticket \ud835\udc61\ud835\udc56 into a tree representation T \ud835\udc56. We employ a hybrid methodology, initially utilizing rule-based extraction for predefined fields, such as code sections identified via keywords. Subsequently, for text not amenable to rule-based parsing, we engage an LLM for parsing. The LLM is directed by a YAML template T template, which represents in graph form the ticket sections routinely utilized by customer support. 2) Inter-Ticket Connection Phase: Here, individual trees T \ud835\udc56 are amalgamated into a comprehensive graph G. Explicit connections E_exp are delineated as specified within tickets, exemplified by designated fields in Jira [2]. Implicit connections E_imp are inferred from textual-semantic similarities across ticket titles, employing embedding techniques and a threshold mechanism to discern the most relevant tickets for each issue ticket. Formally, $t_i = t_{i,\\text{rule}} \\cup t_{i,\\text{llm}}$, $\\mathcal{T}_i = \\text{RuleParse}(t_{i,\\text{rule}}) + \\text{LLMParse}(t_{i,\\text{llm}}, \\mathcal{T}_{\\text{template}}, \\text{prompt})$, $E_{\\text{exp}} = \\{(\\mathcal{T}_i, \\mathcal{T}_j) \\mid \\mathcal{T}_i \\text{ explicitly connected to } \\mathcal{T}_j\\}$, and $E_{\\text{imp}} = \\{(\\mathcal{T}_i, \\mathcal{T}_j) \\mid \\cos(\\text{embed}(\\mathcal{T}_i), \\text{embed}(\\mathcal{T}_j)) \\geq \\theta\\}$. 3.1.3 Embedding Generation. To support online embedding-based retrieval, we generate embeddings for graph node values using pre-trained text-embedding models like BERT [6] and E5 [17], specifically targeting nodes for text-rich sections such as \"issue summary\", \"issue description\", and \"steps to reproduce\". These embeddings are then stored in a vector database (for instance, QDrant [12]). 
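As a rough illustration of the implicit inter-ticket connection step above, the following minimal Python sketch embeds ticket titles, unit-normalizes the vectors, and keeps pairs whose cosine similarity clears the threshold; the embedding callable and the threshold value 0.85 are illustrative assumptions rather than details taken from the paper.

import numpy as np
from itertools import combinations

def implicit_edges(ticket_titles, embed_fn, theta=0.85):
    # ticket_titles: dict mapping ticket id -> title text
    # embed_fn: callable mapping a list of strings to a 2-D array of embeddings (assumed)
    ids = list(ticket_titles)
    vecs = np.asarray(embed_fn([ticket_titles[i] for i in ids]), dtype=float)
    vecs /= np.linalg.norm(vecs, axis=1, keepdims=True)   # unit-normalize rows
    sims = vecs @ vecs.T                                   # pairwise cosine similarities
    return [(ids[a], ids[b])
            for a, b in combinations(range(len(ids)), 2)
            if sims[a, b] >= theta]

The returned pairs correspond to the implicit edges E_imp; explicit edges E_exp would be read directly from ticket fields rather than computed.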
In most cases, the text length within each node meets the text-embedding model\u2019s context length constraints, but for certain lengthy texts, we can safely divide the text into smaller chunks for individual embedding without compromising quality, since all the chunks belong to the same section. Figure 1: An overview of our proposed retrieval-augmented generation with knowledge graph framework. The left side of this diagram illustrates the knowledge graph construction; the right side shows the retrieval and question answering process. 3.2 Retrieval and Question Answering 3.2.1 Query Entity Identification and Intent Detection. 
In this step, we extract the named entities P of type Map(N \u2192 V) and the query intent set I from each user query \ud835\udc5e. The method involves parsing each query \ud835\udc5e into a key-value pair, where each key \ud835\udc5b, mentioned within the query, corresponds to an element in the graph template T template, and the value \ud835\udc63 represents the information extracted from the query. Concurrently, the query intents I include the entities mentioned in the graph template T template that the query aims to address. We leverage an LLM with a suitable prompt in this parsing process. For instance, given the query \ud835\udc5e = \"How to reproduce the login issue where a user can\u2019t log in to LinkedIn?\", the extracted entity is P = Map(\"issue summary\" \u2192 \"login issue\", \"issue description\" \u2192 \"user can\u2019t log in to LinkedIn\"), and the intent set is I = Set(\"fix solution\"). This method demonstrates notable flexibility in accommodating varied query formulations by leveraging the LLM\u2019s extensive understanding and interpretive capabilities. Formally, $P, I = \\text{LLM}(q, \\mathcal{T}_{\\text{template}}, \\text{prompt})$. 3.2.2 Embedding-based Retrieval of Sub-graphs. Our method extracts pertinent sub-graphs from the knowledge graph, aligned with user-provided specifics such as \"issue description\" and \"issue summary\", as well as user intentions like \"fix solution\". This process consists of two primary steps: EBR-based ticket identification and LLM-driven subgraph extraction. In the EBR-based ticket identification step, the top K_ticket most relevant historical issue tickets are pinpointed by harnessing the named entity set P derived from user queries. For each entity pair (\ud835\udc58, \ud835\udc63) \u2208 P, cosine similarity is computed between the entity value \ud835\udc63 and all graph nodes \ud835\udc5b corresponding to section \ud835\udc58 via pretrained text embeddings. Aggregating these node-level scores to the ticket level by summing contributions from nodes belonging to the same ticket, we rank and select the top K_ticket tickets. This method presupposes that the occurrence of multiple query entities is indicative of pertinent links, thus improving retrieval precision. The ticket-level score is $S_{T_i} = \\sum_{(k,v) \\in P} \\left[ \\sum_{n \\in T_i} \\mathbb{I}\\{n.\\text{sec} = k\\} \\cdot \\cos(\\text{embed}(v), \\text{embed}(n.\\text{text})) \\right]$. In the LLM-driven subgraph extraction step, the system first rephrases the original user query \ud835\udc5e to include the retrieved ticket ID; the modified query \ud835\udc5e\u2032 is then translated into a graph database language, such as Cypher for Neo4j, for question answering. For instance, from the initial query \ud835\udc5e = \"how to reproduce the issue where user saw \u2019csv upload error in updating user email\u2019 with major priority due to a data issue\", the query is reformulated to \"how to reproduce \u2019ENT-22970\u2019\" and thereafter transposed into the Cypher query MATCH (j:Ticket {ticket_ID: \u2019ENT-22970\u2019}) -[:HAS_DESCRIPTION]-> (description:Description) -[:HAS_STEPS_TO_REPRODUCE]-> (steps_to_reproduce: StepsToReproduce) RETURN steps_to_reproduce.value. 
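To make the EBR-based ticket identification step above concrete, here is a minimal Python sketch of the ticket-scoring aggregation; the node layout, the embedding callable, and the choice of top-3 are illustrative assumptions, not details from the paper.

import numpy as np

def rank_tickets(query_entities, nodes, embed_fn, k_ticket=3):
    # query_entities: dict mapping section name -> value extracted from the query
    # nodes: list of dicts like {"ticket": "ENT-22970", "sec": "issue summary", "text": "..."}
    # embed_fn: callable mapping one string to a 1-D embedding vector (assumed)
    def unit(text):
        v = np.asarray(embed_fn(text), dtype=float)
        return v / np.linalg.norm(v)
    scores = {}
    for sec, value in query_entities.items():
        q = unit(value)
        for node in nodes:
            if node["sec"] == sec:                     # indicator term I{n.sec = k}
                sim = float(q @ unit(node["text"]))    # cosine similarity contribution
                scores[node["ticket"]] = scores.get(node["ticket"], 0.0) + sim
    return sorted(scores, key=scores.get, reverse=True)[:k_ticket]

The top-ranked ticket IDs then feed the LLM-driven Cypher generation described above.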
It is noteworthy that the LLM-driven query formulation is sufficiently versatile to retrieve information across subgraphs, whether they originate from the same tree or distinct trees within the knowledge graph. 3.2.3 Answer Generation. Answers are synthesized by correlating retrieved data from Section 3.2.2 with the initial query. The LLM serves as a decoder to formulate responses to user inquiries given the retrieved information. For robust online serving, if query execution encounters issues, a fallback mechanism reverts to a baseline text-based retrieval method. 4 EXPERIMENT 4.1 Experiment Design Our evaluation employed a curated \"golden\" dataset comprising typical queries, support tickets, and their authoritative solutions. The control group operated with conventional text-based EBR, while the experimental group applied the methodology outlined in this study. For both groups, we utilized the same LLM, specifically GPT-4 [1], and the same embedding model, E5 [17]. We measured retrieval efficacy using Mean Reciprocal Rank (MRR), recall@K, and NDCG@K. MRR gauges the average inverse rank of the initial correct response, recall@K determines the likelihood of a relevant item\u2019s appearance within the top K selections, and NDCG@K appraises the rank quality by considering both position and pertinence of items. For question-answering performance, we juxtaposed the \"golden\" solutions against the generated responses, utilizing metrics such as BLEU [11], ROUGE [9], and METEOR [3] scores. 4.2 Result and Analysis The retrieval and question-answering performances are presented in Table 1 and Table 2, respectively. Across all metrics, our method demonstrates consistent improvements. Notably, it surpasses the baseline by 77.6% in MRR and by 0.32 in BLEU score, substantiating its superior retrieval efficacy and question-answering accuracy.
Table 1: Retrieval Performance
            MRR    Recall@1  Recall@3  NDCG@1  NDCG@3
Baseline    0.522  0.400     0.640     0.400   0.520
Experiment  0.927  0.860     1.000     0.860   0.946
Table 2: Question Answering Performance
            BLEU   METEOR  ROUGE
Baseline    0.057  0.279   0.183
Experiment  0.377  0.613   0.546
5 PRODUCTION USE CASE We deployed our method within LinkedIn\u2019s customer service team, covering multiple product lines. The team was split randomly into two groups: one used our system, while the other stuck to traditional manual methods. As shown in Table 3, the group using our system achieved significant gains, reducing the median resolution time per issue by 28.6%. This highlights our system\u2019s effectiveness in enhancing customer service efficiency.
Table 3: Customer Support Issue Resolution Time
Group          Mean      P50      P90
Tool Not Used  40 hours  7 hours  87 hours
Tool Used      15 hours  5 hours  47 hours
6 CONCLUSIONS AND FUTURE WORK In conclusion, our research significantly advances automated question answering systems for customer service. Integrating retrieval-augmented generation (RAG) with a knowledge graph (KG) has improved retrieval and answering metrics, and overall service effectiveness. Future work will focus on: developing an automated mechanism for extracting graph templates, enhancing system adaptability; investigating dynamic updates to the knowledge graph based on user queries to improve real-time responsiveness; and exploring the system\u2019s applicability in other contexts beyond customer service. 7 COMPANY PORTRAIT About LinkedIn: Founded in 2003, LinkedIn connects the world\u2019s professionals to make them more productive and successful. 
With more than 1 billion members worldwide, including executives from every Fortune 500 company, LinkedIn is the world\u2019s largest professional network. The company has a diversified business model with revenue coming from Talent Solutions, Marketing Solutions, Sales Solutions and Premium Subscriptions products. Headquartered in Silicon Valley, LinkedIn has offices across the globe. Please visit https://www.linkedin.com/company/linkedin/about/ for more information. Retrieval-Augmented Generation with Knowledge Graphs for Customer Service Question Answering SIGIR \u201924, July 14\u201318, 2024, Washington, DC, USA 8 PRESENTER BIO Zhentao Xu is a Senior Software Engineer at LinkedIn. He received his M.S. in Robotics and B.S. in Electrical Engineering and Computer Science (EECS) from University of Michigan. His research interests lie in large language models and natural language generation." + }, + { + "url": "http://arxiv.org/abs/2307.07697v6", + "title": "Think-on-Graph: Deep and Responsible Reasoning of Large Language Model on Knowledge Graph", + "abstract": "Although large language models (LLMs) have achieved significant success in\nvarious tasks, they often struggle with hallucination problems, especially in\nscenarios requiring deep and responsible reasoning. These issues could be\npartially addressed by introducing external knowledge graphs (KG) in LLM\nreasoning. In this paper, we propose a new LLM-KG integrating paradigm\n``$\\hbox{LLM}\\otimes\\hbox{KG}$'' which treats the LLM as an agent to\ninteractively explore related entities and relations on KGs and perform\nreasoning based on the retrieved knowledge. We further implement this paradigm\nby introducing a new approach called Think-on-Graph (ToG), in which the LLM\nagent iteratively executes beam search on KG, discovers the most promising\nreasoning paths, and returns the most likely reasoning results. We use a number\nof well-designed experiments to examine and illustrate the following advantages\nof ToG: 1) compared with LLMs, ToG has better deep reasoning power; 2) ToG has\nthe ability of knowledge traceability and knowledge correctability by\nleveraging LLMs reasoning and expert feedback; 3) ToG provides a flexible\nplug-and-play framework for different LLMs, KGs and prompting strategies\nwithout any additional training cost; 4) the performance of ToG with small LLM\nmodels could exceed large LLM such as GPT-4 in certain scenarios and this\nreduces the cost of LLM deployment and application. As a training-free method\nwith lower computational cost and better generality, ToG achieves overall SOTA\nin 6 out of 9 datasets where most previous SOTAs rely on additional training.", + "authors": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel M. Ni, Heung-Yeung Shum, Jian Guo", + "published": "2023-07-15", + "updated": "2024-03-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2310.01061v2", + "title": "Reasoning on Graphs: Faithful and Interpretable Large Language Model Reasoning", + "abstract": "Large language models (LLMs) have demonstrated impressive reasoning abilities\nin complex tasks. However, they lack up-to-date knowledge and experience\nhallucinations during reasoning, which can lead to incorrect reasoning\nprocesses and diminish their performance and trustworthiness. Knowledge graphs\n(KGs), which capture vast amounts of facts in a structured format, offer a\nreliable source of knowledge for reasoning. 
Nevertheless, existing KG-based LLM\nreasoning methods only treat KGs as factual knowledge bases and overlook the\nimportance of their structural information for reasoning. In this paper, we\npropose a novel method called reasoning on graphs (RoG) that synergizes LLMs\nwith KGs to enable faithful and interpretable reasoning. Specifically, we\npresent a planning-retrieval-reasoning framework, where RoG first generates\nrelation paths grounded by KGs as faithful plans. These plans are then used to\nretrieve valid reasoning paths from the KGs for LLMs to conduct faithful\nreasoning. Furthermore, RoG not only distills knowledge from KGs to improve the\nreasoning ability of LLMs through training but also allows seamless integration\nwith any arbitrary LLMs during inference. Extensive experiments on two\nbenchmark KGQA datasets demonstrate that RoG achieves state-of-the-art\nperformance on KG reasoning tasks and generates faithful and interpretable\nreasoning results.", + "authors": "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, Shirui Pan", + "published": "2023-10-02", + "updated": "2024-02-24", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1809.00782v1", + "title": "Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text", + "abstract": "Open Domain Question Answering (QA) is evolving from complex pipelined\nsystems to end-to-end deep neural networks. Specialized neural models have been\ndeveloped for extracting answers from either text alone or Knowledge Bases\n(KBs) alone. In this paper we look at a more practical setting, namely QA over\nthe combination of a KB and entity-linked text, which is appropriate when an\nincomplete KB is available with a large text corpus. Building on recent\nadvances in graph representation learning we propose a novel model, GRAFT-Net,\nfor extracting answers from a question-specific subgraph containing text and KB\nentities and relations. We construct a suite of benchmark tasks for this\nproblem, varying the difficulty of questions, the amount of training data, and\nKB completeness. We show that GRAFT-Net is competitive with the\nstate-of-the-art when tested using either KBs or text alone, and vastly\noutperforms existing methods in the combined setting. Source code is available\nat https://github.com/OceanskySun/GraftNet .", + "authors": "Haitian Sun, Bhuwan Dhingra, Manzil Zaheer, Kathryn Mazaitis, Ruslan Salakhutdinov, William W. Cohen", + "published": "2018-09-04", + "updated": "2018-09-04", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2312.02783v2", + "title": "Large Language Models on Graphs: A Comprehensive Survey", + "abstract": "Large language models (LLMs), such as GPT4 and LLaMA, are creating\nsignificant advancements in natural language processing, due to their strong\ntext encoding/decoding ability and newly found emergent capability (e.g.,\nreasoning). While LLMs are mainly designed to process pure texts, there are\nmany real-world scenarios where text data is associated with rich structure\ninformation in the form of graphs (e.g., academic networks, and e-commerce\nnetworks) or scenarios where graph data is paired with rich textual information\n(e.g., molecules with descriptions). Besides, although LLMs have shown their\npure text-based reasoning ability, it is underexplored whether such ability can\nbe generalized to graphs (i.e., graph-based reasoning). 
In this paper, we\nprovide a systematic review of scenarios and techniques related to large\nlanguage models on graphs. We first summarize potential scenarios of adopting\nLLMs on graphs into three categories, namely pure graphs, text-attributed\ngraphs, and text-paired graphs. We then discuss detailed techniques for\nutilizing LLMs on graphs, including LLM as Predictor, LLM as Encoder, and LLM\nas Aligner, and compare the advantages and disadvantages of different schools\nof models. Furthermore, we discuss the real-world applications of such methods\nand summarize open-source codes and benchmark datasets. Finally, we conclude\nwith potential future research directions in this fast-growing field. The\nrelated source can be found at\nhttps://github.com/PeterGriffinJin/Awesome-Language-Model-on-Graphs.", + "authors": "Bowen Jin, Gang Liu, Chi Han, Meng Jiang, Heng Ji, Jiawei Han", + "published": "2023-12-05", + "updated": "2024-02-01", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2308.09729v5", + "title": "MindMap: Knowledge Graph Prompting Sparks Graph of Thoughts in Large Language Models", + "abstract": "Large language models (LLMs) have achieved remarkable performance in natural\nlanguage understanding and generation tasks. However, they often suffer from\nlimitations such as difficulty in incorporating new knowledge, generating\nhallucinations, and explaining their reasoning process. To address these\nchallenges, we propose a novel prompting pipeline, named \\method, that\nleverages knowledge graphs (KGs) to enhance LLMs' inference and transparency.\nOur method enables LLMs to comprehend KG inputs and infer with a combination of\nimplicit and external knowledge. Moreover, our method elicits the mind map of\nLLMs, which reveals their reasoning pathways based on the ontology of\nknowledge. We evaluate our method on diverse question \\& answering tasks,\nespecially in medical domains, and show significant improvements over\nbaselines. We also introduce a new hallucination evaluation benchmark and\nanalyze the effects of different components of our method. Our results\ndemonstrate the effectiveness and robustness of our method in merging knowledge\nfrom LLMs and KGs for combined inference. To reproduce our results and extend\nthe framework further, we make our codebase available at\nhttps://github.com/wyl-willing/MindMap.", + "authors": "Yilin Wen, Zifeng Wang, Jimeng Sun", + "published": "2023-08-17", + "updated": "2024-03-02", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1404.4326v1", + "title": "Open Question Answering with Weakly Supervised Embedding Models", + "abstract": "Building computers able to answer questions on any subject is a long standing\ngoal of artificial intelligence. Promising progress has recently been achieved\nby methods that learn to map questions to logical forms or database queries.\nSuch approaches can be effective but at the cost of either large amounts of\nhuman-labeled data or by defining lexicons and grammars tailored by\npractitioners. In this paper, we instead take the radical approach of learning\nto map questions to vectorial feature representations. By mapping answers into\nthe same space one can query any knowledge base independent of its schema,\nwithout requiring any grammar or lexicon. 
Our method is trained with a new\noptimization procedure combining stochastic gradient descent followed by a\nfine-tuning step using the weak supervision provided by blending automatically\nand collaboratively generated resources. We empirically demonstrate that our\nmodel can capture meaningful signals from its noisy supervision leading to\nmajor improvements over paralex, the only existing method able to be trained on\nsimilar weakly labeled data.", + "authors": "Antoine Bordes, Jason Weston, Nicolas Usunier", + "published": "2014-04-16", + "updated": "2014-04-16", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2308.10173v1", + "title": "FoodGPT: A Large Language Model in Food Testing Domain with Incremental Pre-training and Knowledge Graph Prompt", + "abstract": "Currently, the construction of large language models in specific domains is\ndone by fine-tuning on a base model. Some models also incorporate knowledge\nbases without the need for pre-training. This is because the base model already\ncontains domain-specific knowledge during the pre-training process. We build a\nlarge language model for food testing. Unlike the above approach, a significant\namount of data in this domain exists in Scanning format for domain standard\ndocuments. In addition, there is a large amount of untrained structured\nknowledge. Therefore, we introduce an incremental pre-training step to inject\nthis knowledge into a large language model. In this paper, we propose a method\nfor handling structured knowledge and scanned documents in incremental\npre-training. To overcome the problem of machine hallucination, we constructe a\nknowledge graph to serve as an external knowledge base for supporting retrieval\nin the large language model. It is worth mentioning that this paper is a\ntechnical report of our pre-release version, and we will report our specific\nexperimental data in future versions.", + "authors": "Zhixiao Qi, Yijiong Yu, Meiqi Tu, Junyi Tan, Yongfeng Huang", + "published": "2023-08-20", + "updated": "2023-08-20", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2312.04762v1", + "title": "The Graph Lottery Ticket Hypothesis: Finding Sparse, Informative Graph Structure", + "abstract": "Graph learning methods help utilize implicit relationships among data items,\nthereby reducing training label requirements and improving task performance.\nHowever, determining the optimal graph structure for a particular learning task\nremains a challenging research problem.\n In this work, we introduce the Graph Lottery Ticket (GLT) Hypothesis - that\nthere is an extremely sparse backbone for every graph, and that graph learning\nalgorithms attain comparable performance when trained on that subgraph as on\nthe full graph. We identify and systematically study 8 key metrics of interest\nthat directly influence the performance of graph learning algorithms.\nSubsequently, we define the notion of a \"winning ticket\" for graph structure -\nan extremely sparse subset of edges that can deliver a robust approximation of\nthe entire graph's performance. We propose a straightforward and efficient\nalgorithm for finding these GLTs in arbitrary graphs. 
Empirically, we observe\nthat performance of different graph learning algorithms can be matched or even\nexceeded on graphs with the average degree as low as 5.", + "authors": "Anton Tsitsulin, Bryan Perozzi", + "published": "2023-12-08", + "updated": "2023-12-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2104.08163v1", + "title": "Finding Motifs in Knowledge Graphs using Compression", + "abstract": "We introduce a method to find network motifs in knowledge graphs. Network\nmotifs are useful patterns or meaningful subunits of the graph that recur\nfrequently. We extend the common definition of a network motif to coincide with\na basic graph pattern. We introduce an approach, inspired by recent work for\nsimple graphs, to induce these from a given knowledge graph, and show that the\nmotifs found reflect the basic structure of the graph. Specifically, we show\nthat in random graphs, no motifs are found, and that when we insert a motif\nartificially, it can be detected. Finally, we show the results of motif\ninduction on three real-world knowledge graphs.", + "authors": "Peter Bloem", + "published": "2021-04-16", + "updated": "2021-04-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.DS", + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1104.5256v1", + "title": "Learning Undirected Graphical Models with Structure Penalty", + "abstract": "In undirected graphical models, learning the graph structure and learning the\nfunctions that relate the predictive variables (features) to the responses\ngiven the structure are two topics that have been widely investigated in\nmachine learning and statistics. Learning graphical models in two stages will\nhave problems because graph structure may change after considering the\nfeatures. The main contribution of this paper is the proposed method that\nlearns the graph structure and functions on the graph at the same time. General\ngraphical models with binary outcomes conditioned on predictive variables are\nproved to be equivalent to multivariate Bernoulli model. The reparameterization\nof the potential functions in graphical model by conditional log odds ratios in\nmultivariate Bernoulli model offers advantage in the representation of the\nconditional independence structure in the model. Additionally, we impose a\nstructure penalty on groups of conditional log odds ratios to learn the graph\nstructure. These groups of functions are designed with overlaps to enforce\nhierarchical function selection. In this way, we are able to shrink higher\norder interactions to obtain a sparse graph structure. Simulation studies show\nthat the method is able to recover the graph structure. The analysis of county\ndata from Census Bureau gives interesting relations between unemployment rate,\ncrime and others discovered by the model.", + "authors": "Shilin Ding", + "published": "2011-04-27", + "updated": "2011-04-27", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1906.02319v1", + "title": "DEMO-Net: Degree-specific Graph Neural Networks for Node and Graph Classification", + "abstract": "Graph data widely exist in many high-impact applications. 
Inspired by the\nsuccess of deep learning in grid-structured data, graph neural network models\nhave been proposed to learn powerful node-level or graph-level representation.\nHowever, most of the existing graph neural networks suffer from the following\nlimitations: (1) there is limited analysis regarding the graph convolution\nproperties, such as seed-oriented, degree-aware and order-free; (2) the node's\ndegree-specific graph structure is not explicitly expressed in graph\nconvolution for distinguishing structure-aware node neighborhoods; (3) the\ntheoretical explanation regarding the graph-level pooling schemes is unclear.\n To address these problems, we propose a generic degree-specific graph neural\nnetwork named DEMO-Net motivated by Weisfeiler-Lehman graph isomorphism test\nthat recursively identifies 1-hop neighborhood structures. In order to\nexplicitly capture the graph topology integrated with node attributes, we argue\nthat graph convolution should have three properties: seed-oriented,\ndegree-aware, order-free. To this end, we propose multi-task graph convolution\nwhere each task represents node representation learning for nodes with a\nspecific degree value, thus leading to preserving the degree-specific graph\nstructure. In particular, we design two multi-task learning methods:\ndegree-specific weight and hashing functions for graph convolution. In\naddition, we propose a novel graph-level pooling/readout scheme for learning\ngraph representation provably lying in a degree-specific Hilbert kernel space.\nThe experimental results on several node and graph classification benchmark\ndata sets demonstrate the effectiveness and efficiency of our proposed DEMO-Net\nover state-of-the-art graph neural network models.", + "authors": "Jun Wu, Jingrui He, Jiejun Xu", + "published": "2019-06-05", + "updated": "2019-06-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1911.08776v2", + "title": "Joint Embedding Learning of Educational Knowledge Graphs", + "abstract": "As an efficient model for knowledge organization, the knowledge graph has\nbeen widely adopted in several fields, e.g., biomedicine, sociology, and\neducation. And there is a steady trend of learning embedding representations of\nknowledge graphs to facilitate knowledge graph construction and downstream\ntasks. In general, knowledge graph embedding techniques aim to learn vectorized\nrepresentations which preserve the structural information of the graph. And\nconventional embedding learning models rely on structural relationships among\nentities and relations. However, in educational knowledge graphs, structural\nrelationships are not the focus. Instead, rich literals of the graphs are more\nvaluable. In this paper, we focus on this problem and propose a novel model for\nembedding learning of educational knowledge graphs. Our model considers both\nstructural and literal information and jointly learns embedding\nrepresentations. Three experimental graphs were constructed based on an\neducational knowledge graph which has been applied in real-world teaching. We\nconducted two experiments on the three graphs and other common benchmark\ngraphs. 
The experimental results proved the effectiveness of our model and its\nsuperiority over other baselines when processing educational knowledge graphs.", + "authors": "Siyu Yao, Ruijie Wang, Shen Sun, Derui Bu, Jun Liu", + "published": "2019-11-20", + "updated": "2019-12-23", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2105.00696v1", + "title": "Graph Learning: A Survey", + "abstract": "Graphs are widely used as a popular representation of the network structure\nof connected data. Graph data can be found in a broad spectrum of application\ndomains such as social systems, ecosystems, biological networks, knowledge\ngraphs, and information systems. With the continuous penetration of artificial\nintelligence technologies, graph learning (i.e., machine learning on graphs) is\ngaining attention from both researchers and practitioners. Graph learning\nproves effective for many tasks, such as classification, link prediction, and\nmatching. Generally, graph learning methods extract relevant features of graphs\nby taking advantage of machine learning algorithms. In this survey, we present\na comprehensive overview on the state-of-the-art of graph learning. Special\nattention is paid to four categories of existing graph learning methods,\nincluding graph signal processing, matrix factorization, random walk, and deep\nlearning. Major models and algorithms under these categories are reviewed\nrespectively. We examine graph learning applications in areas such as text,\nimages, science, knowledge graphs, and combinatorial optimization. In addition,\nwe discuss several promising research directions in this field.", + "authors": "Feng Xia, Ke Sun, Shuo Yu, Abdul Aziz, Liangtian Wan, Shirui Pan, Huan Liu", + "published": "2021-05-03", + "updated": "2021-05-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.SI", + "68T07", + "I.2.6" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2212.04934v1", + "title": "Learning Graph Algorithms With Recurrent Graph Neural Networks", + "abstract": "Classical graph algorithms work well for combinatorial problems that can be\nthoroughly formalized and abstracted. Once the algorithm is derived, it\ngeneralizes to instances of any size. However, developing an algorithm that\nhandles complex structures and interactions in the real world can be\nchallenging. Rather than specifying the algorithm, we can try to learn it from\nthe graph-structured data. Graph Neural Networks (GNNs) are inherently capable\nof working on graph structures; however, they struggle to generalize well, and\nlearning on larger instances is challenging. In order to scale, we focus on a\nrecurrent architecture design that can learn simple graph problems end to end\non smaller graphs and then extrapolate to larger instances. As our main\ncontribution, we identify three essential techniques for recurrent GNNs to\nscale. By using (i) skip connections, (ii) state regularization, and (iii) edge\nconvolutions, we can guide GNNs toward extrapolation. This allows us to train\non small graphs and apply the same model to much larger graphs during\ninference. 
Moreover, we empirically validate the extrapolation capabilities of\nour GNNs on algorithmic datasets.", + "authors": "Florian Gr\u00f6tschla, Jo\u00ebl Mathys, Roger Wattenhofer", + "published": "2022-12-09", + "updated": "2022-12-09", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2109.11796v1", + "title": "Edge but not Least: Cross-View Graph Pooling", + "abstract": "Graph neural networks have emerged as a powerful model for graph\nrepresentation learning to undertake graph-level prediction tasks. Various\ngraph pooling methods have been developed to coarsen an input graph into a\nsuccinct graph-level representation through aggregating node embeddings\nobtained via graph convolution. However, most graph pooling methods are heavily\nnode-centric and are unable to fully leverage the crucial information contained\nin global graph structure. This paper presents a cross-view graph pooling\n(Co-Pooling) method to better exploit crucial graph structure information. The\nproposed Co-Pooling fuses pooled representations learnt from both node view and\nedge view. Through cross-view interaction, edge-view pooling and node-view\npooling seamlessly reinforce each other to learn more informative graph-level\nrepresentations. Co-Pooling has the advantage of handling various graphs with\ndifferent types of node attributes. Extensive experiments on a total of 15\ngraph benchmark datasets validate the effectiveness of our proposed method,\ndemonstrating its superior performance over state-of-the-art pooling methods on\nboth graph classification and graph regression tasks.", + "authors": "Xiaowei Zhou, Jie Yin, Ivor W. Tsang", + "published": "2021-09-24", + "updated": "2021-09-24", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1609.04350v2", + "title": "Time-Variant Graph Classification", + "abstract": "Graphs are commonly used to represent objects, such as images and text, for\npattern classification. In a dynamic world, an object may continuously evolve\nover time, and so does the graph extracted from the underlying object. These\nchanges in graph structure with respect to the temporal order present a new\nrepresentation of the graph, in which an object corresponds to a set of\ntime-variant graphs. In this paper, we formulate a novel time-variant graph\nclassification task and propose a new graph feature, called a graph-shapelet\npattern, for learning and classifying time-variant graphs. Graph-shapelet\npatterns are compact and discriminative graph transformation subsequences. A\ngraph-shapelet pattern can be regarded as a graphical extension of a shapelet\n-- a class of discriminative features designed for vector-based temporal data\nclassification. To discover graph-shapelet patterns, we propose to convert a\ntime-variant graph sequence into time-series data and use the discovered\nshapelets to find graph transformation subsequences as graph-shapelet patterns.\nBy converting each graph-shapelet pattern into a unique tokenized graph\ntransformation sequence, we can measure the similarity between two\ngraph-shapelet patterns and therefore classify time-variant graphs. 
Experiments\non both synthetic and real-world data demonstrate the superior performance of\nthe proposed algorithms.", + "authors": "Haishuai Wang", + "published": "2016-09-14", + "updated": "2017-06-12", + "primary_cat": "cs.DS", + "cats": [ + "cs.DS" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2307.02126v1", + "title": "Robust Graph Structure Learning with the Alignment of Features and Adjacency Matrix", + "abstract": "To improve the robustness of graph neural networks (GNN), graph structure\nlearning (GSL) has attracted great interest due to the pervasiveness of noise\nin graph data. Many approaches have been proposed for GSL to jointly learn a\nclean graph structure and corresponding representations. To extend the previous\nwork, this paper proposes a novel regularized GSL approach, particularly with\nan alignment of feature information and graph information, which is motivated\nmainly by our derived lower bound of node-level Rademacher complexity for GNNs.\nAdditionally, our proposed approach incorporates sparse dimensional reduction\nto leverage low-dimensional node features that are relevant to the graph\nstructure. To evaluate the effectiveness of our approach, we conduct\nexperiments on real-world graphs. The results demonstrate that our proposed GSL\nmethod outperforms several competitive baselines, especially in scenarios where\nthe graph structures are heavily affected by noise. Overall, our research\nhighlights the importance of integrating feature and graph information\nalignment in GSL, as inspired by our derived theoretical result, and showcases\nthe superiority of our approach in handling noisy graph structures through\ncomprehensive experiments on real-world datasets.", + "authors": "Shaogao Lv, Gang Wen, Shiyu Liu, Linsen Wei, Ming Li", + "published": "2023-07-05", + "updated": "2023-07-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2403.04923v2", + "title": "Control-based Graph Embeddings with Data Augmentation for Contrastive Learning", + "abstract": "In this paper, we study the problem of unsupervised graph representation\nlearning by harnessing the control properties of dynamical networks defined on\ngraphs. Our approach introduces a novel framework for contrastive learning, a\nwidely prevalent technique for unsupervised representation learning. A crucial\nstep in contrastive learning is the creation of 'augmented' graphs from the\ninput graphs. Though different from the original graphs, these augmented graphs\nretain the original graph's structural characteristics. Here, we propose a\nunique method for generating these augmented graphs by leveraging the control\nproperties of networks. The core concept revolves around perturbing the\noriginal graph to create a new one while preserving the controllability\nproperties specific to networks and graphs. Compared to the existing methods,\nwe demonstrate that this innovative approach enhances the effectiveness of\ncontrastive learning frameworks, leading to superior results regarding the\naccuracy of the classification tasks. 
The key innovation lies in our ability to\ndecode the network structure using these control properties, opening new\navenues for unsupervised graph representation learning.", + "authors": "Obaid Ullah Ahmad, Anwar Said, Mudassir Shabbir, Waseem Abbas, Xenofon Koutsoukos", + "published": "2024-03-07", + "updated": "2024-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.MA", + "cs.SY", + "eess.SY" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1911.05954v3", + "title": "Hierarchical Graph Pooling with Structure Learning", + "abstract": "Graph Neural Networks (GNNs), which generalize deep neural networks to\ngraph-structured data, have drawn considerable attention and achieved\nstate-of-the-art performance in numerous graph related tasks. However, existing\nGNN models mainly focus on designing graph convolution operations. The graph\npooling (or downsampling) operations, that play an important role in learning\nhierarchical representations, are usually overlooked. In this paper, we propose\na novel graph pooling operator, called Hierarchical Graph Pooling with\nStructure Learning (HGP-SL), which can be integrated into various graph neural\nnetwork architectures. HGP-SL incorporates graph pooling and structure learning\ninto a unified module to generate hierarchical representations of graphs. More\nspecifically, the graph pooling operation adaptively selects a subset of nodes\nto form an induced subgraph for the subsequent layers. To preserve the\nintegrity of graph's topological information, we further introduce a structure\nlearning mechanism to learn a refined graph structure for the pooled graph at\neach layer. By combining HGP-SL operator with graph neural networks, we perform\ngraph level representation learning with focus on graph classification task.\nExperimental results on six widely used benchmarks demonstrate the\neffectiveness of our proposed model.", + "authors": "Zhen Zhang, Jiajun Bu, Martin Ester, Jianfeng Zhang, Chengwei Yao, Zhi Yu, Can Wang", + "published": "2019-11-14", + "updated": "2019-12-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2209.07817v2", + "title": "SPGP: Structure Prototype Guided Graph Pooling", + "abstract": "While graph neural networks (GNNs) have been successful for node\nclassification tasks and link prediction tasks in graph, learning graph-level\nrepresentations still remains a challenge. For the graph-level representation,\nit is important to learn both representation of neighboring nodes, i.e.,\naggregation, and graph structural information. A number of graph pooling\nmethods have been developed for this goal. However, most of the existing\npooling methods utilize k-hop neighborhood without considering explicit\nstructural information in a graph. In this paper, we propose Structure\nPrototype Guided Pooling (SPGP) that utilizes prior graph structures to\novercome the limitation. SPGP formulates graph structures as learnable\nprototype vectors and computes the affinity between nodes and prototype\nvectors. This leads to a novel node scoring scheme that prioritizes informative\nnodes while encapsulating the useful structures of the graph. 
Our experimental\nresults show that SPGP outperforms state-of-the-art graph pooling methods on\ngraph classification benchmark datasets in both accuracy and scalability.", + "authors": "Sangseon Lee, Dohoon Lee, Yinhua Piao, Sun Kim", + "published": "2022-09-16", + "updated": "2023-03-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2006.14002v1", + "title": "Bi-Level Graph Neural Networks for Drug-Drug Interaction Prediction", + "abstract": "We introduce Bi-GNN for modeling biological link prediction tasks such as\ndrug-drug interaction (DDI) and protein-protein interaction (PPI). Taking\ndrug-drug interaction as an example, existing methods using machine learning\neither only utilize the link structure between drugs without using the graph\nrepresentation of each drug molecule, or only leverage the individual drug\ncompound structures without using graph structure for the higher-level DDI\ngraph. The key idea of our method is to fundamentally view the data as a\nbi-level graph, where the highest level graph represents the interaction\nbetween biological entities (interaction graph), and each biological entity\nitself is further expanded to its intrinsic graph representation\n(representation graphs), where the graph is either flat like a drug compound or\nhierarchical like a protein with amino acid level graph, secondary structure,\ntertiary structure, etc. Our model not only allows the usage of information\nfrom both the high-level interaction graph and the low-level representation\ngraphs, but also offers a baseline for future research opportunities to address\nthe bi-level nature of the data.", + "authors": "Yunsheng Bai, Ken Gu, Yizhou Sun, Wei Wang", + "published": "2020-06-11", + "updated": "2020-06-11", + "primary_cat": "cs.CE", + "cats": [ + "cs.CE", + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1910.01743v1", + "title": "Graph Generation with Variational Recurrent Neural Network", + "abstract": "Generating graph structures is a challenging problem due to the diverse\nrepresentations and complex dependencies among nodes. In this paper, we\nintroduce Graph Variational Recurrent Neural Network (GraphVRNN), a\nprobabilistic autoregressive model for graph generation. Through modeling the\nlatent variables of graph data, GraphVRNN can capture the joint distributions\nof graph structures and the underlying node attributes. We conduct experiments\non the proposed GraphVRNN in both graph structure learning and attribute\ngeneration tasks. The evaluation results show that the variational component\nallows our network to model complicated distributions, as well as generate\nplausible structures and node attributes.", + "authors": "Shih-Yang Su, Hossein Hajimirsadeghi, Greg Mori", + "published": "2019-10-02", + "updated": "2019-10-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.07699v2", + "title": "Time-aware Graph Structure Learning via Sequence Prediction on Temporal Graphs", + "abstract": "Temporal Graph Learning, which aims to model the time-evolving nature of\ngraphs, has gained increasing attention and achieved remarkable performance\nrecently. 
However, in reality, graph structures are often incomplete and noisy,\nwhich hinders temporal graph networks (TGNs) from learning informative\nrepresentations. Graph contrastive learning uses data augmentation to generate\nplausible variations of existing data and learn robust representations.\nHowever, rule-based augmentation approaches may be suboptimal as they lack\nlearnability and fail to leverage rich information from downstream tasks. To\naddress these issues, we propose a Time-aware Graph Structure Learning (TGSL)\napproach via sequence prediction on temporal graphs, which learns better graph\nstructures for downstream tasks through adding potential temporal edges. In\nparticular, it predicts time-aware context embedding based on previously\nobserved interactions and uses the Gumble-Top-K to select the closest candidate\nedges to this context embedding. Additionally, several candidate sampling\nstrategies are proposed to ensure both efficiency and diversity. Furthermore,\nwe jointly learn the graph structure and TGNs in an end-to-end manner and\nperform inference on the refined graph. Extensive experiments on temporal link\nprediction benchmarks demonstrate that TGSL yields significant gains for the\npopular TGNs such as TGAT and GraphMixer, and it outperforms other contrastive\nlearning methods on temporal graphs. We release the code at\nhttps://github.com/ViktorAxelsen/TGSL.", + "authors": "Haozhen Zhang, Xueting Han, Xi Xiao, Jing Bai", + "published": "2023-06-13", + "updated": "2023-08-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.09792v1", + "title": "A Unified Framework for Structured Graph Learning via Spectral Constraints", + "abstract": "Graph learning from data represents a canonical problem that has received\nsubstantial attention in the literature. However, insufficient work has been\ndone in incorporating prior structural knowledge onto the learning of\nunderlying graphical models from data. Learning a graph with a specific\nstructure is essential for interpretability and identification of the\nrelationships among data. Useful structured graphs include the multi-component\ngraph, bipartite graph, connected graph, sparse graph, and regular graph. In\ngeneral, structured graph learning is an NP-hard combinatorial problem,\ntherefore, designing a general tractable optimization method is extremely\nchallenging. In this paper, we introduce a unified graph learning framework\nlying at the integration of Gaussian graphical models and spectral graph\ntheory. To impose a particular structure on a graph, we first show how to\nformulate the combinatorial constraints as an analytical property of the graph\nmatrix. Then we develop an optimization framework that leverages graph learning\nwith specific structures via spectral constraints on graph matrices. The\nproposed algorithms are provably convergent, computationally efficient, and\npractically amenable for numerous graph-based tasks. Extensive numerical\nexperiments with both synthetic and real data sets illustrate the effectiveness\nof the proposed algorithms. The code for all the simulations is made available\nas an open source repository.", + "authors": "Sandeep Kumar, Jiaxi Ying, Jos\u00e9 Vin\u00edcius de M. 
Cardoso, Daniel Palomar", + "published": "2019-04-22", + "updated": "2019-04-22", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "cs.SI", + "math.OC" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1901.07439v1", + "title": "Multiple Graph Adversarial Learning", + "abstract": "Recently, Graph Convolutional Networks (GCNs) have been widely studied for\ngraph-structured data representation and learning. However, in many real\napplications, data are coming with multiple graphs, and it is non-trivial to\nadapt GCNs to deal with data representation with multiple graph structures. One\nmain challenge for multi-graph representation is how to exploit both structure\ninformation of each individual graph and correlation information across\nmultiple graphs simultaneously. In this paper, we propose a novel Multiple\nGraph Adversarial Learning (MGAL) framework for multi-graph representation and\nlearning. MGAL aims to learn an optimal structure-invariant and consistent\nrepresentation for multiple graphs in a common subspace via a novel adversarial\nlearning framework, which thus incorporates both structure information of\nintra-graph and correlation information of inter-graphs simultaneously. Based\non MGAL, we then provide a unified network for semi-supervised learning task.\nPromising experimental results demonstrate the effectiveness of MGAL model.", + "authors": "Bo Jiang, Ziyan Zhang, Jin Tang, Bin Luo", + "published": "2019-01-22", + "updated": "2019-01-22", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2109.11898v1", + "title": "Graph Learning Augmented Heterogeneous Graph Neural Network for Social Recommendation", + "abstract": "Social recommendation based on social network has achieved great success in\nimproving the performance of recommendation system. Since social network\n(user-user relations) and user-item interactions are both naturally represented\nas graph-structured data, Graph Neural Networks (GNNs) have thus been widely\napplied for social recommendation. In this work, we propose an end-to-end\nheterogeneous global graph learning framework, namely Graph Learning Augmented\nHeterogeneous Graph Neural Network (GL-HGNN) for social recommendation. GL-HGNN\naims to learn a heterogeneous global graph that makes full use of user-user\nrelations, user-item interactions and item-item similarities in a unified\nperspective. To this end, we design a Graph Learner (GL) method to learn and\noptimize user-user and item-item connections separately. Moreover, we employ a\nHeterogeneous Graph Neural Network (HGNN) to capture the high-order complex\nsemantic relations from our learned heterogeneous global graph. To scale up the\ncomputation of graph learning, we further present the Anchor-based Graph\nLearner (AGL) to reduce computational complexity. 
Extensive experiments on four\nreal-world datasets demonstrate the effectiveness of our model.", + "authors": "Yiming Zhang, Lingfei Wu, Qi Shen, Yitong Pang, Zhihua Wei, Fangli Xu, Ethan Chang, Bo Long", + "published": "2021-09-24", + "updated": "2021-09-24", + "primary_cat": "cs.IR", + "cats": [ + "cs.IR" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.11307v3", + "title": "Transforming Graphs for Enhanced Attribute Clustering: An Innovative Graph Transformer-Based Method", + "abstract": "Graph Representation Learning (GRL) is an influential methodology, enabling a\nmore profound understanding of graph-structured data and aiding graph\nclustering, a critical task across various domains. The recent incursion of\nattention mechanisms, originally an artifact of Natural Language Processing\n(NLP), into the realm of graph learning has spearheaded a notable shift in\nresearch trends. Consequently, Graph Attention Networks (GATs) and Graph\nAttention Auto-Encoders have emerged as preferred tools for graph clustering\ntasks. Yet, these methods primarily employ a local attention mechanism, thereby\ncurbing their capacity to apprehend the intricate global dependencies between\nnodes within graphs. Addressing these impediments, this study introduces an\ninnovative method known as the Graph Transformer Auto-Encoder for Graph\nClustering (GTAGC). By melding the Graph Auto-Encoder with the Graph\nTransformer, GTAGC is adept at capturing global dependencies between nodes.\nThis integration amplifies the graph representation and surmounts the\nconstraints posed by the local attention mechanism. The architecture of GTAGC\nencompasses graph embedding, integration of the Graph Transformer within the\nautoencoder structure, and a clustering component. It strategically alternates\nbetween graph embedding and clustering, thereby tailoring the Graph Transformer\nfor clustering tasks, whilst preserving the graph's global structural\ninformation. Through extensive experimentation on diverse benchmark datasets,\nGTAGC has exhibited superior performance against existing state-of-the-art\ngraph clustering methodologies.", + "authors": "Shuo Han, Jiacheng Liu, Jiayun Wu, Yinan Chen, Li Tao", + "published": "2023-06-20", + "updated": "2023-08-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2210.02060v1", + "title": "Graph Classification via Discriminative Edge Feature Learning", + "abstract": "Spectral graph convolutional neural networks (GCNNs) have been producing\nencouraging results in graph classification tasks. However, most spectral GCNNs\nutilize fixed graphs when aggregating node features, while omitting edge\nfeature learning and failing to get an optimal graph structure. Moreover, many\nexisting graph datasets do not provide initialized edge features, further\nrestraining the ability of learning edge features via spectral GCNNs. In this\npaper, we try to address this issue by designing an edge feature scheme and an\nadd-on layer between every two stacked graph convolution layers in GCNN. Both\nare lightweight while effective in filling the gap between edge feature\nlearning and performance enhancement of graph classification. The edge feature\nscheme makes edge features adapt to node representations at different graph\nconvolution layers. The add-on layers help adjust the edge features to an\noptimal graph structure. 
To test the effectiveness of our method, we take\nEuclidean positions as initial node features and extract graphs with semantic\ninformation from point cloud objects. The node features of our extracted graphs\nare more scalable for edge feature learning than most existing graph datasets\n(in one-hot encoded label format). Three new graph datasets are constructed\nbased on ModelNet40, ModelNet10 and ShapeNet Part datasets. Experimental\nresults show that our method outperforms state-of-the-art graph classification\nmethods on the new datasets by reaching 96.56% overall accuracy on\nGraph-ModelNet40, 98.79% on Graph-ModelNet10 and 97.91% on Graph-ShapeNet Part.\nThe constructed graph datasets will be released to the community.", + "authors": "Yang Yi, Xuequan Lu, Shang Gao, Antonio Robles-Kelly, Yuejie Zhang", + "published": "2022-10-05", + "updated": "2022-10-05", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1611.04687v2", + "title": "Intrinsic Geometric Information Transfer Learning on Multiple Graph-Structured Datasets", + "abstract": "Graphs provide a powerful means for representing complex interactions between\nentities. Recently, deep learning approaches are emerging for representing and\nmodeling graph-structured data, although the conventional deep learning methods\n(such as convolutional neural networks and recurrent neural networks) have\nmainly focused on grid-structured inputs (image and audio). Leveraged by the\ncapability of representation learning, deep learning based techniques are\nreporting promising results for graph applications by detecting structural\ncharacteristics of graphs in an automated fashion. In this paper, we attempt to\nadvance deep learning for graph-structured data by incorporating another\ncomponent, transfer learning. By transferring the intrinsic geometric\ninformation learned in the source domain, our approach can help us to construct\na model for a new but related task in the target domain without collecting new\ndata and without training a new model from scratch. We thoroughly test our\napproach with large-scale real corpora and confirm the effectiveness of the\nproposed transfer learning framework for deep learning on graphs. According to\nour experiments, transfer learning is most effective when the source and target\ndomains bear a high level of structural similarity in their graph\nrepresentations.", + "authors": "Jaekoo Lee, Hyunjae Kim, Jongsun Lee, Sungroh Yoon", + "published": "2016-11-15", + "updated": "2016-12-05", + "primary_cat": "cs.NE", + "cats": [ + "cs.NE" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2402.16374v2", + "title": "Graph Learning under Distribution Shifts: A Comprehensive Survey on Domain Adaptation, Out-of-distribution, and Continual Learning", + "abstract": "Graph learning plays a pivotal role and has gained significant attention in\nvarious application scenarios, from social network analysis to recommendation\nsystems, for its effectiveness in modeling complex data relations represented\nby graph structural data. In reality, the real-world graph data typically show\ndynamics over time, with changing node attributes and edge structure, leading\nto the severe graph data distribution shift issue. 
This issue is compounded by\nthe diverse and complex nature of distribution shifts, which can significantly\nimpact the performance of graph learning methods in degraded generalization and\nadaptation capabilities, posing a substantial challenge to their effectiveness.\nIn this survey, we provide a comprehensive review and summary of the latest\napproaches, strategies, and insights that address distribution shifts within\nthe context of graph learning. Concretely, according to the observability of\ndistributions in the inference stage and the availability of sufficient\nsupervision information in the training stage, we categorize existing graph\nlearning methods into several essential scenarios, including graph domain\nadaptation learning, graph out-of-distribution learning, and graph continual\nlearning. For each scenario, a detailed taxonomy is proposed, with specific\ndescriptions and discussions of existing progress made in distribution-shifted\ngraph learning. Additionally, we discuss the potential applications and future\ndirections for graph learning under distribution shifts with a systematic\nanalysis of the current state in this field. The survey is positioned to\nprovide general guidance for the development of effective graph learning\nalgorithms in handling graph distribution shifts, and to stimulate future\nresearch and advancements in this area.", + "authors": "Man Wu, Xin Zheng, Qin Zhang, Xiao Shen, Xiong Luo, Xingquan Zhu, Shirui Pan", + "published": "2024-02-26", + "updated": "2024-03-07", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2108.04595v1", + "title": "Label-informed Graph Structure Learning for Node Classification", + "abstract": "Graph Neural Networks (GNNs) have achieved great success among various\ndomains. Nevertheless, most GNN methods are sensitive to the quality of graph\nstructures. To tackle this problem, some studies exploit different graph\nstructure learning strategies to refine the original graph structure. However,\nthese methods only consider feature information while ignoring available label\ninformation. In this paper, we propose a novel label-informed graph structure\nlearning framework which incorporates label information explicitly through a\nclass transition matrix. We conduct extensive experiments on seven node\nclassification benchmark datasets and the results show that our method\noutperforms or matches the state-of-the-art baselines.", + "authors": "Liping Wang, Fenyu Hu, Shu Wu, Liang Wang", + "published": "2021-08-10", + "updated": "2021-08-10", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2011.01412v1", + "title": "Sampling and Recovery of Graph Signals based on Graph Neural Networks", + "abstract": "We propose interpretable graph neural networks for sampling and recovery of\ngraph signals, respectively. To take informative measurements, we propose a new\ngraph neural sampling module, which aims to select those vertices that\nmaximally express their corresponding neighborhoods. Such expressiveness can be\nquantified by the mutual information between vertices' features and\nneighborhoods' features, which are estimated via a graph neural network. 
To\nreconstruct an original graph signal from the sampled measurements, we propose\na graph neural recovery module based on the algorithm-unrolling technique.\nCompared to previous analytical sampling and recovery, the proposed methods are\nable to flexibly learn a variety of graph signal models from data by leveraging\nthe learning ability of neural networks; compared to previous\nneural-network-based sampling and recovery, the proposed methods are designed\nthrough exploiting specific graph properties and provide interpretability. We\nfurther design a new multiscale graph neural network, which is a trainable\nmultiscale graph filter bank and can handle various graph-related learning\ntasks. The multiscale network leverages the proposed graph neural sampling and\nrecovery modules to achieve multiscale representations of a graph. In the\nexperiments, we illustrate the effects of the proposed graph neural sampling\nand recovery modules and find that the modules can flexibly adapt to various\ngraph structures and graph signals. In the task of active-sampling-based\nsemi-supervised learning, the graph neural sampling module improves the\nclassification accuracy over 10% in Cora dataset. We further validate the\nproposed multiscale graph neural network on several standard datasets for both\nvertex and graph classification. The results show that our method consistently\nimproves the classification accuracies.", + "authors": "Siheng Chen, Maosen Li, Ya Zhang", + "published": "2020-11-03", + "updated": "2020-11-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI", + "eess.SP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2006.02879v1", + "title": "Auto-decoding Graphs", + "abstract": "We present an approach to synthesizing new graph structures from empirically\nspecified distributions. The generative model is an auto-decoder that learns to\nsynthesize graphs from latent codes. The graph synthesis model is learned\njointly with an empirical distribution over the latent codes. Graphs are\nsynthesized using self-attention modules that are trained to identify likely\nconnectivity patterns. Graph-based normalizing flows are used to sample latent\ncodes from the distribution learned by the auto-decoder. The resulting model\ncombines accuracy and scalability. On benchmark datasets of large graphs, the\npresented model outperforms the state of the art by a factor of 1.5 in mean\naccuracy and average rank across at least three different graph statistics,\nwith a 2x speedup during inference.", + "authors": "Sohil Atul Shah, Vladlen Koltun", + "published": "2020-06-04", + "updated": "2020-06-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.11883v2", + "title": "Robust Graph Data Learning via Latent Graph Convolutional Representation", + "abstract": "Graph Convolutional Representation (GCR) has achieved impressive performance\nfor graph data representation. However, existing GCR is generally defined on\nthe input fixed graph which may restrict the representation capacity and also\nbe vulnerable to the structural attacks and noises. To address this issue, we\npropose a novel Latent Graph Convolutional Representation (LatGCR) for robust\ngraph data representation and learning. Our LatGCR is derived based on\nreformulating graph convolutional representation from the aspect of graph\nneighborhood reconstruction. 
Given an input graph $\\textbf{A}$, LatGCR aims to\ngenerate a flexible latent graph $\\widetilde{\\textbf{A}}$ for graph\nconvolutional representation which obviously enhances the representation\ncapacity and also performs robustly w.r.t graph structural attacks and noises.\nMoreover, LatGCR is implemented in a self-supervised manner and thus provides a\nbasic block for both supervised and unsupervised graph learning tasks.\nExperiments on several datasets demonstrate the effectiveness and robustness of\nLatGCR.", + "authors": "Bo Jiang, Ziyan Zhang, Bin Luo", + "published": "2019-04-26", + "updated": "2021-10-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2305.15843v1", + "title": "TabGSL: Graph Structure Learning for Tabular Data Prediction", + "abstract": "This work presents a novel approach to tabular data prediction leveraging\ngraph structure learning and graph neural networks. Despite the prevalence of\ntabular data in real-world applications, traditional deep learning methods\noften overlook the potentially valuable associations between data instances.\nSuch associations can offer beneficial insights for classification tasks, as\ninstances may exhibit similar patterns of correlations among features and\ntarget labels. This information can be exploited by graph neural networks,\nnecessitating robust graph structures. However, existing studies primarily\nfocus on improving graph structure from noisy data, largely neglecting the\npossibility of deriving graph structures from tabular data. We present a novel\nsolution, Tabular Graph Structure Learning (TabGSL), to enhance tabular data\nprediction by simultaneously learning instance correlation and feature\ninteraction within a unified framework. This is achieved through a proposed\ngraph contrastive learning module, along with transformer-based feature\nextractor and graph neural network. Comprehensive experiments conducted on 30\nbenchmark tabular datasets demonstrate that TabGSL markedly outperforms both\ntree-based models and recent deep learning-based tabular models. Visualizations\nof the learned instance embeddings further substantiate the effectiveness of\nTabGSL.", + "authors": "Jay Chiehen Liao, Cheng-Te Li", + "published": "2023-05-25", + "updated": "2023-05-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2201.07409v2", + "title": "Dual Space Graph Contrastive Learning", + "abstract": "Unsupervised graph representation learning has emerged as a powerful tool to\naddress real-world problems and achieves huge success in the graph learning\ndomain. Graph contrastive learning is one of the unsupervised graph\nrepresentation learning methods, which recently attracts attention from\nresearchers and has achieved state-of-the-art performances on various tasks.\nThe key to the success of graph contrastive learning is to construct proper\ncontrasting pairs to acquire the underlying structural semantics of the graph.\nHowever, this key part is not fully explored currently, most of the ways\ngenerating contrasting pairs focus on augmenting or perturbating graph\nstructures to obtain different views of the input graph. But such strategies\ncould degrade the performances via adding noise into the graph, which may\nnarrow down the field of the applications of graph contrastive learning. 
In\nthis paper, we propose a novel graph contrastive learning method, namely\n\\textbf{D}ual \\textbf{S}pace \\textbf{G}raph \\textbf{C}ontrastive (DSGC)\nLearning, to conduct graph contrastive learning among views generated in\ndifferent spaces including the hyperbolic space and the Euclidean space. Since\nboth spaces have their own advantages to represent graph data in the embedding\nspaces, we hope to utilize graph contrastive learning to bridge the spaces and\nleverage advantages from both sides. The comparison experiment results show\nthat DSGC achieves competitive or better performances among all the datasets.\nIn addition, we conduct extensive experiments to analyze the impact of\ndifferent graph encoders on DSGC, giving insights about how to better leverage\nthe advantages of contrastive learning between different spaces.", + "authors": "Haoran Yang, Hongxu Chen, Shirui Pan, Lin Li, Philip S. Yu, Guandong Xu", + "published": "2022-01-19", + "updated": "2022-03-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2202.08235v3", + "title": "Data Augmentation for Deep Graph Learning: A Survey", + "abstract": "Graph neural networks, a powerful deep learning tool to model\ngraph-structured data, have demonstrated remarkable performance on numerous\ngraph learning tasks. To address the data noise and data scarcity issues in\ndeep graph learning, the research on graph data augmentation has intensified\nlately. However, conventional data augmentation methods can hardly handle\ngraph-structured data which is defined in non-Euclidean space with\nmulti-modality. In this survey, we formally formulate the problem of graph data\naugmentation and further review the representative techniques and their\napplications in different deep graph learning problems. Specifically, we first\npropose a taxonomy for graph data augmentation techniques and then provide a\nstructured review by categorizing the related work based on the augmented\ninformation modalities. Moreover, we summarize the applications of graph data\naugmentation in two representative problems in data-centric deep graph\nlearning: (1) reliable graph learning which focuses on enhancing the utility of\ninput graph as well as the model capacity via graph data augmentation; and (2)\nlow-resource graph learning which targets on enlarging the labeled training\ndata scale through graph data augmentation. For each problem, we also provide a\nhierarchical problem taxonomy and review the existing literature related to\ngraph data augmentation. Finally, we point out promising research directions\nand the challenges in future research.", + "authors": "Kaize Ding, Zhe Xu, Hanghang Tong, Huan Liu", + "published": "2022-02-16", + "updated": "2022-11-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2106.15239v1", + "title": "Generating the Graph Gestalt: Kernel-Regularized Graph Representation Learning", + "abstract": "Recent work on graph generative models has made remarkable progress towards\ngenerating increasingly realistic graphs, as measured by global graph features\nsuch as degree distribution, density, and clustering coefficients. 
Deep\ngenerative models have also made significant advances through better modelling\nof the local correlations in the graph topology, which have been very useful\nfor predicting unobserved graph components, such as the existence of a link or\nthe class of a node, from nearby observed graph components. A complete\nscientific understanding of graph data should address both global and local\nstructure. In this paper, we propose a joint model for both as complementary\nobjectives in a graph VAE framework. Global structure is captured by\nincorporating graph kernels in a probabilistic model whose loss function is\nclosely related to the maximum mean discrepancy(MMD) between the global\nstructures of the reconstructed and the input graphs. The ELBO objective\nderived from the model regularizes a standard local link reconstruction term\nwith an MMD term. Our experiments demonstrate a significant improvement in the\nrealism of the generated graph structures, typically by 1-2 orders of magnitude\nof graph structure metrics, compared to leading graph VAEand GAN models. Local\nlink reconstruction improves as well in many cases.", + "authors": "Kiarash Zahirnia, Ankita Sakhuja, Oliver Schulte, Parmis Nadaf, Ke Li, Xia Hu", + "published": "2021-06-29", + "updated": "2021-06-29", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.02664v2", + "title": "Structure-free Graph Condensation: From Large-scale Graphs to Condensed Graph-free Data", + "abstract": "Graph condensation, which reduces the size of a large-scale graph by\nsynthesizing a small-scale condensed graph as its substitution, has immediate\nbenefits for various graph learning tasks. However, existing graph condensation\nmethods rely on the joint optimization of nodes and structures in the condensed\ngraph, and overlook critical issues in effectiveness and generalization\nability. In this paper, we advocate a new Structure-Free Graph Condensation\nparadigm, named SFGC, to distill a large-scale graph into a small-scale graph\nnode set without explicit graph structures, i.e., graph-free data. Our idea is\nto implicitly encode topology structure information into the node attributes in\nthe synthesized graph-free data, whose topology is reduced to an identity\nmatrix. Specifically, SFGC contains two collaborative components: (1) a\ntraining trajectory meta-matching scheme for effectively synthesizing\nsmall-scale graph-free data; (2) a graph neural feature score metric for\ndynamically evaluating the quality of the condensed data. Through training\ntrajectory meta-matching, SFGC aligns the long-term GNN learning behaviors\nbetween the large-scale graph and the condensed small-scale graph-free data,\nensuring comprehensive and compact transfer of informative knowledge to the\ngraph-free data. Afterward, the underlying condensed graph-free data would be\ndynamically evaluated with the graph neural feature score, which is a\nclosed-form metric for ensuring the excellent expressiveness of the condensed\ngraph-free data. 
Extensive experiments verify the superiority of SFGC across\ndifferent condensation ratios.", + "authors": "Xin Zheng, Miao Zhang, Chunyang Chen, Quoc Viet Hung Nguyen, Xingquan Zhu, Shirui Pan", + "published": "2023-06-05", + "updated": "2023-10-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2211.07970v1", + "title": "Adaptive Multi-Neighborhood Attention based Transformer for Graph Representation Learning", + "abstract": "By incorporating the graph structural information into Transformers, graph\nTransformers have exhibited promising performance for graph representation\nlearning in recent years. Existing graph Transformers leverage specific\nstrategies, such as Laplacian eigenvectors and shortest paths of the node\npairs, to preserve the structural features of nodes and feed them into the\nvanilla Transformer to learn the representations of nodes. It is hard for such\npredefined rules to extract informative graph structural features for arbitrary\ngraphs whose topology structure varies greatly, limiting the learning capacity\nof the models. To this end, we propose an adaptive graph Transformer, termed\nMulti-Neighborhood Attention based Graph Transformer (MNA-GT), which captures\nthe graph structural information for each node from the multi-neighborhood\nattention mechanism adaptively. By defining the input to perform scaled-dot\nproduct as an attention kernel, MNA-GT constructs multiple attention kernels\nbased on different hops of neighborhoods such that each attention kernel can\ncapture specific graph structural information of the corresponding neighborhood\nfor each node pair. In this way, MNA-GT can preserve the graph structural\ninformation efficiently by incorporating node representations learned by\ndifferent attention kernels. MNA-GT further employs an attention layer to learn\nthe importance of different attention kernels to enable the model to adaptively\ncapture the graph structural information for different nodes. Extensive\nexperiments are conducted on a variety of graph benchmarks, and the empirical\nresults show that MNA-GT outperforms many strong baselines.", + "authors": "Gaichao Li, Jinsong Chen, Kun He", + "published": "2022-11-15", + "updated": "2022-11-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.00876v1", + "title": "Balanced Graph Structure Information for Brain Disease Detection", + "abstract": "Analyzing connections between brain regions of interest (ROI) is vital to\ndetect neurological disorders such as autism or schizophrenia. Recent\nadvancements employ graph neural networks (GNNs) to utilize graph structures in\nbrains, improving detection performances. Current methods use correlation\nmeasures between ROI's blood-oxygen-level-dependent (BOLD) signals to generate\nthe graph structure. Other methods use the training samples to learn the\noptimal graph structure through end-to-end learning. However, implementing\nthose methods independently leads to some issues with noisy data for the\ncorrelation graphs and overfitting problems for the optimal graph. In this\nwork, we proposed Bargrain (balanced graph structure for brains), which models\ntwo graph structures: filtered correlation matrix and optimal sample graph\nusing graph convolution networks (GCNs). 
This approach aims to get advantages\nfrom both graphs and address the limitations of only relying on a single type\nof structure. Based on our extensive experiment, Bargrain outperforms\nstate-of-the-art methods in classification tasks on brain disease datasets, as\nmeasured by average F1 scores.", + "authors": "Falih Gozi Febrinanto, Mujie Liu, Feng Xia", + "published": "2023-12-30", + "updated": "2023-12-30", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "q-bio.NC" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1902.10042v2", + "title": "Graph Neural Processes: Towards Bayesian Graph Neural Networks", + "abstract": "We introduce Graph Neural Processes (GNP), inspired by the recent work in\nconditional and latent neural processes. A Graph Neural Process is defined as a\nConditional Neural Process that operates on arbitrary graph data. It takes\nfeatures of sparsely observed context points as input, and outputs a\ndistribution over target points. We demonstrate graph neural processes in edge\nimputation and discuss benefits and drawbacks of the method for other\napplication areas. One major benefit of GNPs is the ability to quantify\nuncertainty in deep learning on graph structures. An additional benefit of this\nmethod is the ability to extend graph neural networks to inputs of dynamic\nsized graphs.", + "authors": "Andrew Carr, David Wingate", + "published": "2019-02-26", + "updated": "2019-10-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.08201v1", + "title": "Graph Laplacian Learning with Exponential Family Noise", + "abstract": "A common challenge in applying graph machine learning methods is that the\nunderlying graph of a system is often unknown. Although different graph\ninference methods have been proposed for continuous graph signals, inferring\nthe graph structure underlying other types of data, such as discrete counts, is\nunder-explored. In this paper, we generalize a graph signal processing (GSP)\nframework for learning a graph from smooth graph signals to the exponential\nfamily noise distribution to model various data types. We propose an\nalternating algorithm that estimates the graph Laplacian as well as the\nunobserved smooth representation from the noisy signals. We demonstrate in\nsynthetic and real-world data that our new algorithm outperforms competing\nLaplacian estimation methods under noise model mismatch.", + "authors": "Changhao Shi, Gal Mishne", + "published": "2023-06-14", + "updated": "2023-06-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.SP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1801.03226v1", + "title": "Adaptive Graph Convolutional Neural Networks", + "abstract": "Graph Convolutional Neural Networks (Graph CNNs) are generalizations of\nclassical CNNs to handle graph data such as molecular data, point could and\nsocial networks. Current filters in graph CNNs are built for fixed and shared\ngraph structure. However, for most real data, the graph structures varies in\nboth size and connectivity. The paper proposes a generalized and flexible graph\nCNN taking data of arbitrary graph structure as input. In that way a\ntask-driven adaptive graph is learned for each graph data while training. To\nefficiently learn the graph, a distance metric learning is proposed. 
Extensive\nexperiments on nine graph-structured datasets have demonstrated the superior\nperformance improvement on both convergence speed and predictive accuracy.", + "authors": "Ruoyu Li, Sheng Wang, Feiyun Zhu, Junzhou Huang", + "published": "2018-01-10", + "updated": "2018-01-10", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2012.05980v1", + "title": "CommPOOL: An Interpretable Graph Pooling Framework for Hierarchical Graph Representation Learning", + "abstract": "Recent years have witnessed the emergence and flourishing of hierarchical\ngraph pooling neural networks (HGPNNs) which are effective graph representation\nlearning approaches for graph level tasks such as graph classification.\nHowever, current HGPNNs do not take full advantage of the graph's intrinsic\nstructures (e.g., community structure). Moreover, the pooling operations in\nexisting HGPNNs are difficult to be interpreted. In this paper, we propose a\nnew interpretable graph pooling framework - CommPOOL, that can capture and\npreserve the hierarchical community structure of graphs in the graph\nrepresentation learning process. Specifically, the proposed community pooling\nmechanism in CommPOOL utilizes an unsupervised approach for capturing the\ninherent community structure of graphs in an interpretable manner. CommPOOL is\na general and flexible framework for hierarchical graph representation learning\nthat can further facilitate various graph-level tasks. Evaluations on five\npublic benchmark datasets and one synthetic dataset demonstrate the superior\nperformance of CommPOOL in graph representation learning for graph\nclassification compared to the state-of-the-art baseline methods, and its\neffectiveness in capturing and preserving the community structure of graphs.", + "authors": "Haoteng Tang, Guixiang Ma, Lifang He, Heng Huang, Liang Zhan", + "published": "2020-12-10", + "updated": "2020-12-10", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2006.13009v2", + "title": "Iterative Deep Graph Learning for Graph Neural Networks: Better and Robust Node Embeddings", + "abstract": "In this paper, we propose an end-to-end graph learning framework, namely\nIterative Deep Graph Learning (IDGL), for jointly and iteratively learning\ngraph structure and graph embedding. The key rationale of IDGL is to learn a\nbetter graph structure based on better node embeddings, and vice versa (i.e.,\nbetter node embeddings based on a better graph structure). Our iterative method\ndynamically stops when the learned graph structure approaches close enough to\nthe graph optimized for the downstream prediction task. In addition, we cast\nthe graph learning problem as a similarity metric learning problem and leverage\nadaptive graph regularization for controlling the quality of the learned graph.\nFinally, combining the anchor-based approximation technique, we further propose\na scalable version of IDGL, namely IDGL-Anch, which significantly reduces the\ntime and space complexity of IDGL without compromising the performance. Our\nextensive experiments on nine benchmarks show that our proposed IDGL models can\nconsistently outperform or match the state-of-the-art baselines. 
Furthermore,\nIDGL can be more robust to adversarial graphs and cope with both transductive\nand inductive learning.", + "authors": "Yu Chen, Lingfei Wu, Mohammed J. Zaki", + "published": "2020-06-21", + "updated": "2020-10-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1909.11594v1", + "title": "Structured Graph Learning Via Laplacian Spectral Constraints", + "abstract": "Learning a graph with a specific structure is essential for interpretability\nand identification of the relationships among data. It is well known that\nstructured graph learning from observed samples is an NP-hard combinatorial\nproblem. In this paper, we first show that for a set of important graph\nfamilies it is possible to convert the structural constraints of structure into\neigenvalue constraints of the graph Laplacian matrix. Then we introduce a\nunified graph learning framework, lying at the integration of the spectral\nproperties of the Laplacian matrix with Gaussian graphical modeling that is\ncapable of learning structures of a large class of graph families. The proposed\nalgorithms are provably convergent and practically amenable for large-scale\nsemi-supervised and unsupervised graph-based learning tasks. Extensive\nnumerical experiments with both synthetic and real data sets demonstrate the\neffectiveness of the proposed methods. An R package containing code for all the\nexperimental results is available at\nhttps://cran.r-project.org/package=spectralGraphTopology.", + "authors": "Sandeep Kumar, Jiaxi Ying, Jos'e Vin'icius de M. Cardoso, Daniel P. Palomar", + "published": "2019-09-24", + "updated": "2019-09-24", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "cs.SI", + "math.OC", + "stat.AP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2302.03596v3", + "title": "Graph Generation with Diffusion Mixture", + "abstract": "Generation of graphs is a major challenge for real-world tasks that require\nunderstanding the complex nature of their non-Euclidean structures. Although\ndiffusion models have achieved notable success in graph generation recently,\nthey are ill-suited for modeling the topological properties of graphs since\nlearning to denoise the noisy samples does not explicitly learn the graph\nstructures to be generated. To tackle this limitation, we propose a generative\nframework that models the topology of graphs by explicitly learning the final\ngraph structures of the diffusion process. Specifically, we design the\ngenerative process as a mixture of endpoint-conditioned diffusion processes\nwhich is driven toward the predicted graph that results in rapid convergence.\nWe further introduce a simple parameterization of the mixture process and\ndevelop an objective for learning the final graph structure, which enables\nmaximum likelihood training. Through extensive experimental validation on\ngeneral graph and 2D/3D molecule generation tasks, we show that our method\noutperforms previous generative models, generating graphs with correct topology\nwith both continuous (e.g. 3D coordinates) and discrete (e.g. atom types)\nfeatures. 
Our code is available at https://github.com/harryjo97/DruM.", + "authors": "Jaehyeong Jo, Dongki Kim, Sung Ju Hwang", + "published": "2023-02-07", + "updated": "2024-02-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2111.04286v1", + "title": "Deep Unsupervised Active Learning on Learnable Graphs", + "abstract": "Recently deep learning has been successfully applied to unsupervised active\nlearning. However, current method attempts to learn a nonlinear transformation\nvia an auto-encoder while ignoring the sample relation, leaving huge room to\ndesign more effective representation learning mechanisms for unsupervised\nactive learning. In this paper, we propose a novel deep unsupervised Active\nLearning model via Learnable Graphs, named ALLG. ALLG benefits from learning\noptimal graph structures to acquire better sample representation and select\nrepresentative samples. To make the learnt graph structure more stable and\neffective, we take into account $k$-nearest neighbor graph as a priori, and\nlearn a relation propagation graph structure. We also incorporate shortcut\nconnections among different layers, which can alleviate the well-known\nover-smoothing problem to some extent. To the best of our knowledge, this is\nthe first attempt to leverage graph structure learning for unsupervised active\nlearning. Extensive experiments performed on six datasets demonstrate the\nefficacy of our method.", + "authors": "Handong Ma, Changsheng Li, Xinchu Shi, Ye Yuan, Guoren Wang", + "published": "2021-11-08", + "updated": "2021-11-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.13769v1", + "title": "Multiview Graph Learning with Consensus Graph", + "abstract": "Graph topology inference, i.e., learning graphs from a given set of nodal\nobservations, is a significant task in many application domains. Existing\napproaches are mostly limited to learning a single graph assuming that the\nobserved data is homogeneous. This is problematic because many modern datasets\nare heterogeneous or mixed and involve multiple related graphs, i.e., multiview\ngraphs. Recent work proposing to learn multiview graphs ensures the similarity\nof learned view graphs through pairwise regularization, where each pair of\nviews is encouraged to have similar structures. However, this approach cannot\ninfer the shared structure across views. In this work, we propose an\nalternative method based on consensus regularization, where views are ensured\nto be similar through a learned consensus graph representing the common\nstructure of the views. In particular, we propose an optimization problem,\nwhere graph data is assumed to be smooth over the multiview graph and the\ntopology of the individual views and that of the consensus graph are learned,\nsimultaneously. Our optimization problem is designed to be general in the sense\nthat different regularization functions can be used depending on what the\nshared structure across views is. Moreover, we propose two regularization\nfunctions that extend fused and group graphical lasso to consensus based\nregularization. Proposed multiview graph learning is evaluated on simulated\ndata and shown to have better performance than existing methods. 
It is also\nemployed to infer the functional brain connectivity networks of multiple\nsubjects from their electroencephalogram (EEG) recordings. The proposed method\nreveals the structure shared by subjects as well as the characteristics unique\nto each subject.", + "authors": "Abdullah Karaaslanli, Selin Aviyente", + "published": "2024-01-24", + "updated": "2024-01-24", + "primary_cat": "eess.SP", + "cats": [ + "eess.SP", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2007.16002v1", + "title": "Graph Convolutional Networks using Heat Kernel for Semi-supervised Learning", + "abstract": "Graph convolutional networks gain remarkable success in semi-supervised\nlearning on graph structured data. The key to graph-based semisupervised\nlearning is capturing the smoothness of labels or features over nodes exerted\nby graph structure. Previous methods, spectral methods and spatial methods,\ndevote to defining graph convolution as a weighted average over neighboring\nnodes, and then learn graph convolution kernels to leverage the smoothness to\nimprove the performance of graph-based semi-supervised learning. One open\nchallenge is how to determine appropriate neighborhood that reflects relevant\ninformation of smoothness manifested in graph structure. In this paper, we\npropose GraphHeat, leveraging heat kernel to enhance low-frequency filters and\nenforce smoothness in the signal variation on the graph. GraphHeat leverages\nthe local structure of target node under heat diffusion to determine its\nneighboring nodes flexibly, without the constraint of order suffered by\nprevious methods. GraphHeat achieves state-of-the-art results in the task of\ngraph-based semi-supervised classification across three benchmark datasets:\nCora, Citeseer and Pubmed.", + "authors": "Bingbing Xu, Huawei Shen, Qi Cao, Keting Cen, Xueqi Cheng", + "published": "2020-07-27", + "updated": "2020-07-27", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2309.10134v1", + "title": "GDM: Dual Mixup for Graph Classification with Limited Supervision", + "abstract": "Graph Neural Networks (GNNs) require a large number of labeled graph samples\nto obtain good performance on the graph classification task. The performance of\nGNNs degrades significantly as the number of labeled graph samples decreases.\nTo reduce the annotation cost, it is therefore important to develop graph\naugmentation methods that can generate new graph instances to increase the size\nand diversity of the limited set of available labeled graph samples. In this\nwork, we propose a novel mixup-based graph augmentation method, Graph Dual\nMixup (GDM), that leverages both functional and structural information of the\ngraph instances to generate new labeled graph samples. GDM employs a graph\nstructural auto-encoder to learn structural embeddings of the graph samples,\nand then applies mixup to the structural information of the graphs in the\nlearned structural embedding space and generates new graph structures from the\nmixup structural embeddings. As for the functional information, GDM applies\nmixup directly to the input node features of the graph samples to generate\nfunctional node feature information for new mixup graph instances. Jointly, the\ngenerated input node features and graph structures yield new graph samples\nwhich can supplement the set of original labeled graphs. 
Furthermore, we\npropose two novel Balanced Graph Sampling methods to enhance the balanced\ndifficulty and diversity for the generated graph samples. Experimental results\non the benchmark datasets demonstrate that our proposed method substantially\noutperforms the state-of-the-art graph augmentation methods when the labeled\ngraphs are scarce.", + "authors": "Abdullah Alchihabi, Yuhong Guo", + "published": "2023-09-18", + "updated": "2023-09-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2311.11821v1", + "title": "Cross-View Graph Consistency Learning for Invariant Graph Representations", + "abstract": "Graph representation learning is fundamental for analyzing graph-structured\ndata. Exploring invariant graph representations remains a challenge for most\nexisting graph representation learning methods. In this paper, we propose a\ncross-view graph consistency learning (CGCL) method that learns invariant graph\nrepresentations for link prediction. First, two complementary augmented views\nare derived from an incomplete graph structure through a bidirectional graph\nstructure augmentation scheme. This augmentation scheme mitigates the potential\ninformation loss that is commonly associated with various data augmentation\ntechniques involving raw graph data, such as edge perturbation, node removal,\nand attribute masking. Second, we propose a CGCL model that can learn invariant\ngraph representations. A cross-view training scheme is proposed to train the\nproposed CGCL model. This scheme attempts to maximize the consistency\ninformation between one augmented view and the graph structure reconstructed\nfrom the other augmented view. Furthermore, we offer a comprehensive\ntheoretical CGCL analysis. This paper empirically and experimentally\ndemonstrates the effectiveness of the proposed CGCL method, achieving\ncompetitive results on graph datasets in comparisons with several\nstate-of-the-art algorithms.", + "authors": "Jie Chen, Zhiming Li, Hua Mao, Wai Lok Woo, Xi Peng", + "published": "2023-11-20", + "updated": "2023-11-20", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2302.02909v1", + "title": "Spectral Augmentations for Graph Contrastive Learning", + "abstract": "Contrastive learning has emerged as a premier method for learning\nrepresentations with or without supervision. Recent studies have shown its\nutility in graph representation learning for pre-training. Despite successes,\nthe understanding of how to design effective graph augmentations that can\ncapture structural properties common to many different types of downstream\ngraphs remains incomplete. We propose a set of well-motivated graph\ntransformation operations derived via graph spectral analysis to provide a bank\nof candidates when constructing augmentations for a graph contrastive\nobjective, enabling contrastive learning to capture useful structural\nrepresentation from pre-training graph datasets. We first present a spectral\ngraph cropping augmentation that involves filtering nodes by applying\nthresholds to the eigenvalues of the leading Laplacian eigenvectors. Our second\nnovel augmentation reorders the graph frequency components in a structural\nLaplacian-derived position graph embedding. 
Further, we introduce a method that\nleads to improved views of local subgraphs by performing alignment via global\nrandom walk embeddings. Our experimental results indicate consistent\nimprovements in out-of-domain graph data transfer compared to state-of-the-art\ngraph contrastive learning methods, shedding light on how to design a graph\nlearner that is able to learn structural properties common to diverse graph\ntypes.", + "authors": "Amur Ghose, Yingxue Zhang, Jianye Hao, Mark Coates", + "published": "2023-02-06", + "updated": "2023-02-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1910.11390v2", + "title": "Deep Learning for Molecular Graphs with Tiered Graph Autoencoders and Graph Prediction", + "abstract": "Tiered graph autoencoders provide the architecture and mechanisms for\nlearning tiered latent representations and latent spaces for molecular graphs\nthat explicitly represent and utilize groups (e.g., functional groups). This\nenables the utilization and exploration of tiered molecular latent spaces,\neither individually - the node (atom) tier, the group tier, or the graph\n(molecule) tier - or jointly, as well as navigation across the tiers. In this\npaper, we discuss the use of tiered graph autoencoders together with graph\nprediction for molecular graphs. We show features of molecular graphs used, and\ngroups in molecular graphs identified for some sample molecules. We briefly\nreview graph prediction and the QM9 dataset for background information, and\ndiscuss the use of tiered graph embeddings for graph prediction, particularly\nweighted group pooling. We find that functional groups and ring groups\neffectively capture and represent the chemical essence of molecular graphs\n(structures). Further, tiered graph autoencoders and graph prediction together\nprovide effective, efficient and interpretable deep learning for molecular\ngraphs, with the former providing unsupervised, transferable learning and the\nlatter providing supervised, task-optimized learning.", + "authors": "Daniel T. Chang", + "published": "2019-10-24", + "updated": "2021-07-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "q-bio.BM" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2306.11264v1", + "title": "GraphGLOW: Universal and Generalizable Structure Learning for Graph Neural Networks", + "abstract": "Graph structure learning is a well-established problem that aims at\noptimizing graph structures adaptive to specific graph datasets to help message\npassing neural networks (i.e., GNNs) to yield effective and robust node\nembeddings. However, the common limitation of existing models lies in the\nunderlying \\textit{closed-world assumption}: the testing graph is the same as\nthe training graph. This premise requires independently training the structure\nlearning model from scratch for each graph dataset, which leads to prohibitive\ncomputation costs and potential risks for serious over-fitting. To mitigate\nthese issues, this paper explores a new direction that moves forward to learn a\nuniversal structure learning model that can generalize across graph datasets in\nan open world. We first introduce the mathematical definition of this novel\nproblem setting, and describe the model formulation from a probabilistic\ndata-generative aspect. 
Then we devise a general framework that coordinates a\nsingle graph-shared structure learner and multiple graph-specific GNNs to\ncapture the generalizable patterns of optimal message-passing topology across\ndatasets. The well-trained structure learner can directly produce adaptive\nstructures for unseen target graphs without any fine-tuning. Across diverse\ndatasets and various challenging cross-graph generalization protocols, our\nexperiments show that even without training on target graphs, the proposed\nmodel i) significantly outperforms expressive GNNs trained on input\n(non-optimized) topology, and ii) surprisingly performs on par with\nstate-of-the-art models that independently optimize adaptive structures for\nspecific target graphs, with notably orders-of-magnitude acceleration for\ntraining on the target graph.", + "authors": "Wentao Zhao, Qitian Wu, Chenxiao Yang, Junchi Yan", + "published": "2023-06-20", + "updated": "2023-06-20", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1912.07832v1", + "title": "Deep Iterative and Adaptive Learning for Graph Neural Networks", + "abstract": "In this paper, we propose an end-to-end graph learning framework, namely Deep\nIterative and Adaptive Learning for Graph Neural Networks (DIAL-GNN), for\njointly learning the graph structure and graph embeddings simultaneously. We\nfirst cast the graph structure learning problem as a similarity metric learning\nproblem and leverage an adapted graph regularization for controlling\nsmoothness, connectivity and sparsity of the generated graph. We further\npropose a novel iterative method for searching for a hidden graph structure\nthat augments the initial graph structure. Our iterative method dynamically\nstops when the learned graph structure approaches close enough to the optimal\ngraph. Our extensive experiments demonstrate that the proposed DIAL-GNN model\ncan consistently outperform or match state-of-the-art baselines in terms of\nboth downstream task performance and computational time. The proposed approach\ncan cope with both transductive learning and inductive learning.", + "authors": "Yu Chen, Lingfei Wu, Mohammed J. Zaki", + "published": "2019-12-17", + "updated": "2019-12-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2005.03675v3", + "title": "Machine Learning on Graphs: A Model and Comprehensive Taxonomy", + "abstract": "There has been a surge of recent interest in learning representations for\ngraph-structured data. Graph representation learning methods have generally\nfallen into three main categories, based on the availability of labeled data.\nThe first, network embedding (such as shallow graph embedding or graph\nauto-encoders), focuses on learning unsupervised representations of relational\nstructure. The second, graph regularized neural networks, leverages graphs to\naugment neural network losses with a regularization objective for\nsemi-supervised learning. The third, graph neural networks, aims to learn\ndifferentiable functions over discrete topologies with arbitrary structure.\nHowever, despite the popularity of these areas there has been surprisingly\nlittle work on unifying the three paradigms. Here, we aim to bridge the gap\nbetween graph neural networks, network embedding and graph regularization\nmodels. 
We propose a comprehensive taxonomy of representation learning methods\nfor graph-structured data, aiming to unify several disparate bodies of work.\nSpecifically, we propose a Graph Encoder Decoder Model (GRAPHEDM), which\ngeneralizes popular algorithms for semi-supervised learning on graphs (e.g.\nGraphSage, Graph Convolutional Networks, Graph Attention Networks), and\nunsupervised learning of graph representations (e.g. DeepWalk, node2vec, etc)\ninto a single consistent approach. To illustrate the generality of this\napproach, we fit over thirty existing methods into this framework. We believe\nthat this unifying view both provides a solid foundation for understanding the\nintuition behind these methods, and enables future research in the area.", + "authors": "Ines Chami, Sami Abu-El-Haija, Bryan Perozzi, Christopher R\u00e9, Kevin Murphy", + "published": "2020-05-07", + "updated": "2022-04-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NE", + "cs.SI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2106.03236v1", + "title": "Graph2Graph Learning with Conditional Autoregressive Models", + "abstract": "We present a graph neural network model for solving graph-to-graph learning\nproblems. Most deep learning on graphs considers ``simple'' problems such as\ngraph classification or regressing real-valued graph properties. For such\ntasks, the main requirement for intermediate representations of the data is to\nmaintain the structure needed for output, i.e., keeping classes separated or\nmaintaining the order indicated by the regressor. However, a number of learning\ntasks, such as regressing graph-valued output, generative models, or graph\nautoencoders, aim to predict a graph-structured output. In order to\nsuccessfully do this, the learned representations need to preserve far more\nstructure. We present a conditional auto-regressive model for graph-to-graph\nlearning and illustrate its representational capabilities via experiments on\nchallenging subgraph predictions from graph algorithmics; as a graph\nautoencoder for reconstruction and visualization; and on pretraining\nrepresentations that allow graph classification with limited labeled data.", + "authors": "Guan Wang, Francois Bernard Lauze, Aasa Feragen", + "published": "2021-06-06", + "updated": "2021-06-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2008.10065v1", + "title": "Kernel-based Graph Learning from Smooth Signals: A Functional Viewpoint", + "abstract": "The problem of graph learning concerns the construction of an explicit\ntopological structure revealing the relationship between nodes representing\ndata entities, which plays an increasingly important role in the success of\nmany graph-based representations and algorithms in the field of machine\nlearning and graph signal processing. In this paper, we propose a novel graph\nlearning framework that incorporates the node-side and observation-side\ninformation, and in particular the covariates that help to explain the\ndependency structures in graph signals. To this end, we consider graph signals\nas functions in the reproducing kernel Hilbert space associated with a\nKronecker product kernel, and integrate functional learning with\nsmoothness-promoting graph learning to learn a graph representing the\nrelationship between nodes. 
The functional learning increases the robustness of\ngraph learning against missing and incomplete information in the graph signals.\nIn addition, we develop a novel graph-based regularisation method which, when\ncombined with the Kronecker product kernel, enables our model to capture both\nthe dependency explained by the graph and the dependency due to graph signals\nobserved under different but related circumstances, e.g. different points in\ntime. The latter means the graph signals are free from the i.i.d. assumptions\nrequired by the classical graph learning models. Experiments on both synthetic\nand real-world data show that our methods outperform the state-of-the-art\nmodels in learning a meaningful graph topology from graph signals, in\nparticular under heavy noise, missing values, and multiple dependency.", + "authors": "Xingyue Pu, Siu Lun Chau, Xiaowen Dong, Dino Sejdinovic", + "published": "2020-08-23", + "updated": "2020-08-23", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG", + "cs.SI", + "eess.SP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1802.04407v2", + "title": "Adversarially Regularized Graph Autoencoder for Graph Embedding", + "abstract": "Graph embedding is an effective method to represent graph data in a low\ndimensional space for graph analytics. Most existing embedding algorithms\ntypically focus on preserving the topological structure or minimizing the\nreconstruction errors of graph data, but they have mostly ignored the data\ndistribution of the latent codes from the graphs, which often results in\ninferior embedding in real-world graph data. In this paper, we propose a novel\nadversarial graph embedding framework for graph data. The framework encodes the\ntopological structure and node content in a graph to a compact representation,\non which a decoder is trained to reconstruct the graph structure. Furthermore,\nthe latent representation is enforced to match a prior distribution via an\nadversarial training scheme. To learn a robust embedding, two variants of\nadversarial approaches, adversarially regularized graph autoencoder (ARGA) and\nadversarially regularized variational graph autoencoder (ARVGA), are developed.\nExperimental studies on real-world graphs validate our design and demonstrate\nthat our algorithms outperform baselines by a wide margin in link prediction,\ngraph clustering, and graph visualization tasks.", + "authors": "Shirui Pan, Ruiqi Hu, Guodong Long, Jing Jiang, Lina Yao, Chengqi Zhang", + "published": "2018-02-13", + "updated": "2019-01-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2212.01749v1", + "title": "Semantic Graph Neural Network with Multi-measure Learning for Semi-supervised Classification", + "abstract": "Graph Neural Networks (GNNs) have attracted increasing attention in recent\nyears and have achieved excellent performance in semi-supervised node\nclassification tasks. The success of most GNNs relies on one fundamental\nassumption, i.e., the original graph structure data is available. However,\nrecent studies have shown that GNNs are vulnerable to the complex underlying\nstructure of the graph, making it necessary to learn comprehensive and robust\ngraph structures for downstream tasks, rather than relying only on the raw\ngraph structure. 
In light of this, we seek to learn optimal graph structures\nfor downstream tasks and propose a novel framework for semi-supervised\nclassification. Specifically, based on the structural context information of\ngraph and node representations, we encode the complex interactions in semantics\nand generate semantic graphs to preserve the global structure. Moreover, we\ndevelop a novel multi-measure attention layer to optimize the similarity rather\nthan prescribing it a priori, so that the similarity can be adaptively\nevaluated by integrating measures. These graphs are fused and optimized\ntogether with GNN towards semi-supervised classification objective. Extensive\nexperiments and ablation studies on six real-world datasets clearly demonstrate\nthe effectiveness of our proposed model and the contribution of each component.", + "authors": "Junchao Lin, Yuan Wan, Jingwen Xu, Xingchen Qi", + "published": "2022-12-04", + "updated": "2022-12-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2003.03892v2", + "title": "COPT: Coordinated Optimal Transport for Graph Sketching", + "abstract": "We introduce COPT, a novel distance metric between graphs defined via an\noptimization routine, computing a coordinated pair of optimal transport maps\nsimultaneously. This gives an unsupervised way to learn general-purpose graph\nrepresentation, applicable to both graph sketching and graph comparison. COPT\ninvolves simultaneously optimizing dual transport plans, one between the\nvertices of two graphs, and another between graph signal probability\ndistributions. We show theoretically that our method preserves important global\nstructural information on graphs, in particular spectral information, and\nanalyze connections to existing studies. Empirically, COPT outperforms state of\nthe art methods in graph classification on both synthetic and real datasets.", + "authors": "Yihe Dong, Will Sawin", + "published": "2020-03-09", + "updated": "2020-06-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.DS", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2009.00647v4", + "title": "Lifelong Graph Learning", + "abstract": "Graph neural networks (GNN) are powerful models for many graph-structured\ntasks. Existing models often assume that the complete structure of the graph is\navailable during training. In practice, however, graph-structured data is\nusually formed in a streaming fashion so that learning a graph continuously is\noften necessary. In this paper, we bridge GNN and lifelong learning by\nconverting a continual graph learning problem to a regular graph learning\nproblem so GNN can inherit the lifelong learning techniques developed for\nconvolutional neural networks (CNN). We propose a new topology, the feature\ngraph, which takes features as new nodes and turns nodes into independent\ngraphs. This successfully converts the original problem of node classification\nto graph classification. In the experiments, we demonstrate the efficiency and\neffectiveness of feature graph networks (FGN) by continuously learning a\nsequence of classical graph datasets. We also show that FGN achieves superior\nperformance in two applications, i.e., lifelong human action recognition with\nwearable devices and feature matching. To the best of our knowledge, FGN is the\nfirst method to bridge graph learning and lifelong learning via a novel graph\ntopology. 
Source code is available at https://github.com/wang-chen/LGL", + "authors": "Chen Wang, Yuheng Qiu, Dasong Gao, Sebastian Scherer", + "published": "2020-09-01", + "updated": "2022-03-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2004.06846v1", + "title": "MxPool: Multiplex Pooling for Hierarchical Graph Representation Learning", + "abstract": "How to utilize deep learning methods for graph classification tasks has\nattracted considerable research attention in the past few years. Regarding\ngraph classification tasks, the graphs to be classified may have various graph\nsizes (i.e., different number of nodes and edges) and have various graph\nproperties (e.g., average node degree, diameter, and clustering coefficient).\nThe diverse property of graphs has imposed significant challenges on existing\ngraph learning techniques since diverse graphs have different best-fit\nhyperparameters. It is difficult to learn graph features from a set of diverse\ngraphs by a unified graph neural network. This motivates us to use a multiplex\nstructure in a diverse way and utilize a priori properties of graphs to guide\nthe learning. In this paper, we propose MxPool, which concurrently uses\nmultiple graph convolution/pooling networks to build a hierarchical learning\nstructure for graph representation learning tasks. Our experiments on numerous\ngraph classification benchmarks show that our MxPool has superiority over other\nstate-of-the-art graph representation learning methods.", + "authors": "Yanyan Liang, Yanfeng Zhang, Dechao Gao, Qian Xu", + "published": "2020-04-15", + "updated": "2020-04-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1803.03324v1", + "title": "Learning Deep Generative Models of Graphs", + "abstract": "Graphs are fundamental data structures which concisely capture the relational\nstructure in many important real-world domains, such as knowledge graphs,\nphysical and social interactions, language, and chemistry. Here we introduce a\npowerful new approach for learning generative models over graphs, which can\ncapture both their structure and attributes. Our approach uses graph neural\nnetworks to express probabilistic dependencies among a graph's nodes and edges,\nand can, in principle, learn distributions over any arbitrary graph. In a\nseries of experiments our results show that once trained, our models can\ngenerate good quality samples of both synthetic graphs as well as real\nmolecular graphs, both unconditionally and conditioned on data. Compared to\nbaselines that do not use graph-structured representations, our models often\nperform far better. We also explore key challenges of learning generative\nmodels of graphs, such as how to handle symmetries and ordering of elements\nduring the graph generation process, and offer possible solutions. 
Our work is\nthe first and most general approach for learning generative models over\narbitrary graphs, and opens new directions for moving away from restrictions of\nvector- and sequence-like knowledge representations, toward more expressive and\nflexible relational data structures.", + "authors": "Yujia Li, Oriol Vinyals, Chris Dyer, Razvan Pascanu, Peter Battaglia", + "published": "2018-03-08", + "updated": "2018-03-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2005.14403v1", + "title": "Deep graph learning for semi-supervised classification", + "abstract": "Graph learning (GL) can dynamically capture the distribution structure (graph\nstructure) of data based on graph convolutional networks (GCN), and the\nlearning quality of the graph structure directly influences GCN for\nsemi-supervised classification. Existing methods mostly combine the\ncomputational layer and the related losses into GCN for exploring the global\ngraph (measuring graph structure from all data samples) or local graph\n(measuring graph structure from local data samples). Global graph emphasises\nthe whole structure description of the inter-class data, while local graph\ntrends to the neighborhood structure representation of intra-class data.\nHowever, it is difficult to simultaneously balance these graphs of the learning\nprocess for semi-supervised classification because of the interdependence of\nthese graphs. To simulate the interdependence, deep graph learning (DGL) is\nproposed to find the better graph representation for semi-supervised\nclassification. DGL can not only learn the global structure by the previous\nlayer metric computation updating, but also mine the local structure by next\nlayer local weight reassignment. Furthermore, DGL can fuse the different\nstructures by dynamically encoding the interdependence of these structures, and\ndeeply mine the relationship of the different structures by the hierarchical\nprogressive learning for improving the performance of semi-supervised\nclassification. Experiments demonstrate that DGL outperforms state-of-the-art\nmethods on three benchmark datasets (Citeseer, Cora, and Pubmed) for citation\nnetworks and two benchmark datasets (MNIST and Cifar10) for images.", + "authors": "Guangfeng Lin, Xiaobing Kang, Kaiyang Liao, Fan Zhao, Yajun Chen", + "published": "2020-05-29", + "updated": "2020-05-29", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2003.04508v3", + "title": "Unsupervised Graph Embedding via Adaptive Graph Learning", + "abstract": "Graph autoencoders (GAEs) are powerful tools in representation learning for\ngraph embedding. However, the performance of GAEs is very dependent on the\nquality of the graph structure, i.e., of the adjacency matrix. In other words,\nGAEs would perform poorly when the adjacency matrix is incomplete or\ndisturbed. In this paper, two novel unsupervised graph embedding methods,\nunsupervised graph embedding via adaptive graph learning (BAGE) and\nunsupervised graph embedding via variational adaptive graph learning (VBAGE)\nare proposed. The proposed methods expand the application range of GAEs on\ngraph embedding, i.e., on the general datasets without graph structure.\nMeanwhile, the adaptive learning mechanism can initialize the adjacency matrix\nwithout being affected by the parameter. 
Besides that, the latent representations\nare embedded in the laplacian graph structure to preserve the topology\nstructure of the graph in the vector space. Moreover, the adjacency matrix can\nbe self-learned for better embedding performance when the original graph\nstructure is incomplete. With adaptive learning, the proposed method is much\nmore robust to the graph structure. Experimental studies on several datasets\nvalidate our design and demonstrate that our methods outperform baselines by a\nwide margin in node clustering, node classification, and graph visualization\ntasks.", + "authors": "Rui Zhang, Yunxing Zhang, Xuelong Li", + "published": "2020-03-10", + "updated": "2021-03-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2104.09304v1", + "title": "A Tunable Model for Graph Generation Using LSTM and Conditional VAE", + "abstract": "With the development of graph applications, generative models for graphs have\nbeen more crucial. Classically, stochastic models that generate graphs with a\npre-defined probability of edges and nodes have been studied. Recently, some\nmodels that reproduce the structural features of graphs by learning from actual\ngraph data using machine learning have been studied. However, in these\nconventional studies based on machine learning, structural features of graphs\ncan be learned from data, but it is not possible to tune features and generate\ngraphs with specific features. In this paper, we propose a generative model\nthat can tune specific features, while learning structural features of a graph\nfrom data. With a dataset of graphs with various features generated by a\nstochastic model, we confirm that our model can generate a graph with specific\nfeatures.", + "authors": "Shohei Nakazawa, Yoshiki Sato, Kenji Nakagawa, Sho Tsugawa, Kohei Watabe", + "published": "2021-04-15", + "updated": "2021-04-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.NI", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.10146v2", + "title": "Exploring Structure-Adaptive Graph Learning for Robust Semi-Supervised Classification", + "abstract": "Graph Convolutional Neural Networks (GCNNs) are generalizations of CNNs to\ngraph-structured data, in which convolution is guided by the graph topology. In\nmany cases where graphs are unavailable, existing methods manually construct\ngraphs or learn task-driven adaptive graphs. In this paper, we propose Graph\nLearning Neural Networks (GLNNs), which exploit the optimization of graphs (the\nadjacency matrix in particular) from both data and tasks. Leveraging on\nspectral graph theory, we propose the objective of graph learning from a\nsparsity constraint, properties of a valid adjacency matrix as well as a graph\nLaplacian regularizer via maximum a posteriori estimation. The optimization\nobjective is then integrated into the loss function of the GCNN, which adapts\nthe graph topology to not only labels of a specific task but also the input\ndata. 
Experimental results show that our proposed GLNN outperforms\nstate-of-the-art approaches over widely adopted social network datasets and\ncitation network datasets for semi-supervised classification.", + "authors": "Xiang Gao, Wei Hu, Zongming Guo", + "published": "2019-04-23", + "updated": "2019-09-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1903.00614v1", + "title": "GAP: Generalizable Approximate Graph Partitioning Framework", + "abstract": "Graph partitioning is the problem of dividing the nodes of a graph into\nbalanced partitions while minimizing the edge cut across the partitions. Due to\nits combinatorial nature, many approximate solutions have been developed,\nincluding variants of multi-level methods and spectral clustering. We propose\nGAP, a Generalizable Approximate Partitioning framework that takes a deep\nlearning approach to graph partitioning. We define a differentiable loss\nfunction that represents the partitioning objective and use backpropagation to\noptimize the network parameters. Unlike baselines that redo the optimization\nper graph, GAP is capable of generalization, allowing us to train models that\nproduce performant partitions at inference time, even on unseen graphs.\nFurthermore, because we learn the representation of the graph while jointly\noptimizing for the partitioning loss function, GAP can be easily tuned for a\nvariety of graph structures. We evaluate the performance of GAP on graphs of\nvarying sizes and structures, including graphs of widely used machine learning\nmodels (e.g., ResNet, VGG, and Inception-V3), scale-free graphs, and random\ngraphs. We show that GAP achieves competitive partitions while being up to 100\ntimes faster than the baseline and generalizes to unseen graphs.", + "authors": "Azade Nazi, Will Hang, Anna Goldie, Sujith Ravi, Azalia Mirhoseini", + "published": "2019-03-02", + "updated": "2019-03-02", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1611.07308v1", + "title": "Variational Graph Auto-Encoders", + "abstract": "We introduce the variational graph auto-encoder (VGAE), a framework for\nunsupervised learning on graph-structured data based on the variational\nauto-encoder (VAE). This model makes use of latent variables and is capable of\nlearning interpretable latent representations for undirected graphs. We\ndemonstrate this model using a graph convolutional network (GCN) encoder and a\nsimple inner product decoder. Our model achieves competitive results on a link\nprediction task in citation networks. In contrast to most existing models for\nunsupervised learning on graph-structured data and link prediction, our model\ncan naturally incorporate node features, which significantly improves\npredictive performance on a number of benchmark datasets.", + "authors": "Thomas N. 
Kipf, Max Welling", + "published": "2016-11-21", + "updated": "2016-11-21", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2106.10124v1", + "title": "Graph Context Encoder: Graph Feature Inpainting for Graph Generation and Self-supervised Pretraining", + "abstract": "We propose the Graph Context Encoder (GCE), a simple but efficient approach\nfor graph representation learning based on graph feature masking and\nreconstruction.\n GCE models are trained to efficiently reconstruct input graphs similarly to a\ngraph autoencoder where node and edge labels are masked. In particular, our\nmodel is also allowed to change graph structures by masking and reconstructing\ngraphs augmented by random pseudo-edges.\n We show that GCE can be used for novel graph generation, with applications\nfor molecule generation. Used as a pretraining method, we also show that GCE\nimproves baseline performances in supervised classification tasks tested on\nmultiple standard benchmark graph datasets.", + "authors": "Oriel Frigo, R\u00e9my Brossard, David Dehaene", + "published": "2021-06-18", + "updated": "2021-06-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "68T07" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1912.10206v1", + "title": "How Robust Are Graph Neural Networks to Structural Noise?", + "abstract": "Graph neural networks (GNNs) are an emerging model for learning graph\nembeddings and making predictions on graph structured data. However, robustness\nof graph neural networks is not yet well-understood. In this work, we focus on\nnode structural identity predictions, where a representative GNN model is able\nto achieve near-perfect accuracy. We also show that the same GNN model is not\nrobust to addition of structural noise, through a controlled dataset and set of\nexperiments. Finally, we show that under the right conditions, graph-augmented\ntraining is capable of significantly improving robustness to structural noise.", + "authors": "James Fox, Sivasankaran Rajamanickam", + "published": "2019-12-21", + "updated": "2019-12-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2403.07294v1", + "title": "Graph Data Condensation via Self-expressive Graph Structure Reconstruction", + "abstract": "With the increasing demands of training graph neural networks (GNNs) on\nlarge-scale graphs, graph data condensation has emerged as a critical technique\nto relieve the storage and time costs during the training phase. It aims to\ncondense the original large-scale graph to a much smaller synthetic graph while\npreserving the essential information necessary for efficiently training a\ndownstream GNN. However, existing methods concentrate either on optimizing node\nfeatures exclusively or endeavor to independently learn node features and the\ngraph structure generator. They could not explicitly leverage the information\nof the original graph structure and failed to construct an interpretable graph\nstructure for the synthetic dataset. To address these issues, we introduce a\nnovel framework named \\textbf{G}raph Data \\textbf{C}ondensation via\n\\textbf{S}elf-expressive Graph Structure \\textbf{R}econstruction\n(\\textbf{GCSR}). 
Our method stands out by (1) explicitly incorporating the\noriginal graph structure into the condensing process and (2) capturing the\nnuanced interdependencies between the condensed nodes by reconstructing an\ninterpretable self-expressive graph structure. Extensive experiments and\ncomprehensive analysis validate the efficacy of the proposed method across\ndiverse GNN models and datasets. Our code is available at\nhttps://www.dropbox.com/scl/fi/2aonyp5ln5gisdqtjimu8/GCSR.zip?rlkey=11cuwfpsf54wxiiktu0klud0x&dl=0", + "authors": "Zhanyu Liu, Chaolv Zeng, Guanjie Zheng", + "published": "2024-03-12", + "updated": "2024-03-12", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2111.03262v2", + "title": "CGCL: Collaborative Graph Contrastive Learning without Handcrafted Graph Data Augmentations", + "abstract": "Unsupervised graph representation learning is a non-trivial topic. The\nsuccess of contrastive methods in the unsupervised representation learning on\nstructured data inspires similar attempts on the graph. Existing graph\ncontrastive learning (GCL) aims to learn the invariance across multiple\naugmentation views, which renders it heavily reliant on the handcrafted graph\naugmentations. However, inappropriate graph data augmentations can potentially\njeopardize such invariance. In this paper, we show the potential hazards of\ninappropriate augmentations and then propose a novel Collaborative Graph\nContrastive Learning framework (CGCL). This framework harnesses multiple graph\nencoders to observe the graph. Features observed from different encoders serve\nas the contrastive views in contrastive learning, which avoids inducing\nunstable perturbation and guarantees the invariance. To ensure the\ncollaboration among diverse graph encoders, we propose the concepts of\nasymmetric architecture and complementary encoders as the design principle. To\nfurther prove the rationality, we utilize two quantitative metrics to measure\nthe assembly of CGCL respectively. Extensive experiments demonstrate the\nadvantages of CGCL in unsupervised graph-level representation learning and the\npotential of collaborative framework. The source code for reproducibility is\navailable at https://github.com/zhangtia16/CGCL", + "authors": "Tianyu Zhang, Yuxiang Ren, Wenzheng Feng, Weitao Du, Xuecang Zhang", + "published": "2021-11-05", + "updated": "2024-04-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1910.08057v1", + "title": "Graph Embedding VAE: A Permutation Invariant Model of Graph Structure", + "abstract": "Generative models of graph structure have applications in biology and social\nsciences. The state of the art is GraphRNN, which decomposes the graph\ngeneration process into a series of sequential steps. While effective for\nmodest sizes, it loses its permutation invariance for larger graphs. Instead,\nwe present a permutation invariant latent-variable generative model relying on\ngraph embeddings to encode structure. 
Using tools from the random graph\nliterature, our model is highly scalable to large graphs with likelihood\nevaluation and generation in $O(|V | + |E|)$.", + "authors": "Tony Duan, Juho Lee", + "published": "2019-10-17", + "updated": "2019-10-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2103.10837v1", + "title": "Quantum machine learning of graph-structured data", + "abstract": "Graph structures are ubiquitous throughout the natural sciences. Here we\nconsider graph-structured quantum data and describe how to carry out its\nquantum machine learning via quantum neural networks. In particular, we\nconsider training data in the form of pairs of input and output quantum states\nassociated with the vertices of a graph, together with edges encoding\ncorrelations between the vertices. We explain how to systematically exploit\nthis additional graph structure to improve quantum learning algorithms. These\nalgorithms are numerically simulated and exhibit excellent learning behavior.\nScalable quantum implementations of the learning procedures are likely feasible\non the next generation of quantum computing devices.", + "authors": "Kerstin Beer, Megha Khosla, Julius K\u00f6hler, Tobias J. Osborne", + "published": "2021-03-19", + "updated": "2021-03-19", + "primary_cat": "quant-ph", + "cats": [ + "quant-ph" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2304.13195v1", + "title": "Connector 0.5: A unified framework for graph representation learning", + "abstract": "Graph representation learning models aim to represent the graph structure and\nits features into low-dimensional vectors in a latent space, which can benefit\nvarious downstream tasks, such as node classification and link prediction. Due\nto its powerful graph data modelling capabilities, various graph embedding\nmodels and libraries have been proposed to learn embeddings and help\nresearchers ease conducting experiments. In this paper, we introduce a novel\ngraph representation framework covering various graph embedding models, ranging\nfrom shallow to state-of-the-art models, namely Connector. First, we consider\ngraph generation by constructing various types of graphs with different\nstructural relations, including homogeneous, signed, heterogeneous, and\nknowledge graphs. Second, we introduce various graph representation learning\nmodels, ranging from shallow to deep graph embedding models. Finally, we plan\nto build an efficient open-source framework that can provide deep graph\nembedding models to represent structural relations in graphs. The framework is\navailable at https://github.com/NSLab-CUK/Connector.", + "authors": "Thanh Sang Nguyen, Jooho Lee, Van Thuy Hoang, O-Joun Lee", + "published": "2023-04-25", + "updated": "2023-04-25", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2401.16176v1", + "title": "A Survey on Structure-Preserving Graph Transformers", + "abstract": "The transformer architecture has shown remarkable success in various domains,\nsuch as natural language processing and computer vision. 
When it comes to graph\nlearning, transformers are required not only to capture the interactions\nbetween pairs of nodes but also to preserve graph structures connoting the\nunderlying relations and proximity between them, showing the expressive power\nto capture different graph structures. Accordingly, various\nstructure-preserving graph transformers have been proposed and widely used for\nvarious tasks, such as graph-level tasks in bioinformatics and\nchemoinformatics. However, strategies related to graph structure preservation\nhave not been well organized and systematized in the literature. In this paper,\nwe provide a comprehensive overview of structure-preserving graph transformers\nand generalize these methods from the perspective of their design objective.\nFirst, we divide strategies into four main groups: node feature modulation,\ncontext node sampling, graph rewriting, and transformer architecture\nimprovements. We then further divide the strategies according to the coverage\nand goals of graph structure preservation. Furthermore, we also discuss\nchallenges and future directions for graph transformer models to preserve the\ngraph structure and understand the nature of graphs.", + "authors": "Van Thuy Hoang, O-Joun Lee", + "published": "2024-01-29", + "updated": "2024-01-29", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2101.00082v1", + "title": "Bosonic Random Walk Networks for Graph Learning", + "abstract": "The development of Graph Neural Networks (GNNs) has led to great progress in\nmachine learning on graph-structured data. These networks operate via diffusing\ninformation across the graph nodes while capturing the structure of the graph.\nRecently there has also seen tremendous progress in quantum computing\ntechniques. In this work, we explore applications of multi-particle quantum\nwalks on diffusing information across graphs. Our model is based on learning\nthe operators that govern the dynamics of quantum random walkers on graphs. We\ndemonstrate the effectiveness of our method on classification and regression\ntasks.", + "authors": "Shiv Shankar, Don Towsley", + "published": "2020-12-31", + "updated": "2020-12-31", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2201.06367v1", + "title": "Towards Unsupervised Deep Graph Structure Learning", + "abstract": "In recent years, graph neural networks (GNNs) have emerged as a successful\ntool in a variety of graph-related applications. However, the performance of\nGNNs can be deteriorated when noisy connections occur in the original graph\nstructures; besides, the dependence on explicit structures prevents GNNs from\nbeing applied to general unstructured scenarios. To address these issues,\nrecently emerged deep graph structure learning (GSL) methods propose to jointly\noptimize the graph structure along with GNN under the supervision of a node\nclassification task. Nonetheless, these methods focus on a supervised learning\nscenario, which leads to several problems, i.e., the reliance on labels, the\nbias of edge distribution, and the limitation on application tasks. In this\npaper, we propose a more practical GSL paradigm, unsupervised graph structure\nlearning, where the learned graph topology is optimized by data itself without\nany external guidance (i.e., labels). 
To solve the unsupervised GSL problem, we\npropose a novel StrUcture Bootstrapping contrastive LearnIng fraMEwork (SUBLIME\nfor abbreviation) with the aid of self-supervised contrastive learning.\nSpecifically, we generate a learning target from the original data as an\n\"anchor graph\", and use a contrastive loss to maximize the agreement between\nthe anchor graph and the learned graph. To provide persistent guidance, we\ndesign a novel bootstrapping mechanism that upgrades the anchor graph with\nlearned structures during model learning. We also design a series of graph\nlearners and post-processing schemes to model the structures to learn.\nExtensive experiments on eight benchmark datasets demonstrate the significant\neffectiveness of our proposed SUBLIME and high quality of the optimized graphs.", + "authors": "Yixin Liu, Yu Zheng, Daokun Zhang, Hongxu Chen, Hao Peng, Shirui Pan", + "published": "2022-01-17", + "updated": "2022-01-17", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2206.01152v2", + "title": "Causal Structure Learning: a Combinatorial Perspective", + "abstract": "In this review, we discuss approaches for learning causal structure from\ndata, also called causal discovery. In particular, we focus on approaches for\nlearning directed acyclic graphs (DAGs) and various generalizations which allow\nfor some variables to be unobserved in the available data. We devote special\nattention to two fundamental combinatorial aspects of causal structure\nlearning. First, we discuss the structure of the search space over causal\ngraphs. Second, we discuss the structure of equivalence classes over causal\ngraphs, i.e., sets of graphs which represent what can be learned from\nobservational data alone, and how these equivalence classes can be refined by\nadding interventional data.", + "authors": "Chandler Squires, Caroline Uhler", + "published": "2022-06-02", + "updated": "2022-12-19", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2204.05258v1", + "title": "Multi-view graph structure learning using subspace merging on Grassmann manifold", + "abstract": "Many successful learning algorithms have been recently developed to represent\ngraph-structured data. For example, Graph Neural Networks (GNNs) have achieved\nconsiderable successes in various tasks such as node classification, graph\nclassification, and link prediction. However, these methods are highly\ndependent on the quality of the input graph structure. One used approach to\nalleviate this problem is to learn the graph structure instead of relying on a\nmanually designed graph. In this paper, we introduce a new graph structure\nlearning approach using multi-view learning, named MV-GSL (Multi-View Graph\nStructure Learning), in which we aggregate different graph structure learning\nmethods using subspace merging on Grassmann manifold to improve the quality of\nthe learned graph structures. Extensive experiments are performed to evaluate\nthe effectiveness of the proposed method on two benchmark datasets, Cora and\nCiteseer. 
Our experiments show that the proposed method has promising\nperformance compared to single and other combined graph structure learning\nmethods.", + "authors": "Razieh Ghiasi, Hossein Amirkhani, Alireza Bosaghzadeh", + "published": "2022-04-11", + "updated": "2022-04-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2111.06679v2", + "title": "deepstruct -- linking deep learning and graph theory", + "abstract": "deepstruct connects deep learning models and graph theory such that different\ngraph structures can be imposed on neural networks or graph structures can be\nextracted from trained neural network models. For this, deepstruct provides\ndeep neural network models with different restrictions which can be created\nbased on an initial graph. Further, tools to extract graph structures from\ntrained models are available. This step of extracting graphs can be\ncomputationally expensive even for models of just a few dozen thousand\nparameters and poses a challenging problem. deepstruct supports research in\npruning, neural architecture search, automated network design and structure\nanalysis of neural networks.", + "authors": "Julian Stier, Michael Granitzer", + "published": "2021-11-12", + "updated": "2021-12-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.NE", + "I.2.0; F.0" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2402.02321v1", + "title": "Active Learning for Graphs with Noisy Structures", + "abstract": "Graph Neural Networks (GNNs) have seen significant success in tasks such as\nnode classification, largely contingent upon the availability of sufficient\nlabeled nodes. Yet, the excessive cost of labeling large-scale graphs led to a\nfocus on active learning on graphs, which aims for effective data selection to\nmaximize downstream model performance. Notably, most existing methods assume\nreliable graph topology, while real-world scenarios often present noisy graphs.\nGiven this, designing a successful active learning framework for noisy graphs\nis highly needed but challenging, as selecting data for labeling and obtaining\na clean graph are two tasks naturally interdependent: selecting high-quality\ndata requires clean graph structure while cleaning noisy graph structure\nrequires sufficient labeled data. Considering the complexity mentioned above,\nwe propose an active learning framework, GALClean, which has been specifically\ndesigned to adopt an iterative approach for conducting both data selection and\ngraph purification simultaneously with best information learned from the prior\niteration. Importantly, we summarize GALClean as an instance of the\nExpectation-Maximization algorithm, which provides a theoretical understanding\nof its design and mechanisms. This theory naturally leads to an enhanced\nversion, GALClean+. 
Extensive experiments have demonstrated the effectiveness\nand robustness of our proposed method across various types and levels of noisy\ngraphs.", + "authors": "Hongliang Chi, Cong Qi, Suhang Wang, Yao Ma", + "published": "2024-02-04", + "updated": "2024-02-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2212.08966v4", + "title": "Graph Learning and Its Advancements on Large Language Models: A Holistic Survey", + "abstract": "Graph learning is a prevalent domain that endeavors to learn the intricate\nrelationships among nodes and the topological structure of graphs. Over the\nyears, graph learning has transcended from graph theory to graph data mining.\nWith the advent of representation learning, it has attained remarkable\nperformance in diverse scenarios. Owing to its extensive application prospects,\ngraph learning attracts copious attention. While some researchers have\naccomplished impressive surveys on graph learning, they failed to connect\nrelated objectives, methods, and applications in a more coherent way. As a\nresult, they did not encompass current ample scenarios and challenging problems\ndue to the rapid expansion of graph learning. Particularly, large language\nmodels have recently had a disruptive effect on human life, but they also show\nrelative weakness in structured scenarios. The question of how to make these\nmodels more powerful with graph learning remains open. Our survey focuses on\nthe most recent advancements in integrating graph learning with pre-trained\nlanguage models, specifically emphasizing their application within the domain\nof large language models. Different from previous surveys on graph learning, we\nprovide a holistic review that analyzes current works from the perspective of\ngraph structure, and discusses the latest applications, trends, and challenges\nin graph learning. Specifically, we commence by proposing a taxonomy and then\nsummarize the methods employed in graph learning. We then provide a detailed\nelucidation of mainstream applications. Finally, we propose future directions.", + "authors": "Shaopeng Wei, Yu Zhao, Xingyan Chen, Qing Li, Fuzhen Zhuang, Ji Liu, Fuji Ren, Gang Kou", + "published": "2022-12-17", + "updated": "2023-11-18", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2403.03659v1", + "title": "Robust Graph Structure Learning under Heterophily", + "abstract": "Graph is a fundamental mathematical structure in characterizing relations\nbetween different objects and has been widely used on various learning tasks.\nMost methods implicitly assume a given graph to be accurate and complete.\nHowever, real data is inevitably noisy and sparse, which will lead to inferior\nresults. Despite the remarkable success of recent graph representation learning\nmethods, they inherently presume that the graph is homophilic, and largely\noverlook heterophily, where most connected nodes are from different classes. In\nthis regard, we propose a novel robust graph structure learning method to\nachieve a high-quality graph from heterophilic data for downstream tasks. We\nfirst apply a high-pass filter to make each node more distinctive from its\nneighbors by encoding structure information into the node features. Then, we\nlearn a robust graph with an adaptive norm characterizing different levels of\nnoise. 
Afterwards, we propose a novel regularizer to further refine the graph\nstructure. Clustering and semi-supervised classification experiments on\nheterophilic graphs verify the effectiveness of our method.", + "authors": "Xuanting Xie, Zhao Kang, Wenyu Chen", + "published": "2024-03-06", + "updated": "2024-03-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2404.11869v1", + "title": "Multi-view Graph Structural Representation Learning via Graph Coarsening", + "abstract": "Graph Transformers (GTs) have made remarkable achievements in graph-level\ntasks. However, most existing works regard graph structures as a form of\nguidance or bias for enhancing node representations, which focuses on\nnode-central perspectives and lacks explicit representations of edges and\nstructures. One natural question is, can we treat graph structures node-like as\na whole to learn high-level features? Through experimental analysis, we explore\nthe feasibility of this assumption. Based on our findings, we propose a novel\nmulti-view graph structural representation learning model via graph coarsening\n(MSLgo) on GT architecture for graph classification. Specifically, we build\nthree unique views, original, coarsening, and conversion, to learn a thorough\nstructural representation. We compress loops and cliques via hierarchical\nheuristic graph coarsening and restrict them with well-designed constraints,\nwhich builds the coarsening view to learn high-level interactions between\nstructures. We also introduce line graphs for edge embeddings and switch to\nedge-central perspective to construct the conversion view. Experiments on six\nreal-world datasets demonstrate the improvements of MSLgo over 14 baselines\nfrom various architectures.", + "authors": "Xiaorui Qi, Qijie Bai, Yanlong Wen, Haiwei Zhang, Xiaojie Yuan", + "published": "2024-04-18", + "updated": "2024-04-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1611.05181v3", + "title": "Graph Learning from Data under Structural and Laplacian Constraints", + "abstract": "Graphs are fundamental mathematical structures used in various fields to\nrepresent data, signals and processes. In this paper, we propose a novel\nframework for learning/estimating graphs from data. The proposed framework\nincludes (i) formulation of various graph learning problems, (ii) their\nprobabilistic interpretations and (iii) associated algorithms. Specifically,\ngraph learning problems are posed as estimation of graph Laplacian matrices\nfrom some observed data under given structural constraints (e.g., graph\nconnectivity and sparsity level). From a probabilistic perspective, the\nproblems of interest correspond to maximum a posteriori (MAP) parameter\nestimation of Gaussian-Markov random field (GMRF) models, whose precision\n(inverse covariance) is a graph Laplacian matrix. For the proposed graph\nlearning problems, specialized algorithms are developed by incorporating the\ngraph Laplacian and structural constraints. The experimental results\ndemonstrate that the proposed algorithms outperform the current\nstate-of-the-art methods in terms of accuracy and computational efficiency.", + "authors": "Hilmi E. 
Egilmez, Eduardo Pavez, Antonio Ortega", + "published": "2016-11-16", + "updated": "2017-07-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.08915v2", + "title": "Decoding Molecular Graph Embeddings with Reinforcement Learning", + "abstract": "We present RL-VAE, a graph-to-graph variational autoencoder that uses\nreinforcement learning to decode molecular graphs from latent embeddings.\nMethods have been described previously for graph-to-graph autoencoding, but\nthese approaches require sophisticated decoders that increase the complexity of\ntraining and evaluation (such as requiring parallel encoders and decoders or\nnon-trivial graph matching). Here, we repurpose a simple graph generator to\nenable efficient decoding and generation of molecular graphs.", + "authors": "Steven Kearnes, Li Li, Patrick Riley", + "published": "2019-04-18", + "updated": "2019-06-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1905.11691v1", + "title": "Triple2Vec: Learning Triple Embeddings from Knowledge Graphs", + "abstract": "Graph embedding techniques allow to learn high-quality feature vectors from\ngraph structures and are useful in a variety of tasks, from node classification\nto clustering. Existing approaches have only focused on learning feature\nvectors for the nodes in a (knowledge) graph. To the best of our knowledge,\nnone of them has tackled the problem of embedding of graph edges, that is,\nknowledge graph triples. The approaches that are closer to this task have\nfocused on homogeneous graphs involving only one type of edge and obtain edge\nembeddings by applying some operation (e.g., average) on the embeddings of the\nendpoint nodes. The goal of this paper is to introduce Triple2Vec, a new\ntechnique to directly embed edges in (knowledge) graphs. Triple2Vec builds upon\nthree main ingredients. The first is the notion of line graph. The line graph\nof a graph is another graph representing the adjacency between edges of the\noriginal graph. In particular, the nodes of the line graph are the edges of the\noriginal graph. We show that directly applying existing embedding techniques on\nthe nodes of the line graph to learn edge embeddings is not enough in the\ncontext of knowledge graphs. Thus, we introduce the notion of triple line\ngraph. The second is an edge weighting mechanism both for line graphs derived\nfrom knowledge graphs and homogeneous graphs. The third is a strategy based on\ngraph walks on the weighted triple line graph that can preserve proximity\nbetween nodes. Embeddings are finally generated by adopting the SkipGram model,\nwhere sentences are replaced with graph walks. We evaluate our approach on\ndifferent real world (knowledge) graphs and compared it with related work.", + "authors": "Valeria Fionda, Giuseppe Pirr\u00f3", + "published": "2019-05-28", + "updated": "2019-05-28", + "primary_cat": "cs.AI", + "cats": [ + "cs.AI", + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2210.01489v1", + "title": "Generative Models and Learning Algorithms for Core-Periphery Structured Graphs", + "abstract": "We consider core-periphery structured graphs, which are graphs with a group\nof densely and sparsely connected nodes, respectively, referred to as core and\nperiphery nodes. 
The so-called core score of a node is related to the\nlikelihood of it being a core node. In this paper, we focus on learning the\ncore scores of a graph from its node attributes and connectivity structure. To\nthis end, we propose two classes of probabilistic graphical models: affine and\nnonlinear. First, we describe affine generative models to model the dependence\nof node attributes on its core scores, which determine the graph structure.\nNext, we discuss nonlinear generative models in which the partial correlations\nof node attributes influence the graph structure through latent core scores. We\ndevelop algorithms for inferring the model parameters and core scores of a\ngraph when both the graph structure and node attributes are available. When\nonly the node attributes of graphs are available, we jointly learn a\ncore-periphery structured graph and its core scores. We provide results from\nnumerical experiments on several synthetic and real-world datasets to\ndemonstrate the efficacy of the developed models and algorithms.", + "authors": "Sravanthi Gurugubelli, Sundeep Prabhakar Chepuri", + "published": "2022-10-04", + "updated": "2022-10-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1904.09671v1", + "title": "DDGK: Learning Graph Representations for Deep Divergence Graph Kernels", + "abstract": "Can neural networks learn to compare graphs without feature engineering? In\nthis paper, we show that it is possible to learn representations for graph\nsimilarity with neither domain knowledge nor supervision (i.e.\\ feature\nengineering or labeled graphs). We propose Deep Divergence Graph Kernels, an\nunsupervised method for learning representations over graphs that encodes a\nrelaxed notion of graph isomorphism. Our method consists of three parts. First,\nwe learn an encoder for each anchor graph to capture its structure. Second, for\neach pair of graphs, we train a cross-graph attention network which uses the\nnode representations of an anchor graph to reconstruct another graph. This\napproach, which we call isomorphism attention, captures how well the\nrepresentations of one graph can encode another. We use the attention-augmented\nencoder's predictions to define a divergence score for each pair of graphs.\nFinally, we construct an embedding space for all graphs using these pair-wise\ndivergence scores.\n Unlike previous work, much of which relies on 1) supervision, 2) domain\nspecific knowledge (e.g. a reliance on Weisfeiler-Lehman kernels), and 3) known\nnode alignment, our unsupervised method jointly learns node representations,\ngraph representations, and an attention-based alignment between graphs.\n Our experimental results show that Deep Divergence Graph Kernels can learn an\nunsupervised alignment between graphs, and that the learned representations\nachieve competitive results when used as features on a number of challenging\ngraph classification tasks. 
Furthermore, we illustrate how the learned\nattention allows insight into the the alignment of sub-structures across\ngraphs.", + "authors": "Rami Al-Rfou, Dustin Zelle, Bryan Perozzi", + "published": "2019-04-21", + "updated": "2019-04-21", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.IR", + "cs.SI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2110.05018v2", + "title": "Time-varying Graph Learning Under Structured Temporal Priors", + "abstract": "This paper endeavors to learn time-varying graphs by using structured\ntemporal priors that assume underlying relations between arbitrary two graphs\nin the graph sequence. Different from many existing chain structure based\nmethods in which the priors like temporal homogeneity can only describe the\nvariations of two consecutive graphs, we propose a structure named\n\\emph{temporal graph} to characterize the underlying real temporal relations.\nUnder this framework, the chain structure is actually a special case of our\ntemporal graph. We further proposed Alternating Direction Method of Multipliers\n(ADMM), a distributed algorithm, to solve the induced optimization problem.\nNumerical experiments demonstrate the superiorities of our method.", + "authors": "Xiang Zhang, Qiao Wang", + "published": "2021-10-11", + "updated": "2022-02-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "eess.SP" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/1905.10715v1", + "title": "Graph Attention Auto-Encoders", + "abstract": "Auto-encoders have emerged as a successful framework for unsupervised\nlearning. However, conventional auto-encoders are incapable of utilizing\nexplicit relations in structured data. To take advantage of relations in\ngraph-structured data, several graph auto-encoders have recently been proposed,\nbut they neglect to reconstruct either the graph structure or node attributes.\nIn this paper, we present the graph attention auto-encoder (GATE), a neural\nnetwork architecture for unsupervised representation learning on\ngraph-structured data. Our architecture is able to reconstruct graph-structured\ninputs, including both node attributes and the graph structure, through stacked\nencoder/decoder layers equipped with self-attention mechanisms. In the encoder,\nby considering node attributes as initial node representations, each layer\ngenerates new representations of nodes by attending over their neighbors'\nrepresentations. In the decoder, we attempt to reverse the encoding process to\nreconstruct node attributes. Moreover, node representations are regularized to\nreconstruct the graph structure. Our proposed architecture does not need to\nknow the graph structure upfront, and thus it can be applied to inductive\nlearning. 
Our experiments demonstrate competitive performance on several node\nclassification benchmark datasets for transductive and inductive tasks, even\nexceeding the performance of supervised learning baselines in most cases.", + "authors": "Amin Salehi, Hasan Davulcu", + "published": "2019-05-26", + "updated": "2019-05-26", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI", + "stat.ML" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2108.01660v3", + "title": "Graph Neural Networks With Lifting-based Adaptive Graph Wavelets", + "abstract": "Spectral-based graph neural networks (SGNNs) have been attracting increasing\nattention in graph representation learning. However, existing SGNNs are limited\nin implementing graph filters with rigid transforms (e.g., graph Fourier or\npredefined graph wavelet transforms) and cannot adapt to signals residing on\ngraphs and tasks at hand. In this paper, we propose a novel class of graph\nneural networks that realizes graph filters with adaptive graph wavelets.\nSpecifically, the adaptive graph wavelets are learned with neural\nnetwork-parameterized lifting structures, where structure-aware attention-based\nlifting operations (i.e., prediction and update operations) are developed to\njointly consider graph structures and node features. We propose to lift based\non diffusion wavelets to alleviate the structural information loss induced by\npartitioning non-bipartite graphs. By design, the locality and sparsity of the\nresulting wavelet transform as well as the scalability of the lifting structure\nare guaranteed. We further derive a soft-thresholding filtering operation by\nlearning sparse graph representations in terms of the learned wavelets,\nyielding a localized, efficient, and scalable wavelet-based graph filters. To\nensure that the learned graph representations are invariant to node\npermutations, a layer is employed at the input of the networks to reorder the\nnodes according to their local topology information. We evaluate the proposed\nnetworks in both node-level and graph-level representation learning tasks on\nbenchmark citation and bioinformatics graph datasets. Extensive experiments\ndemonstrate the superiority of the proposed networks over existing SGNNs in\nterms of accuracy, efficiency, and scalability.", + "authors": "Mingxing Xu, Wenrui Dai, Chenglin Li, Junni Zou, Hongkai Xiong, Pascal Frossard", + "published": "2021-08-03", + "updated": "2022-01-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2209.00793v2", + "title": "Structure-Preserving Graph Representation Learning", + "abstract": "Though graph representation learning (GRL) has made significant progress, it\nis still a challenge to extract and embed the rich topological structure and\nfeature information in an adequate way. Most existing methods focus on local\nstructure and fail to fully incorporate the global topological structure. To\nthis end, we propose a novel Structure-Preserving Graph Representation Learning\n(SPGRL) method, to fully capture the structure information of graphs.\nSpecifically, to reduce the uncertainty and misinformation of the original\ngraph, we construct a feature graph as a complementary view via k-Nearest\nNeighbor method. The feature graph can be used to contrast at node-level to\ncapture the local relation. 
Besides, we retain the global topological structure\ninformation by maximizing the mutual information (MI) of the whole graph and\nfeature embeddings, which is theoretically reduced to exchanging the feature\nembeddings of the feature and the original graphs to reconstruct themselves.\nExtensive experiments show that our method has quite superior performance on\nsemi-supervised node classification task and excellent robustness under noise\nperturbation on graph structure or node features.", + "authors": "Ruiyi Fang, Liangjian Wen, Zhao Kang, Jianzhuang Liu", + "published": "2022-09-02", + "updated": "2022-12-08", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2204.01855v2", + "title": "A Survey on Graph Representation Learning Methods", + "abstract": "Graphs representation learning has been a very active research area in recent\nyears. The goal of graph representation learning is to generate graph\nrepresentation vectors that capture the structure and features of large graphs\naccurately. This is especially important because the quality of the graph\nrepresentation vectors will affect the performance of these vectors in\ndownstream tasks such as node classification, link prediction and anomaly\ndetection. Many techniques are proposed for generating effective graph\nrepresentation vectors. Two of the most prevalent categories of graph\nrepresentation learning are graph embedding methods without using graph neural\nnets (GNN), which we denote as non-GNN based graph embedding methods, and graph\nneural nets (GNN) based methods. Non-GNN graph embedding methods are based on\ntechniques such as random walks, temporal point processes and neural network\nlearning methods. GNN-based methods, on the other hand, are the application of\ndeep learning on graph data. In this survey, we provide an overview of these\ntwo categories and cover the current state-of-the-art methods for both static\nand dynamic graphs. Finally, we explore some open and ongoing research\ndirections for future work.", + "authors": "Shima Khoshraftar, Aijun An", + "published": "2022-04-04", + "updated": "2022-06-15", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.SI" + ], + "category": "Graph AND Structure AND Learning" + }, + { + "url": "http://arxiv.org/abs/2202.10688v2", + "title": "Graph Lifelong Learning: A Survey", + "abstract": "Graph learning is a popular approach for performing machine learning on\ngraph-structured data. It has revolutionized the machine learning ability to\nmodel graph data to address downstream tasks. Its application is wide due to\nthe availability of graph data ranging from all types of networks to\ninformation systems. Most graph learning methods assume that the graph is\nstatic and its complete structure is known during training. This limits their\napplicability since they cannot be applied to problems where the underlying\ngraph grows over time and/or new tasks emerge incrementally. Such applications\nrequire a lifelong learning approach that can learn the graph continuously and\naccommodate new information whilst retaining previously learned knowledge.\nLifelong learning methods that enable continuous learning in regular domains\nlike images and text cannot be directly applied to continuously evolving graph\ndata, due to its irregular structure. As a result, graph lifelong learning is\ngaining attention from the research community. 
This survey paper provides a\ncomprehensive overview of recent advancements in graph lifelong learning,\nincluding the categorization of existing methods, and the discussions of\npotential applications and open research problems.", + "authors": "Falih Gozi Febrinanto, Feng Xia, Kristen Moore, Chandra Thapa, Charu Aggarwal", + "published": "2022-02-22", + "updated": "2022-11-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "68T07, 68T05", + "I.2.6" + ], + "category": "Graph AND Structure AND Learning" + } +] \ No newline at end of file