diff --git "a/related_34K/test_related_short_2404.17364v2.json" "b/related_34K/test_related_short_2404.17364v2.json" new file mode 100644--- /dev/null +++ "b/related_34K/test_related_short_2404.17364v2.json" @@ -0,0 +1,1401 @@ +[ + { + "url": "http://arxiv.org/abs/2404.17364v2", + "title": "MV-VTON: Multi-View Virtual Try-On with Diffusion Models", + "abstract": "The goal of image-based virtual try-on is to generate an image of the target\nperson naturally wearing the given clothing. However, most existing methods\nsolely focus on the frontal try-on using the frontal clothing. When the views\nof the clothing and person are significantly inconsistent, particularly when\nthe person's view is non-frontal, the results are unsatisfactory. To address\nthis challenge, we introduce Multi-View Virtual Try-ON (MV-VTON), which aims to\nreconstruct the dressing results of a person from multiple views using the\ngiven clothes. On the one hand, given that single-view clothes provide\ninsufficient information for MV-VTON, we instead employ two images, i.e., the\nfrontal and back views of the clothing, to encompass the complete view as much\nas possible. On the other hand, the diffusion models that have demonstrated\nsuperior abilities are adopted to perform our MV-VTON. In particular, we\npropose a view-adaptive selection method where hard-selection and\nsoft-selection are applied to the global and local clothing feature extraction,\nrespectively. This ensures that the clothing features are roughly fit to the\nperson's view. Subsequently, we suggest a joint attention block to align and\nfuse clothing features with person features. Additionally, we collect a MV-VTON\ndataset, i.e., Multi-View Garment (MVG), in which each person has multiple\nphotos with diverse views and poses. Experiments show that the proposed method\nnot only achieves state-of-the-art results on MV-VTON task using our MVG\ndataset, but also has superiority on frontal-view virtual try-on task using\nVITON-HD and DressCode datasets. Codes and datasets will be publicly released\nat https://github.com/hywang2002/MV-VTON .", + "authors": "Haoyu Wang, Zhilu Zhang, Donglin Di, Shiliang Zhang, Wangmeng Zuo", + "published": "2024-04-26", + "updated": "2024-04-29", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Original Paper", + "paper_cat": "Diffusion AND Model", + "gt": "2.1 GAN-Based Virtual Try-On Most of the previous methods are aimed at the frontal-view VTON task. To reconstruct realistic results, existing methods based on generative adversarial networks (GAN) [10] are typically divided into two steps. Firstly, the frontal-view clothing is deformed to initially align with the target person\u2019s pose. Afterward, the warped clothing and target person are fused through a GAN-based generator. In the warping step, some methods [8, 30, 39, 48] use TPS transformation to deform the frontal-view flattened clothing, and others [9, 24, 44] predict the global and local optical flow required for clothing deformation. However, when the clothing possesses intricate high-frequency details and the person\u2019s pose is complex, the effectiveness of clothing deformation is often significantly diminished. Moreover, GAN-based generators generally encounter challenges in convergence and are highly susceptible to mode collapse [29], leading to noticeable artifacts at the junction between warped clothing and the target person in the final results. 
In addition, previous multi-pose virtual try-on methods [6, 40, 49] can MV-VTON: Multi-View Virtual Try-On with Diffusion Models arXiv, 2024, MV-VTON change the person\u2019s pose, but are also limited by GAN-based generator and insufficient clothing information. In this paper, we leverage the potent generative capability of a large-scale pre-trained diffusion model to implicitly deform the clothing from frontal and back views. Simultaneously, we employ the view-adaptive selection to choose the clothing features from the two views. 2.2 Diffusion-Based Virtual Try-On Thanks to the rapid advancement of diffusion models, recent works have sought to utilize the generative prior of large-scale pre-trained diffusion models [17, 34, 38, 47] to tackle frontal-view virtual try-on tasks. TryOnDiffusion [52] introduces two U-Nets to encode target person and frontal-view clothing images respectively, and interacts with the features of the two branches through the cross-attention mechanism. LaDI-VTON [30] encodes the frontal-view clothing image through textual inversion [7, 43] and serves as the conditional input of backbone. DCI-VTON [11] first conducts an initial deformation of frontal-view clothing by incorporating a pre-trained wrapping network [9]. Subsequently, it attaches the deformed clothing to the target person image and feeds it into the diffusion model. While their frontal-view virtual try-on results seem more natural compared to GAN-based methods, they face difficulties in preserving high-frequency details due to the loss of details from the CLIP image encoder [33]. To address this problem, StableVITON [22] attempts to introduce an additional encoder [50] to encode the local features of frontal-view clothing, and align the obtained clothing features through the zero cross-attention block. However, StableVITON [22] aligns the global features and local features of clothing separately without considering direct interaction between the two, making it difficult to refine the details while preserving global semantic information. Therefore, we introduce the joint attention blocks, building upon the foundation of maintaining two encoders to extract the global and local features of clothing, respectively. We first align the global and local features to the target person, then selectively fuse the two to enhance the preservation of local texture and global semantic information in the original clothing.", + "pre_questions": [], + "main_content": "INTRODUCTION Virtual Try-On (VTON) is a classic yet intriguing technology. It can be applied in the field of fashion, and clothes online shopping to improve user experience. VTON aims to render the visual effect of a person wearing a specified garment. The emphasis of this technology lies in reconstructing a realistic image that faithfully preserves personal attributes (e.g., appearance and pose) and accurately represents clothing shape and details. Most early VTON methods [2, 5, 13, 14, 24, 25, 44, 48] are based on generative adversarial networks [10] (GANs). They generally align the clothing to the person\u2019s pose through a warping network, and then employ a generator to fuse the warped clothing with the person. However, it poses a challenge to ensure that the warped clothing fits the target person\u2019s pose, and inaccurate clothes features will easily lead to distortion results. Recently, the diffusion models [19, 32, 34, 50] have made remarkable strides in the field of image generation [18, 20, 27, 35, 45]. 
Leveraging its potent generative capabilities, some researchers [11, 22, 30, 46, 52] have integrated it into virtual try-on endeavors, building upon previous work and achieving commendable results. For example, LaDI-VTON [30] encodes the garment through textual inversion as the conditional input for Stable Diffusion [34] backbone, and introduces a residual connection between the VAE [23] encoder and decoder to preserve person\u2019s face and hands. DCI-VTON [11] attaches the pre-warped garment to the inpainting area, providing a priori information for the diffusion model. StableVITON [22] introduces an additional encoder and attention blocks to deform the clothing. Although VTON has made great progress, most existing methods focus on taking the frontal garment to perform the frontal tryon. In practical applications, such as online shopping for clothes, customers may expect to obtain the dressing effect on multiple views (e.g., side or back). In this case, the pose of the garment may be seriously inconsistent with the person\u2019s posture, and the single-view clothing may not be enough to provide complete try-on information. Thus, these methods will easily generate results with poorly deformed clothing, and lead to the loss of high-frequency details such as texts, patterns, and other textures on clothing. To address the issue, we introduce Multi-View Virtual Try-ON (MV-VTON), which aims to faithfully reconstruct the appearance and attire of a person from various views. MV-VTON should not only address the challenge of natural warping on the clothing\u2019s front but also tackle the issue of inconsistency between the poses of the given clothing and target person. For example, for hooded sweaters, which may exhibit significant differences between frontal and back styles, MV-VTON should display them from various views to provide a more comprehensive fitting result. Thus, only providing single clothing can\u2019t meet the needs of dressing up. Instead, we utilize both the frontal and back views of the clothing, which covers approximately the complete view with as few images as possible. Given the frontal and back clothing, we utilize the popular diffusion method to achieve MV-VTON. It is natural but doesn\u2019t work well to simply concatenate two pieces of clothing together as conditions of diffusion models, as it is difficult for the model to learn how to assign two-view clothes to a person, especially when the person is sideways. Instead, we propose a view-adaptive selection mechanism, which picks appropriate features of two-view clothes based on the posture information of the person and clothes. Therein, the hard-selection module chooses one of the two clothes for global feature extraction, and the soft-selection module modulates the local features of two clothes. We utilize CLIP [33] and an additional multi-scale encoder to extract the global and local clothing features, respectively. Moreover, to enhance the preservation of high-frequency details in clothing, we present the joint attention block. It independently aligns global and local features with the person features, and selectively fuses them to refine the local clothing details while preserving global semantic information. Furthermore, we collect a multi-view virtual try-on dataset, named Multi-View Garment (MVG). It contains thousands of samples, and each sample contains 5 images under different views and poses. 
We conduct extensive experiments not only on the MV-VTON task using the MVG dataset, but also on the frontal-view VTON task using the VITON-HD [5] and DressCode [31] datasets. The results demonstrate that our method outperforms existing methods on both tasks, quantitatively and qualitatively. In summary, our contributions are outlined below: • We introduce a novel Multi-View Virtual Try-ON (MV-VTON) task, which aims at generating realistic dressing-up results of the multi-view person by using the given frontal and back clothing. • We propose a view-adaptive selection method, where hard-selection and soft-selection are applied to global and local clothing feature extraction, respectively. It ensures that the clothing features roughly fit the person's view. • We propose the joint attention block to align the global and local features of the selected clothing with the person ones, and fuse them. • We collect a multi-view virtual try-on dataset. Extensive experiments demonstrate that our method outperforms previous approaches quantitatively and qualitatively in both frontal-view and multi-view virtual try-on tasks. While most existing virtual try-on methods are designed solely for frontal-view scenarios, we present a novel approach to handle both frontal-view and multi-view virtual try-on tasks, along with a multi-view virtual try-on dataset, MVG, comprising try-on images captured from five different views. Examples of our dataset are shown in Figure 2(b). Figure 2: Comparison between previous datasets and our proposed MVG dataset; panels: (a) Frontal View, (b) Multi-view. (a) shows the datasets used by previous work, which only have clothing and person in the frontal view. In contrast, each sample in our dataset (b) offers images from five different views. By utilizing the first and fifth images to provide frontal and back view clothing, we can reconstruct multi-view virtual try-on results. Formally, given a person image x \in R^{H \times W \times 3} in a frontal, side, or back view, along with a frontal-view clothing c_f \in R^{H \times W \times 3} and a back-view clothing c_b \in R^{H \times W \times 3}, our goal is to generate the result of the person wearing the clothing in its view. Considering the substantial differences between the front and back of most clothing items, compared to previous frontal-view virtual try-on methods, another challenge we face is how to make informed decisions regarding the two provided clothing images based on the target person's pose, ensuring a natural try-on result across multiple views. In this work, we use an image inpainting diffusion model [47] as the baseline of our method. It concatenates the noisy image z_t, the
encoded clothing-agnostic image E(a), and the resized clothing-agnostic mask m in the channel dimension, and feeds them into the backbone as spatial input. Therein, a is the masked person image x obtained using the inpainting mask M \in R^{H \times W \times 1}. In addition, we use an existing method to initially deform the clothing and paste it on a. m is the result of resizing M to the latent space, and E represents the VAE encoder [23] of the latent diffusion model [34]. While utilizing the CLIP image encoder [33] to encode clothing as the global condition of the diffusion model, we also introduce an additional encoder [47, 50] to encode clothing to provide more refined local conditions. Since both the frontal and back view clothing need to be encoded, directly sending both into the backbone as conditions may result in confusion of clothing features. To alleviate this problem, we propose a view-adaptive selection mechanism. Based on the similarity between the poses of the person and the two clothes, it conducts hard-selection when extracting global features and soft-selection when extracting local features. To enhance high-frequency details in global features using local ones, while preserving semantic information in clothing, we introduce a joint attention block. It first independently aligns global and local features to the person ones and then selectively fuses them. Figure 3(a) depicts an overview of our proposed method. 3.1 Preliminaries for Diffusion Models Diffusion models [17, 34, 38] have demonstrated strong capabilities in visual generation, transforming a Gaussian distribution into a target distribution by iterative denoising. In particular, Stable Diffusion [34] is a widely used generative diffusion model, which consists of a CLIP text encoder [33] E_T, a VAE [23] encoder E as well as decoder D, and a time-conditional denoising model \epsilon_\theta. The text encoder E_T encodes the input text prompt y as conditional input. The VAE encoder E compresses the input image I into latent space to get the latent variable z_0 = E(I). In contrast, the VAE decoder D decodes the output of the backbone from latent space back to pixel space.
Through the VAE encoder E, at an arbitrary time step t, the forward process is performed:

\bar{\alpha}_t := \prod_{s=1}^{t} (1 - \beta_s), \qquad z_t = \sqrt{\bar{\alpha}_t}\, z_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon, \qquad (1)

where \epsilon \sim N(0, 1) is the random Gaussian noise and \beta is a predefined variance schedule.

Figure 3: (a) Overview of MV-VTON. It encodes frontal and back view clothing into global features using the CLIP image encoder [33] and extracts multi-scale local features through the backbone encoder's trainable copy E_l. Both features act as conditional inputs for the decoder of the backbone. Besides, both features are selectively extracted through the view-adaptive selection mechanism. (b) Soft-selection modulates the clothing features of the frontal and back view, respectively, based on the similarity between the clothing's pose and the person's pose. Then the features from both views are concatenated in the channel dimension.

The training objective of the diffusion model is to acquire a noise prediction network that minimizes the disparity between the predicted noise and the noise added to the ground truth. The loss function can be defined as

L_{LDM} = E_{E(I),\, y,\, \epsilon \sim N(0,1),\, t}\big[\, \lVert \epsilon - \epsilon_\theta(z_t, t, E_T(y)) \rVert_2^2 \,\big], \qquad (2)

where t is the time step, and z_t represents the encoded image E(I) with random Gaussian noise \epsilon \sim N(0, 1) added.
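For illustration only, the forward noising step in Eq. (1) and the noise-prediction objective in Eq. (2) can be sketched in PyTorch as follows; this is a minimal sketch rather than the authors' code, and the denoiser call signature is an assumption.

```python
# Minimal sketch (not the authors' code) of the forward process in Eq. (1)
# and the noise-prediction objective in Eq. (2); the denoiser interface is assumed.
import torch
import torch.nn.functional as F

def make_alpha_bar(betas):
    # \bar{alpha}_t = prod_{s<=t} (1 - beta_s)
    return torch.cumprod(1.0 - betas, dim=0)

def q_sample(z0, t, alpha_bar):
    # z_t = sqrt(alpha_bar_t) * z0 + sqrt(1 - alpha_bar_t) * eps   (Eq. 1)
    eps = torch.randn_like(z0)
    a = alpha_bar[t].view(-1, 1, 1, 1)
    return a.sqrt() * z0 + (1.0 - a).sqrt() * eps, eps

def ldm_loss(denoiser, z0, cond, betas):
    # E[ || eps - eps_theta(z_t, t, cond) ||_2^2 ]   (Eq. 2; Eq. 3 swaps the text
    # condition for the CLIP image embedding of the garment)
    alpha_bar = make_alpha_bar(betas)
    t = torch.randint(0, betas.shape[0], (z0.shape[0],), device=z0.device)
    zt, eps = q_sample(z0, t, alpha_bar)
    eps_pred = denoiser(zt, t, cond)  # assumed signature: (latents, timestep, condition)
    return F.mse_loss(eps_pred, eps)
```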
In our work, we use an exemplar-based inpainting model [47] as the backbone, which employs an image c rather than text as the prompt, and then encodes c by the image encoder E_I of CLIP [33]. Thus, the loss function in Eq. (2) can be modified as

L_{LDM} = E_{E(I),\, c,\, \epsilon \sim N(0,1),\, t}\big[\, \lVert \epsilon - \epsilon_\theta(z_t, t, E_I(c)) \rVert_2^2 \,\big]. \qquad (3)

3.2 View-Adaptive Selection For the multi-view virtual try-on task, given the substantial differences between the frontal and back view, as illustrated in Figure 2(b), it is imperative to extract and assign the features of the frontal and back view clothing to the person tendentiously. Based on the pose of the target person, we can determine which view's clothing should be given more attention during the try-on process. For example, if the target pose resembles the pose in the fourth column of Figure 2(b), it is evident that we should rely more on the characteristics of the back view clothing to generate the try-on result. Specifically, we propose a view-adaptive selection mechanism to achieve this purpose, including hard- and soft-selection. Hard-Selection for Global Clothing Features. The image encoder of CLIP [33] is used to extract global features of clothing. Before this process, we perform hard-selection on the clothing images under the frontal and back view based on the similarity between the garments' pose and the person's pose. That is, we only select the one piece of clothing that is closest to the person's pose as the input of the image encoder, since it is enough to cover the global semantic information. When generating the pre-warped clothing in E(a), the selection is also performed. Implementation details of hard-selection can be found in the supplementary material. Soft-Selection for Local Clothing Features. The multi-scale local features of the frontal and back view clothing are extracted through an additional encoder E_l. Relying solely on the clothing from the frontal or back view may not be sufficient when reconstructing the try-on results in certain scenes, such as the third column shown in Figure 2(b). In these cases, it may be necessary to incorporate clothing features from both the frontal and back views. However, simply combining the two may lead to confusion of features. Instead, we introduce a soft-selection block to modulate their local features, respectively, as shown in Figure 3(b). Firstly, the person's pose p_h, the frontal-view clothing's pose p_f and the back-view clothing's pose p_b are encoded by the pose encoder E_p to obtain their respective features E_p(p_h), E_p(p_f) and E_p(p_b). Details of the pose encoder can be found in the supplementary material.

Figure 4: Overview of the proposed joint attention block.
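For concreteness, the hard-selection step described above (choosing between the frontal and back garment from the person's skeleton, as detailed in the supplementary material) might be sketched as follows; the OpenPose keypoint indices and the use of shoulders as a proxy for the arms are assumptions.

```python
# Rough sketch of the hard-selection rule (full details are in the supplementary
# material); keypoint indices follow the OpenPose BODY_25 layout only as an
# assumption, and shoulders stand in for the arms mentioned in the appendix.
import numpy as np

R_SHOULDER, L_SHOULDER = 2, 5  # assumed OpenPose BODY_25 indices

def hard_select(person_keypoints: np.ndarray, cloth_front, cloth_back):
    """person_keypoints: (K, 3) array of (x, y, confidence) for the target person."""
    right_x = person_keypoints[R_SHOULDER, 0]
    left_x = person_keypoints[L_SHOULDER, 0]
    # Right side drawn to the left of the left side -> the person faces the camera,
    # so the frontal garment is the better global condition; otherwise use the back one.
    return cloth_front if right_x < left_x else cloth_back
```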
When processing the frontal-view clothing, in the i-th soft-selection block, we map E_p(p_h) and E_p(p_f) to P_h^i and P_f^i through linear layers with weights W_h^i and W_f^i, respectively. The features of the frontal-view clothing c_f^i are mapped to C_f^i through a linear layer with weights W_c^i. Then we calculate the similarity between the person's pose and the frontal-view clothing's pose to get the selection weights of the frontal-view clothing, i.e.,

weights = \mathrm{softmax}\left( \frac{P_h^i (P_f^i)^{\top}}{\sqrt{d}} \right), \qquad (4)

where weights represents the selection weights of the frontal-view clothing, and d represents the dimension of these matrices. Assuming that the person's pose is biased towards the front, as depicted in the second column of Figure 2(b), the similarity between the person's pose and the frontal-view clothing's pose will be higher. Consequently, the corresponding clothing features will be enhanced by weights, and vice versa. The features of the back-view clothing c_b^i undergo similar processing. Consequently, one of the features c_f^i and c_b^i in the two views will be enhanced while the other will be weakened. Finally, the two selected clothing features are concatenated along the channel dimension as the local condition c_l^i of the backbone.
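A minimal sketch of the soft-selection weighting in Eq. (4) is given below for a single scale i; the tensor shapes and the exact way the weights modulate the clothing features are assumptions, since the paper leaves these implementation details open.

```python
# Minimal sketch of the soft-selection weighting in Eq. (4) at one scale i.
# Tensor shapes and the final modulation of the clothing features are assumptions.
import torch
import torch.nn as nn

class SoftSelection(nn.Module):
    def __init__(self, pose_dim, feat_dim, d):
        super().__init__()
        self.w_h = nn.Linear(pose_dim, d)         # person pose embedding  -> P_h^i
        self.w_g = nn.Linear(pose_dim, d)         # garment pose embedding -> P_f^i or P_b^i
        self.w_c = nn.Linear(feat_dim, feat_dim)  # garment features       -> C^i
        self.d = d

    def forward(self, pose_person, pose_garment, cloth_feat):
        p_h = self.w_h(pose_person)    # (B, N, d)
        p_g = self.w_g(pose_garment)   # (B, M, d)
        c = self.w_c(cloth_feat)       # (B, M, C)
        # weights = softmax(P_h^i (P_f^i)^T / sqrt(d))   (Eq. 4)
        weights = torch.softmax(p_h @ p_g.transpose(1, 2) / self.d ** 0.5, dim=-1)
        # Garment features are strengthened or weakened by their pose similarity to
        # the person; the frontal and back outputs are later concatenated along the
        # channel dimension to form the local condition c_l^i.
        return weights @ c             # (B, N, C)
```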
3.3 Joint Attention Block Global clothing features c_g provide identical conditions for blocks at each scale of the U-Net, and multi-scale local clothing features c_l allow for reconstructing more accurate details. We present a joint attention block to align c_g and c_l with the current person features, as shown in Figure 4. Additionally, to retain most of the semantic information in the global features c_g, we use the local features c_l to refine some lost and erroneous detailed texture information in c_g by selective fusion. Specifically, in the i-th joint attention block, we first calculate self-attention for the current features f_in^i, followed by double cross-attention, where the queries (Q) come from f_in^i, the global features c_g serve as one set of keys (K) and values (V), and the local features c_l^i serve as another set of keys (K) and values (V). After aligning to the person's pose through cross-attention, the clothing features c_g and c_l^i are selectively fused in the channel-wise dimension, i.e.,

f_out^i = \mathrm{softmax}\left( \frac{Q_g^i (K_g^i)^{\top}}{\sqrt{d}} \right) V_g^i + \lambda \odot \mathrm{softmax}\left( \frac{Q_l^i (K_l^i)^{\top}}{\sqrt{d}} \right) V_l^i, \qquad (5)

where Q_g^i, K_g^i, V_g^i represent the Q, K, V of the global branch, Q_l^i, K_l^i, V_l^i represent the Q, K, V of the local branch, \lambda is the learnable fusion vector, \odot represents channel-wise multiplication, and f_out^i represents the clothing features after selective fusion. By engaging and fusing the global and local features of garments, we can enhance the retention of high-frequency details like texts and patterns in the original clothing. 3.4 Training Objectives As stated in Section 3.1, diffusion models learn to generate images from random Gaussian noise. However, the training objective in Eq. (3) is performed in latent space and does not explicitly constrain the generated results in the visible image space, resulting in slight differences in color from the ground truth. To alleviate the problem, we additionally employ an L1 loss L_1 and a perceptual loss [21] L_perc. The L1 loss is calculated by

L_1 = \lVert \hat{x} - x \rVert_1, \qquad (6)

where \hat{x} is the reconstructed image using Eq. (1). The perceptual loss is calculated as

L_perc = \sum_{k=1}^{5} \lVert \phi_k(\hat{x}) - \phi_k(x) \rVert_1, \qquad (7)

where \phi_k represents the k-th layer of VGG [37]. In total, the overall training objective can be written as

L = L_{LDM} + \lambda_1 L_1 + \lambda_{perc} L_perc, \qquad (8)

where \lambda_1 and \lambda_{perc} are the balancing weights.
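The combined objective in Eqs. (6)-(8) could be implemented roughly as follows; the choice of VGG-16 layers and the way \hat{x} is reconstructed are assumptions, while the default balancing weights match the values reported later in the implementation details.

```python
# Hedged sketch of the full objective in Eqs. (6)-(8): L = L_LDM + l1 * L1 + lp * L_perc.
# The VGG-16 layer choice and how x_hat is reconstructed from Eq. (1) are assumptions.
import torch
import torchvision

class PerceptualLoss(torch.nn.Module):
    def __init__(self, layers=(3, 8, 15, 22, 29)):  # five relu outputs of VGG-16 (assumed choice)
        super().__init__()
        weights = torchvision.models.VGG16_Weights.DEFAULT
        self.vgg = torchvision.models.vgg16(weights=weights).features.eval()
        self.layers = set(layers)
        for p in self.vgg.parameters():
            p.requires_grad_(False)

    def forward(self, x_hat, x):
        loss, h1, h2 = 0.0, x_hat, x
        for i, layer in enumerate(self.vgg):
            h1, h2 = layer(h1), layer(h2)
            if i in self.layers:
                loss = loss + (h1 - h2).abs().mean()   # Eq. (7), mean-reduced
        return loss

def total_loss(l_ldm, x_hat, x, perc, lambda_1=0.1, lambda_perc=1e-4):
    l1 = (x_hat - x).abs().mean()                      # Eq. (6), mean-reduced
    return l_ldm + lambda_1 * l1 + lambda_perc * perc(x_hat, x)   # Eq. (8)
```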
4 EXPERIMENTS 4.1 Experiments Setting Multi-View Dataset. For the proposed multi-view virtual try-on task, we collect the MVG dataset containing 1,009 samples. Each sample contains five images of the same person wearing the same garment from five different views, for a total of 5,045 images, as shown in Figure 2(b). The image resolution is about 1K. Collection details of this dataset are provided in the supplementary material. In this work, we split it into a training set with 812 samples and a test set with 197 samples. During training, we extract the clothing from the frontal and back view images (the first and fifth columns of Figure 2(b)) to reconstruct the try-on results from other views (the second to fourth columns of Figure 2(b)), and use the real images from the corresponding views as ground truth. During testing, users only need to provide two images in the frontal and back views to generate try-on results of a person from multiple views. Frontal-View Datasets. The proposed method can also be applied to the frontal-view virtual try-on task. Our frontal-view experiments are carried out on the VITON-HD [5] and DressCode [31] datasets. VITON-HD [5] has 13,679 frontal-view person and upper-body clothing image pairs with a resolution of 1,024×768. DressCode [31] contains three categories: upper-body clothing, lower-body clothing, and dresses, with a total of more than 53,000 image pairs of frontal-view clothing and persons. Among them, upper-body clothing contains 15,363 samples. For the division of the datasets, we follow the previous works [11, 22, 30]. For the VITON-HD [5] dataset, we use 2,032 pairs as the test set and 11,647 pairs as the training set. For the DressCode [31] dataset, we employ 1,800 pairs as the test set, and the rest as the training set.

Table 1: Quantitative comparison with previous work on the paired setting. For the multi-view virtual try-on task, we show results on our proposed MVG dataset. For the frontal-view virtual try-on task, we show results on the VITON-HD dataset [5] and the DressCode dataset [31]. The best results have been bolded. Note that all previous works have been finetuned on our proposed MVG dataset when comparing on the multi-view virtual try-on task.

| Method | MVG (LPIPS↓ / SSIM↑ / FID↓ / KID↓) | VITON-HD (LPIPS↓ / SSIM↑ / FID↓ / KID↓) | DressCode Upper Body (LPIPS↓ / SSIM↑ / FID↓ / KID↓) |
| Paint by Example [47] | 0.120 / 0.880 / 54.38 / 14.95 | 0.150 / 0.843 / 13.78 / 4.48 | 0.078 / 0.899 / 15.21 / 4.51 |
| PF-AFN [9] | 0.139 / 0.873 / 49.47 / 12.81 | 0.141 / 0.855 / 7.76 / 4.19 | 0.091 / 0.902 / 13.11 / 6.29 |
| GP-VTON [44] | – | 0.085 / 0.889 / 6.25 / 0.77 | 0.236 / 0.781 / 19.37 / 8.07 |
| LaDI-VTON [30] | 0.069 / 0.921 / 29.14 / 4.39 | 0.094 / 0.872 / 7.08 / 1.49 | 0.063 / 0.922 / 11.85 / 3.20 |
| DCI-VTON [11] | 0.062 / 0.929 / 25.71 / 0.95 | 0.074 / 0.893 / 5.52 / 0.57 | 0.043 / 0.937 / 11.87 / 1.91 |
| StableVITON [22] | 0.063 / 0.929 / 23.52 / 0.46 | 0.073 / 0.888 / 6.15 / 1.34 | 0.040 / 0.937 / 10.18 / 1.70 |
| Ours | 0.050 / 0.936 / 22.17 / 0.36 | 0.068 / 0.897 / 5.42 / 0.48 | 0.040 / 0.941 / 8.26 / 1.39 |

Table 2: Unpaired setting's quantitative results on our MVG dataset and the VITON-HD dataset [5]. The best results have been bolded.

| Method | MVG (FIDu↓ / KIDu↓) | VITON-HD (FIDu↓ / KIDu↓) |
| Paint by Example [47] | 43.79 / 5.92 | 17.27 / 4.56 |
| PF-AFN [9] | 47.38 / 7.04 | 21.18 / 6.57 |
| GP-VTON [44] | – | 9.11 / 1.21 |
| LaDI-VTON [30] | 36.61 / 3.39 | 9.55 / 1.83 |
| DCI-VTON [11] | 36.03 / 3.79 | 8.93 / 1.07 |
| StableVITON [22] | 35.85 / 4.22 | 9.86 / 1.09 |
| Ours | 34.48 / 2.92 | 8.67 / 0.78 |

Evaluation Metrics. Following previous works [9, 11, 22, 30, 44, 47], we use four metrics to evaluate the performance of our method: Structural Similarity (SSIM) [41], Learned Perceptual Image Patch Similarity (LPIPS) [51], Frechet Inception Distance (FID) [16] and Kernel Inception Distance (KID) [3]. LPIPS [51] and SSIM [41] tend to measure the consistency of the generated try-on results with the ground truth. FID [16] and KID [3] are employed to measure the realism of images. Specifically, for the paired test setting, which means directly using the paired data in the dataset, we utilize the above four metrics for evaluation.
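The paper does not specify its evaluation code; as one possible reference, the four paired-setting metrics could be computed with the torchmetrics library (this tooling choice is our assumption, not the authors' setup):

```python
# Hedged sketch of computing the paired-setting metrics with torchmetrics;
# the library choice and preprocessing conventions are assumptions.
import torch
from torchmetrics.image import (
    StructuralSimilarityIndexMeasure,
    FrechetInceptionDistance,
    KernelInceptionDistance,
)
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity

def paired_metrics(fake: torch.Tensor, real: torch.Tensor):
    """fake/real: float tensors in [0, 1], shape (N, 3, H, W); N should exceed the KID subset size."""
    ssim = StructuralSimilarityIndexMeasure(data_range=1.0)(fake, real)
    lpips = LearnedPerceptualImagePatchSimilarity(net_type="vgg", normalize=True)(fake, real)
    fid = FrechetInceptionDistance(normalize=True)
    kid = KernelInceptionDistance(subset_size=50, normalize=True)
    fid.update(real, real=True); fid.update(fake, real=False)
    kid.update(real, real=True); kid.update(fake, real=False)
    return {"SSIM": ssim.item(), "LPIPS": lpips.item(),
            "FID": fid.compute().item(), "KID": kid.compute()[0].item()}
```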
For unpaired test setting, which means that the given garment is different from the garment originally worn by target person, we use FID and KID for evaluation, and in order to distinguish them from the paired setting, we named them FIDu and KIDu respectively. Implementation Details. We use Paint by Example [47] as the backbone of our method and copy the weights of its encoder to initialize E\ud835\udc59. The hyper-parameter \ud835\udf061 is set to 1e-1, and \ud835\udf06\ud835\udc5d\ud835\udc52\ud835\udc5f\ud835\udc50is set to 1e-4. We train our model on 2 NVIDIA Tesla A100 GPUs for 40 epochs with a batch size of 4 and a learning rate of 1e-5. We use AdamW [28] optimizer with \ud835\udefd1 = 0.9, \ud835\udefd2 = 0.999. Comparison Settings. We compare our method with Paint By Example [47], PF-AFN [9], GP-VTON [44], LaDI-VTON [30], DCIVTON [11] and StableVITON [22] on both frontal-view and multiview virtual try-on tasks. For multi-view virtual try-on, we compare these methods on the proposed MVG dataset. For the sake of fairness, we fine-tune the previous methods on the MVG dataset according to its original training settings. Since previous methods can only input a single clothing image, we input frontal and back view clothing respectively and select the best result. For frontal-view virtual try-on, we compare these methods on VITON-HD [5] and DressCode [31] datasets. Following previous works\u2019 settings, the proposed MV-VTON only inputs one frontal-view garment during training and inference. When evaluating previous methods, we directly use their released models to test, and train the models if not available. 4.2 Quantitative Evaluation Table 1 reports the quantitative results on the paired setting, and Table 2 shows the unpaired setting\u2019s results. On the multi-view virtual try-on task, as can be seen, thanks to the view-adaptive selection mechanism, our method can reasonably select clothing features according to the person\u2019s pose, so it is better than existing methods in various metrics, especially on LPIPS and SSIM. Furthermore, owing to the joint attention block, our approach excels in preserving high-frequency details of the original garments across both frontal-view and multi-view virtual try-on scenarios, thus achieving superior performance in these metrics. 4.3 Qualitative Evaluation Multi-View Virtual Try-On. As shown in Figure 5, MV-VTON generates more realistic multi-view results compared to the previous five methods. Specifically, in the first row of Figure 5, due to the lack of adaptive selection of clothes, previous methods have difficulty in generating hoods of the original cloth. Moreover, in the second and third rows, previous methods often struggle to maintain fidelity to the original garments. In contrast, our method effectively MV-VTON: Multi-View Virtual Try-On with Diffusion Models arXiv, 2024, MV-VTON Frontal view cloth Back view cloth Person Paint By Example PF-AFN LaDI-VTON DCI-VTON StableVITON Ours Figure 5: Qualitative comparison on multi-view virtual try-on task. Clothing Person Paint By Example PF-AFN GP-VTON LaDI-VTON DCI-VTON StableVITON Ours Figure 6: Qualitative comparison on frontal-view virtual try-on task. addresses the aforementioned problems and generates high-fidelity results. Frontal-View Virtual Try-On. As shown in Figure 6, our method also demonstrates superior performance over existing methods on frontal-view virtual try-on task, particularly in retaining clothing details. 
Specifically, in the second row of Figure 6, our method better preserves the lettering 'KORS' on the clothing. In addition, benefiting from the selective fusion of global and local clothing features in the joint attention blocks, our method not only preserves the shape of special garments (in the first row), but also faithfully generates complex patterns (in the third row). 4.4 User Study We conduct a user study involving 28 participants, each of whom is presented with test results generated by our method and previous methods. The participants evaluate the results based on fidelity (indicating whether the generated clothing is faithful to the original clothing) and identity (indicating whether human body details such as the face and hands are preserved well), and choose the best one among all methods. We then calculate the proportion of the best images to the total results for each method. As shown in Table 3, the proposed method outperforms previous methods both on fidelity and identity.

Table 3: User study results. We make comparisons with previous works on the MVG and VITON-HD [5] datasets.

| Method | MVG (Fidelity↑ / Identity↑) | VITON-HD (Fidelity↑ / Identity↑) |
| Paint by Example [47] | 0.76% / 0.33% | 0.32% / 1.66% |
| PF-AFN [9] | 0.37% / 0.63% | 0.17% / 0.19% |
| GP-VTON [44] | – | 11.4% / 0.42% |
| LaDI-VTON [30] | 11.38% / 19.81% | 3.37% / 9.57% |
| DCI-VTON [11] | 3.77% / 8.3% | 14.13% / 13.63% |
| StableVITON [22] | 15.13% / 29.2% | 17.9% / 27.42% |
| Ours | 68.59% / 41.73% | 52.71% / 47.11% |

Figure 7: Visual comparison of the effect of the proposed view-adaptive selection mechanism on the MVG dataset. Figure 8: Visual comparison of the effect of the proposed joint attention block on the VITON-HD [5] dataset.

4.5 Ablation Study Effect of View-Adaptive Selection. We investigate the effect of view-adaptive selection on the multi-view virtual try-on task. Specifically, on the basis of no clothing selection, we add hard-selection when extracting global features and soft-selection when extracting local features, respectively. No hard-selection means that we directly concatenate the two garments' features encoded by CLIP [33], and no soft-selection means that the two clothing features are concatenated without passing through the soft-selection blocks. Comparison results are shown in Table 4 and Figure 7. As can be seen, without any selection, the performance is greatly reduced. Merely employing hard-selection or soft-selection can enhance performance, but
the generated results still exhibit some unnaturalness, such as the unrealistic hoods and black stripes, as shown in Figure 7. In contrast, by combining hard-selection and soft-selection, our method can generate more faithful try-on results.

Table 4: Ablation study of our proposed view-adaptive selection mechanism on the MVG dataset.

| Hard | Soft | LPIPS↓ | SSIM↑ | FID↓ | KID↓ | FIDu↓ | KIDu↓ |
| × | × | 0.068 | 0.925 | 25.13 | 0.77 | 35.28 | 3.24 |
| × | ✓ | 0.064 | 0.928 | 24.58 | 0.62 | 34.67 | 3.05 |
| ✓ | × | 0.052 | 0.934 | 22.18 | 0.43 | 33.47 | 2.74 |
| ✓ | ✓ | 0.050 | 0.936 | 22.18 | 0.35 | 33.44 | 2.69 |

Effect of Joint Attention Block. In order to demonstrate the effectiveness of fusing global and local features through the joint attention blocks, we discard the global feature extraction branch and the local feature extraction branch, respectively. Results are shown in Table 5 and Figure 8. As can be seen, relying solely on global features may lead to a loss of details, such as the distorted text 'VANS' in the first row and the missing letter 'C' in the second row. Moreover, if only local features are provided, the results may also have unfaithful textures, such as artifacts on the person's chest. Compared to them, we fuse global and local features through the joint attention blocks, which can refine details in garments while preserving semantic information.

Table 5: Ablation study of the joint attention block on the MVG and VITON-HD [5] datasets.

| Dataset | Global | Local | LPIPS↓ | SSIM↑ | FID↓ | KID↓ | FIDu↓ | KIDu↓ |
| MVG | ✓ | × | 0.062 | 0.929 | 25.71 | 0.95 | 36.01 | 3.78 |
| MVG | × | ✓ | 0.058 | 0.931 | 26.16 | 1.21 | 36.29 | 3.91 |
| MVG | ✓ | ✓ | 0.050 | 0.936 | 22.18 | 0.35 | 33.44 | 2.69 |
| VITON-HD | ✓ | × | 0.074 | 0.893 | 5.52 | 0.57 | 8.93 | 1.07 |
| VITON-HD | × | ✓ | 0.070 | 0.896 | 5.76 | 0.81 | 9.15 | 1.09 |
| VITON-HD | ✓ | ✓ | 0.069 | 0.897 | 5.43 | 0.49 | 8.67 | 0.78 |

5 CONCLUSION We introduce a novel and practical Multi-View Virtual Try-ON (MV-VTON) task, which aims at using the frontal and back clothing to reconstruct the dressing results of a person from multiple views. To achieve the task, we propose a diffusion-based method. Specifically, the view-adaptive selection mechanism extracts more reasonable clothing features based on the similarity between the poses of the person and the two clothes. The joint attention block aligns the global and local features of the selected clothing to the target person, and fuses them. In addition, we collect a multi-view garment dataset for this task. Extensive experiments demonstrate that the proposed method achieves state-of-the-art performance on both frontal-view and multi-view virtual try-on tasks, compared with existing methods. APPENDIX A IMPLEMENTATION DETAILS Hard-Selection. In this section, we present more details about the proposed hard-selection for global clothing features. Specifically, in the multi-view virtual try-on task, we use OpenPose [4, 36, 42] to extract the skeleton images of the target person, the frontal clothing and the back clothing as pose information p_h, p_f, and p_b, respectively. After that, we decide whether to use the frontal-view clothing or the back-view clothing based on the relative positions of the target person's left arm and right arm in the skeleton images. As shown in Figure A, if the right arm appears positioned to the left of the left arm in the skeleton image (columns one to three in Figure A), the frontal-view clothing is chosen; otherwise, the back-view clothing is preferred (columns four to five in Figure A). In addition, following previous works [11, 30, 44], we adopt PF-AFN [9] and GP-VTON [44] to obtain the pre-warped cloth on the frontal-view try-on task, and we utilize StableVITON [22] to get it on the multi-view try-on task. Pose Encoder. The pose encoder is used to extract features of skeleton images. It is a tiny network that contains three blocks, followed by layer normalization [1].
Each block comprises one convolution layer, one GELU [15] activation layer, and a downsampling operation. We utilize the acquired pose embeddings as input for the proposed soft-selection block. MVG Dataset. To construct dataset for multi-view virtual try-on (MV-VTON), we first collect a large number of videos from YOOX NET-A-PORTER1, Taobao2 and TikTok3 websites, then filter out 1009 videos where the person in the video turns at least 180 degrees while wearing clothes. Afterwards, we divide each video into frames and handpick 5 frames to constitute a sample within our MVG dataset. Across these 5 frames, the person is captured from various angles, approximately spanning 0 (i.e., frontal view), 45, 90, 135, and 180 (i.e., back view) degrees, as shown in the first row of Figure B. In addition, following the previous DressCode dataset [31], we employ SCHP model [26] to extract the corresponding human parsing maps and utilize DensePose [12] to obtain the person\u2019s dense labels, as shown in the second and third row of Figure B. Human parsing maps can be utilized to generate cloth-agnostic person images, which are necessary for training and inference processes. B MORE QUALITATIVE RESULTS Results on Multi-View VTON. In this section, we present more qualitative results on the MVG dataset. Specifically, as shown in 1https://net-a-porter.com 2https://taobao.com 3https://douyin.com Selected Garment Pose Target Person Figure A: Visualization of the person and corresponding poses. We select one of garments based on the relative positions of left and right arms in the skeleton image when performing hard-selection on the multi-view virtual try-on task. Figure D, we show multiple groups of try-on results for the same person under different views, using the proposed method. In Figure D, the first column displays frontal-view and back-view garments, the second to fourth columns depict persons from different views, while the fifth to seventh columns showcase the corresponding try-on results. As can be seen, our method can generate realistic dressing-up results of the multi-view person from the given two views of clothing. Furthermore, our method can retain the details well on the original clothing (e.g., the buttons in the fifth row) and generate high-fidelity try-on images even under occlusion (e.g., hair occlusion in the second row). In conclusion, the proposed method exhibits outstanding performance on multi-view virtual try-on task. Results on Frontal-View VTON. In this section, we show more visual comparison results on VITON-HD [5] dataset and DressCode [31] dataset. Previous works include Paint By Example [47], PF-AFN [9], GP-VTON [44], LaDI-VTON [30], DCI-VTON [11] and StableVITON [22]. The results are shown in Figure E. In the first and second row of Figure E, it can be seen that our method better preserves the shape of the original clothing (e.g., the cuff in the second row), compared to the previous methods. In addition, our method outperforms previous methods in preserving highfrequency details, such as patterns on clothing in the fourth and sixth rows. Moreover, in contrast to previous methods, MV-VTON is not constrained by specific types of clothing and can achieve highly realistic effects across a wide range of garment styles (e.g., the garment in the third row and the collar in the eighth row). In summary, our method also has superiority on frontal-view virtual try-on task. High Resolution Results on Frontal-View VTON. 
Following the previous works [11, 22, 30], the above results are all obtained at 512\u00d7384 resolution. In this section, we present more results at 1024\u00d7768 resolution on VITON-HD [5] and DressCode [31] datasets, arXiv, 2024, MV-VTON Haoyu Wang, Zhilu Zhang, Donglin Di, Shiliang Zhang, and Wangmeng Zuo Dense Pose Human Parsing Map Target Person Figure B: Examples of human parsing maps and dense pose in our dataset. The parsing maps can be used to synthesize cloth-agnostic person images. as shown in the Figure F. Specifically, we utilize the model trained at 512\u00d7384 resolution to directly test at 1024\u00d7768 resolution. Despite the difference in resolutions between training and testing, our method can also produce high-fidelity try-on results. For instance, the generated images can preserve both the intricate patterns and text adorning the clothing (in the first row) while also effectively maintaining their original shapes (in the last row). C LIMITATIONS Despite outperforming previous methods on both frontal-view and multi-view virtual try-on tasks, our method does not perform well in all cases. Figure C displays some unsatisfactory try-on results. As can be seen, although our method can preserve the shape and texture of original clothing (e.g., the \u2019DIESEL\u2019 text in the first row), it is difficult for it to fully preserve some smaller or more complex details (e.g., the parts circled in red). The reason for this phenomenon may be that these details are easily lost when inpainting in latent space. We will try to solve this issue in future work. Clothing Person Result Figure C: Visualization of bad cases on VITON-HD dataset. MV-VTON: Multi-View Virtual Try-On with Diffusion Models arXiv, 2024, MV-VTON Clothing View 1 View 2 View 3 Result 1 Result 2 Result 3 Figure D: Qualitative results on multi-view virtual try-on task. arXiv, 2024, MV-VTON Haoyu Wang, Zhilu Zhang, Donglin Di, Shiliang Zhang, and Wangmeng Zuo Clothing Person Paint By Example PF-AFN GP-VTON LaDI-VTON DCI-VTON StableVITON Ours Figure E: Qualitative comparison on frontal-view virtual try-on task. MV-VTON: Multi-View Virtual Try-On with Diffusion Models arXiv, 2024, MV-VTON Figure F: Qualitative results of 1024\u00d7768 resolution on frontal-view virtual try-on task. arXiv, 2024, MV-VTON Haoyu Wang, Zhilu Zhang, Donglin Di, Shiliang Zhang, and Wangmeng Zuo" + }, + { + "url": "http://arxiv.org/abs/2103.00020v1", + "title": "Learning Transferable Visual Models From Natural Language Supervision", + "abstract": "State-of-the-art computer vision systems are trained to predict a fixed set\nof predetermined object categories. This restricted form of supervision limits\ntheir generality and usability since additional labeled data is needed to\nspecify any other visual concept. Learning directly from raw text about images\nis a promising alternative which leverages a much broader source of\nsupervision. We demonstrate that the simple pre-training task of predicting\nwhich caption goes with which image is an efficient and scalable way to learn\nSOTA image representations from scratch on a dataset of 400 million (image,\ntext) pairs collected from the internet. After pre-training, natural language\nis used to reference learned visual concepts (or describe new ones) enabling\nzero-shot transfer of the model to downstream tasks. 
We study the performance\nof this approach by benchmarking on over 30 different existing computer vision\ndatasets, spanning tasks such as OCR, action recognition in videos,\ngeo-localization, and many types of fine-grained object classification. The\nmodel transfers non-trivially to most tasks and is often competitive with a\nfully supervised baseline without the need for any dataset specific training.\nFor instance, we match the accuracy of the original ResNet-50 on ImageNet\nzero-shot without needing to use any of the 1.28 million training examples it\nwas trained on. We release our code and pre-trained model weights at\nhttps://github.com/OpenAI/CLIP.", + "authors": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever", + "published": "2021-02-26", + "updated": "2021-02-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2211.13227v1", + "title": "Paint by Example: Exemplar-based Image Editing with Diffusion Models", + "abstract": "Language-guided image editing has achieved great success recently. In this\npaper, for the first time, we investigate exemplar-guided image editing for\nmore precise control. We achieve this goal by leveraging self-supervised\ntraining to disentangle and re-organize the source image and the exemplar.\nHowever, the naive approach will cause obvious fusing artifacts. We carefully\nanalyze it and propose an information bottleneck and strong augmentations to\navoid the trivial solution of directly copying and pasting the exemplar image.\nMeanwhile, to ensure the controllability of the editing process, we design an\narbitrary shape mask for the exemplar image and leverage the classifier-free\nguidance to increase the similarity to the exemplar image. The whole framework\ninvolves a single forward of the diffusion model without any iterative\noptimization. We demonstrate that our method achieves an impressive performance\nand enables controllable editing on in-the-wild images with high fidelity.", + "authors": "Binxin Yang, Shuyang Gu, Bo Zhang, Ting Zhang, Xuejin Chen, Xiaoyan Sun, Dong Chen, Fang Wen", + "published": "2022-11-23", + "updated": "2022-11-23", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2312.01725v1", + "title": "StableVITON: Learning Semantic Correspondence with Latent Diffusion Model for Virtual Try-On", + "abstract": "Given a clothing image and a person image, an image-based virtual try-on aims\nto generate a customized image that appears natural and accurately reflects the\ncharacteristics of the clothing image. In this work, we aim to expand the\napplicability of the pre-trained diffusion model so that it can be utilized\nindependently for the virtual try-on task.The main challenge is to preserve the\nclothing details while effectively utilizing the robust generative capability\nof the pre-trained model. In order to tackle these issues, we propose\nStableVITON, learning the semantic correspondence between the clothing and the\nhuman body within the latent space of the pre-trained diffusion model in an\nend-to-end manner. 
Our proposed zero cross-attention blocks not only preserve\nthe clothing details by learning the semantic correspondence but also generate\nhigh-fidelity images by utilizing the inherent knowledge of the pre-trained\nmodel in the warping process. Through our proposed novel attention total\nvariation loss and applying augmentation, we achieve the sharp attention map,\nresulting in a more precise representation of clothing details. StableVITON\noutperforms the baselines in qualitative and quantitative evaluation, showing\npromising quality in arbitrary person images. Our code is available at\nhttps://github.com/rlawjdghek/StableVITON.", + "authors": "Jeongho Kim, Gyojung Gu, Minho Park, Sunghyun Park, Jaegul Choo", + "published": "2023-12-04", + "updated": "2023-12-04", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1902.11026v1", + "title": "Towards Multi-pose Guided Virtual Try-on Network", + "abstract": "Virtual try-on system under arbitrary human poses has huge application\npotential, yet raises quite a lot of challenges, e.g. self-occlusions, heavy\nmisalignment among diverse poses, and diverse clothes textures. Existing\nmethods aim at fitting new clothes into a person can only transfer clothes on\nthe fixed human pose, but still show unsatisfactory performances which often\nfail to preserve the identity, lose the texture details, and decrease the\ndiversity of poses. In this paper, we make the first attempt towards multi-pose\nguided virtual try-on system, which enables transfer clothes on a person image\nunder diverse poses. Given an input person image, a desired clothes image, and\na desired pose, the proposed Multi-pose Guided Virtual Try-on Network (MG-VTON)\ncan generate a new person image after fitting the desired clothes into the\ninput image and manipulating human poses. Our MG-VTON is constructed in three\nstages: 1) a desired human parsing map of the target image is synthesized to\nmatch both the desired pose and the desired clothes shape; 2) a deep Warping\nGenerative Adversarial Network (Warp-GAN) warps the desired clothes appearance\ninto the synthesized human parsing map and alleviates the misalignment problem\nbetween the input human pose and desired human pose; 3) a refinement render\nutilizing multi-pose composition masks recovers the texture details of clothes\nand removes some artifacts. Extensive experiments on well-known datasets and\nour newly collected largest virtual try-on benchmark demonstrate that our\nMG-VTON significantly outperforms all state-of-the-art methods both\nqualitatively and quantitatively with promising multi-pose virtual try-on\nperformances.", + "authors": "Haoye Dong, Xiaodan Liang, Bochao Wang, Hanjiang Lai, Jia Zhu, Jian Yin", + "published": "2019-02-28", + "updated": "2019-02-28", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2003.05863v1", + "title": "Towards Photo-Realistic Virtual Try-On by Adaptively Generating$\\leftrightarrow$Preserving Image Content", + "abstract": "Image visual try-on aims at transferring a target clothing image onto a\nreference person, and has become a hot topic in recent years. Prior arts\nusually focus on preserving the character of a clothing image (e.g. texture,\nlogo, embroidery) when warping it to arbitrary human pose. 
However, it remains\na big challenge to generate photo-realistic try-on images when large occlusions\nand human poses are presented in the reference person. To address this issue,\nwe propose a novel visual try-on network, namely Adaptive Content Generating\nand Preserving Network (ACGPN). In particular, ACGPN first predicts semantic\nlayout of the reference image that will be changed after try-on (e.g. long\nsleeve shirt$\\rightarrow$arm, arm$\\rightarrow$jacket), and then determines\nwhether its image content needs to be generated or preserved according to the\npredicted semantic layout, leading to photo-realistic try-on and rich clothing\ndetails. ACGPN generally involves three major modules. First, a semantic layout\ngeneration module utilizes semantic segmentation of the reference image to\nprogressively predict the desired semantic layout after try-on. Second, a\nclothes warping module warps clothing images according to the generated\nsemantic layout, where a second-order difference constraint is introduced to\nstabilize the warping process during training. Third, an inpainting module for\ncontent fusion integrates all information (e.g. reference image, semantic\nlayout, warped clothes) to adaptively produce each semantic part of human body.\nIn comparison to the state-of-the-art methods, ACGPN can generate\nphoto-realistic images with much better perceptual quality and richer\nfine-details.", + "authors": "Han Yang, Ruimao Zhang, Xiaobao Guo, Wei Liu, Wangmeng Zuo, Ping Luo", + "published": "2020-03-12", + "updated": "2020-03-12", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.GR", + "eess.IV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2302.05543v3", + "title": "Adding Conditional Control to Text-to-Image Diffusion Models", + "abstract": "We present ControlNet, a neural network architecture to add spatial\nconditioning controls to large, pretrained text-to-image diffusion models.\nControlNet locks the production-ready large diffusion models, and reuses their\ndeep and robust encoding layers pretrained with billions of images as a strong\nbackbone to learn a diverse set of conditional controls. The neural\narchitecture is connected with \"zero convolutions\" (zero-initialized\nconvolution layers) that progressively grow the parameters from zero and ensure\nthat no harmful noise could affect the finetuning. We test various conditioning\ncontrols, eg, edges, depth, segmentation, human pose, etc, with Stable\nDiffusion, using single or multiple conditions, with or without prompts. We\nshow that the training of ControlNets is robust with small (<50k) and large\n(>1m) datasets. Extensive results show that ControlNet may facilitate wider\napplications to control image diffusion models.", + "authors": "Lvmin Zhang, Anyi Rao, Maneesh Agrawala", + "published": "2023-02-10", + "updated": "2023-11-26", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.GR", + "cs.HC", + "cs.MM" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2305.13501v3", + "title": "LaDI-VTON: Latent Diffusion Textual-Inversion Enhanced Virtual Try-On", + "abstract": "The rapidly evolving fields of e-commerce and metaverse continue to seek\ninnovative approaches to enhance the consumer experience. At the same time,\nrecent advancements in the development of diffusion models have enabled\ngenerative networks to create remarkably realistic images. 
In this context,\nimage-based virtual try-on, which consists in generating a novel image of a\ntarget model wearing a given in-shop garment, has yet to capitalize on the\npotential of these powerful generative solutions. This work introduces\nLaDI-VTON, the first Latent Diffusion textual Inversion-enhanced model for the\nVirtual Try-ON task. The proposed architecture relies on a latent diffusion\nmodel extended with a novel additional autoencoder module that exploits\nlearnable skip connections to enhance the generation process preserving the\nmodel's characteristics. To effectively maintain the texture and details of the\nin-shop garment, we propose a textual inversion component that can map the\nvisual features of the garment to the CLIP token embedding space and thus\ngenerate a set of pseudo-word token embeddings capable of conditioning the\ngeneration process. Experimental results on Dress Code and VITON-HD datasets\ndemonstrate that our approach outperforms the competitors by a consistent\nmargin, achieving a significant milestone for the task. Source code and trained\nmodels are publicly available at: https://github.com/miccunifi/ladi-vton.", + "authors": "Davide Morelli, Alberto Baldrati, Giuseppe Cartella, Marcella Cornia, Marco Bertini, Rita Cucchiara", + "published": "2023-05-22", + "updated": "2023-08-03", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI", + "cs.MM" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2308.06101v1", + "title": "Taming the Power of Diffusion Models for High-Quality Virtual Try-On with Appearance Flow", + "abstract": "Virtual try-on is a critical image synthesis task that aims to transfer\nclothes from one image to another while preserving the details of both humans\nand clothes. While many existing methods rely on Generative Adversarial\nNetworks (GANs) to achieve this, flaws can still occur, particularly at high\nresolutions. Recently, the diffusion model has emerged as a promising\nalternative for generating high-quality images in various applications.\nHowever, simply using clothes as a condition for guiding the diffusion model to\ninpaint is insufficient to maintain the details of the clothes. To overcome\nthis challenge, we propose an exemplar-based inpainting approach that leverages\na warping module to guide the diffusion model's generation effectively. The\nwarping module performs initial processing on the clothes, which helps to\npreserve the local details of the clothes. We then combine the warped clothes\nwith clothes-agnostic person image and add noise as the input of diffusion\nmodel. Additionally, the warped clothes is used as local conditions for each\ndenoising process to ensure that the resulting output retains as much detail as\npossible. Our approach, namely Diffusion-based Conditional Inpainting for\nVirtual Try-ON (DCI-VTON), effectively utilizes the power of the diffusion\nmodel, and the incorporation of the warping module helps to produce\nhigh-quality and realistic virtual try-on results. 
Experimental results on\nVITON-HD demonstrate the effectiveness and superiority of our method.", + "authors": "Junhong Gou, Siyu Sun, Jianfu Zhang, Jianlou Si, Chen Qian, Liqing Zhang", + "published": "2023-08-11", + "updated": "2023-08-11", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2306.08276v1", + "title": "TryOnDiffusion: A Tale of Two UNets", + "abstract": "Given two images depicting a person and a garment worn by another person, our\ngoal is to generate a visualization of how the garment might look on the input\nperson. A key challenge is to synthesize a photorealistic detail-preserving\nvisualization of the garment, while warping the garment to accommodate a\nsignificant body pose and shape change across the subjects. Previous methods\neither focus on garment detail preservation without effective pose and shape\nvariation, or allow try-on with the desired shape and pose but lack garment\ndetails. In this paper, we propose a diffusion-based architecture that unifies\ntwo UNets (referred to as Parallel-UNet), which allows us to preserve garment\ndetails and warp the garment for significant pose and body change in a single\nnetwork. The key ideas behind Parallel-UNet include: 1) garment is warped\nimplicitly via a cross attention mechanism, 2) garment warp and person blend\nhappen as part of a unified process as opposed to a sequence of two separate\ntasks. Experimental results indicate that TryOnDiffusion achieves\nstate-of-the-art performance both qualitatively and quantitatively.", + "authors": "Luyang Zhu, Dawei Yang, Tyler Zhu, Fitsum Reda, William Chan, Chitwan Saharia, Mohammad Norouzi, Ira Kemelmacher-Shlizerman", + "published": "2023-06-14", + "updated": "2023-06-14", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.GR" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.09479v2", + "title": "Disentangled Cycle Consistency for Highly-realistic Virtual Try-On", + "abstract": "Image virtual try-on replaces the clothes on a person image with a desired\nin-shop clothes image. It is challenging because the person and the in-shop\nclothes are unpaired. Existing methods formulate virtual try-on as either\nin-painting or cycle consistency. Both of these two formulations encourage the\ngeneration networks to reconstruct the input image in a self-supervised manner.\nHowever, existing methods do not differentiate clothing and non-clothing\nregions. A straight-forward generation impedes virtual try-on quality because\nof the heavily coupled image contents. In this paper, we propose a Disentangled\nCycle-consistency Try-On Network (DCTON). The DCTON is able to produce\nhighly-realistic try-on images by disentangling important components of virtual\ntry-on including clothes warping, skin synthesis, and image composition. To\nthis end, DCTON can be naturally trained in a self-supervised manner following\ncycle consistency learning. 
Extensive experiments on challenging benchmarks\nshow that DCTON outperforms state-of-the-art approaches favorably.", + "authors": "Chongjian Ge, Yibing Song, Yuying Ge, Han Yang, Wei Liu, Ping Luo", + "published": "2021-03-17", + "updated": "2021-03-19", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1802.05957v1", + "title": "Spectral Normalization for Generative Adversarial Networks", + "abstract": "One of the challenges in the study of generative adversarial networks is the\ninstability of its training. In this paper, we propose a novel weight\nnormalization technique called spectral normalization to stabilize the training\nof the discriminator. Our new normalization technique is computationally light\nand easy to incorporate into existing implementations. We tested the efficacy\nof spectral normalization on CIFAR10, STL-10, and ILSVRC2012 dataset, and we\nexperimentally confirmed that spectrally normalized GANs (SN-GANs) is capable\nof generating images of better or equal quality relative to the previous\ntraining stabilization techniques.", + "authors": "Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida", + "published": "2018-02-16", + "updated": "2018-02-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2206.14180v2", + "title": "High-Resolution Virtual Try-On with Misalignment and Occlusion-Handled Conditions", + "abstract": "Image-based virtual try-on aims to synthesize an image of a person wearing a\ngiven clothing item. To solve the task, the existing methods warp the clothing\nitem to fit the person's body and generate the segmentation map of the person\nwearing the item before fusing the item with the person. However, when the\nwarping and the segmentation generation stages operate individually without\ninformation exchange, the misalignment between the warped clothes and the\nsegmentation map occurs, which leads to the artifacts in the final image. The\ninformation disconnection also causes excessive warping near the clothing\nregions occluded by the body parts, so-called pixel-squeezing artifacts. To\nsettle the issues, we propose a novel try-on condition generator as a unified\nmodule of the two stages (i.e., warping and segmentation generation stages). A\nnewly proposed feature fusion block in the condition generator implements the\ninformation exchange, and the condition generator does not create any\nmisalignment or pixel-squeezing artifacts. We also introduce discriminator\nrejection that filters out the incorrect segmentation map predictions and\nassures the performance of virtual try-on frameworks. Experiments on a\nhigh-resolution dataset demonstrate that our model successfully handles the\nmisalignment and occlusion, and significantly outperforms the baselines. Code\nis available at https://github.com/sangyun884/HR-VITON.", + "authors": "Sangyun Lee, Gyojung Gu, Sunghyun Park, Seunghwan Choi, Jaegul Choo", + "published": "2022-06-28", + "updated": "2022-07-20", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.AI" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2302.13848v2", + "title": "ELITE: Encoding Visual Concepts into Textual Embeddings for Customized Text-to-Image Generation", + "abstract": "In addition to the unprecedented ability in imaginary creation, large\ntext-to-image models are expected to take customized concepts in image\ngeneration. 
Existing works generally learn such concepts in an\noptimization-based manner, yet bringing excessive computation or memory burden.\nIn this paper, we instead propose a learning-based encoder, which consists of a\nglobal and a local mapping networks for fast and accurate customized\ntext-to-image generation. In specific, the global mapping network projects the\nhierarchical features of a given image into multiple new words in the textual\nword embedding space, i.e., one primary word for well-editable concept and\nother auxiliary words to exclude irrelevant disturbances (e.g., background). In\nthe meantime, a local mapping network injects the encoded patch features into\ncross attention layers to provide omitted details, without sacrificing the\neditability of primary concepts. We compare our method with existing\noptimization-based approaches on a variety of user-defined concepts, and\ndemonstrate that our method enables high-fidelity inversion and more robust\neditability with a significantly faster encoding process. Our code is publicly\navailable at https://github.com/csyxwei/ELITE.", + "authors": "Yuxiang Wei, Yabo Zhang, Zhilong Ji, Jinfeng Bai, Lei Zhang, Wangmeng Zuo", + "published": "2023-02-27", + "updated": "2023-08-18", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2006.11239v2", + "title": "Denoising Diffusion Probabilistic Models", + "abstract": "We present high quality image synthesis results using diffusion probabilistic\nmodels, a class of latent variable models inspired by considerations from\nnonequilibrium thermodynamics. Our best results are obtained by training on a\nweighted variational bound designed according to a novel connection between\ndiffusion probabilistic models and denoising score matching with Langevin\ndynamics, and our models naturally admit a progressive lossy decompression\nscheme that can be interpreted as a generalization of autoregressive decoding.\nOn the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and\na state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality\nsimilar to ProgressiveGAN. Our implementation is available at\nhttps://github.com/hojonathanho/diffusion", + "authors": "Jonathan Ho, Ajay Jain, Pieter Abbeel", + "published": "2020-06-19", + "updated": "2020-12-16", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1807.07688v3", + "title": "Toward Characteristic-Preserving Image-based Virtual Try-On Network", + "abstract": "Image-based virtual try-on systems for fitting new in-shop clothes into a\nperson image have attracted increasing research attention, yet is still\nchallenging. A desirable pipeline should not only transform the target clothes\ninto the most fitting shape seamlessly but also preserve well the clothes\nidentity in the generated image, that is, the key characteristics (e.g.\ntexture, logo, embroidery) that depict the original clothes. However, previous\nimage-conditioned generation works fail to meet these critical requirements\ntowards the plausible virtual try-on performance since they fail to handle\nlarge spatial misalignment between the input image and target clothes. Prior\nwork explicitly tackled spatial deformation using shape context matching, but\nfailed to preserve clothing details due to its coarse-to-fine strategy. 
In this\nwork, we propose a new fully-learnable Characteristic-Preserving Virtual Try-On\nNetwork(CP-VTON) for addressing all real-world challenges in this task. First,\nCP-VTON learns a thin-plate spline transformation for transforming the in-shop\nclothes into fitting the body shape of the target person via a new Geometric\nMatching Module (GMM) rather than computing correspondences of interest points\nas prior works did. Second, to alleviate boundary artifacts of warped clothes\nand make the results more realistic, we employ a Try-On Module that learns a\ncomposition mask to integrate the warped clothes and the rendered image to\nensure smoothness. Extensive experiments on a fashion dataset demonstrate our\nCP-VTON achieves the state-of-the-art virtual try-on performance both\nqualitatively and quantitatively.", + "authors": "Bochao Wang, Huabin Zheng, Xiaodan Liang, Yimin Chen, Liang Lin, Meng Yang", + "published": "2018-07-20", + "updated": "2018-09-12", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2303.13756v1", + "title": "GP-VTON: Towards General Purpose Virtual Try-on via Collaborative Local-Flow Global-Parsing Learning", + "abstract": "Image-based Virtual Try-ON aims to transfer an in-shop garment onto a\nspecific person. Existing methods employ a global warping module to model the\nanisotropic deformation for different garment parts, which fails to preserve\nthe semantic information of different parts when receiving challenging inputs\n(e.g, intricate human poses, difficult garments). Moreover, most of them\ndirectly warp the input garment to align with the boundary of the preserved\nregion, which usually requires texture squeezing to meet the boundary shape\nconstraint and thus leads to texture distortion. The above inferior performance\nhinders existing methods from real-world applications. To address these\nproblems and take a step towards real-world virtual try-on, we propose a\nGeneral-Purpose Virtual Try-ON framework, named GP-VTON, by developing an\ninnovative Local-Flow Global-Parsing (LFGP) warping module and a Dynamic\nGradient Truncation (DGT) training strategy. Specifically, compared with the\nprevious global warping mechanism, LFGP employs local flows to warp garments\nparts individually, and assembles the local warped results via the global\ngarment parsing, resulting in reasonable warped parts and a semantic-correct\nintact garment even with challenging inputs.On the other hand, our DGT training\nstrategy dynamically truncates the gradient in the overlap area and the warped\ngarment is no more required to meet the boundary constraint, which effectively\navoids the texture squeezing problem. Furthermore, our GP-VTON can be easily\nextended to multi-category scenario and jointly trained by using data from\ndifferent garment categories. 
Extensive experiments on two high-resolution\nbenchmarks demonstrate our superiority over the existing state-of-the-art\nmethods.", + "authors": "Zhenyu Xie, Zaiyu Huang, Xin Dong, Fuwei Zhao, Haoye Dong, Xijin Zhang, Feida Zhu, Xiaodan Liang", + "published": "2023-03-24", + "updated": "2023-03-24", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2112.10752v2", + "title": "High-Resolution Image Synthesis with Latent Diffusion Models", + "abstract": "By decomposing the image formation process into a sequential application of\ndenoising autoencoders, diffusion models (DMs) achieve state-of-the-art\nsynthesis results on image data and beyond. Additionally, their formulation\nallows for a guiding mechanism to control the image generation process without\nretraining. However, since these models typically operate directly in pixel\nspace, optimization of powerful DMs often consumes hundreds of GPU days and\ninference is expensive due to sequential evaluations. To enable DM training on\nlimited computational resources while retaining their quality and flexibility,\nwe apply them in the latent space of powerful pretrained autoencoders. In\ncontrast to previous work, training diffusion models on such a representation\nallows for the first time to reach a near-optimal point between complexity\nreduction and detail preservation, greatly boosting visual fidelity. By\nintroducing cross-attention layers into the model architecture, we turn\ndiffusion models into powerful and flexible generators for general conditioning\ninputs such as text or bounding boxes and high-resolution synthesis becomes\npossible in a convolutional manner. Our latent diffusion models (LDMs) achieve\na new state of the art for image inpainting and highly competitive performance\non various tasks, including unconditional image generation, semantic scene\nsynthesis, and super-resolution, while significantly reducing computational\nrequirements compared to pixel-based DMs. Code is available at\nhttps://github.com/CompVis/latent-diffusion .", + "authors": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Bj\u00f6rn Ommer", + "published": "2021-12-20", + "updated": "2022-04-13", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2103.04559v2", + "title": "Parser-Free Virtual Try-on via Distilling Appearance Flows", + "abstract": "Image virtual try-on aims to fit a garment image (target clothes) to a person\nimage. Prior methods are heavily based on human parsing. However,\nslightly-wrong segmentation results would lead to unrealistic try-on images\nwith large artifacts. Inaccurate parsing misleads parser-based methods to\nproduce visually unrealistic results where artifacts usually occur. A recent\npioneering work employed knowledge distillation to reduce the dependency of\nhuman parsing, where the try-on images produced by a parser-based method are\nused as supervisions to train a \"student\" network without relying on\nsegmentation, making the student mimic the try-on ability of the parser-based\nmodel. However, the image quality of the student is bounded by the parser-based\nmodel. To address this problem, we propose a novel approach,\n\"teacher-tutor-student\" knowledge distillation, which is able to produce highly\nphoto-realistic images without human parsing, possessing several appealing\nadvantages compared to prior arts. 
(1) Unlike existing work, our approach\ntreats the fake images produced by the parser-based method as \"tutor\nknowledge\", where the artifacts can be corrected by real \"teacher knowledge\",\nwhich is extracted from the real person images in a self-supervised way. (2)\nOther than using real images as supervisions, we formulate knowledge\ndistillation in the try-on problem as distilling the appearance flows between\nthe person image and the garment image, enabling us to find accurate dense\ncorrespondences between them to produce high-quality results. (3) Extensive\nevaluations show large superiority of our method (see Fig. 1).", + "authors": "Yuying Ge, Yibing Song, Ruimao Zhang, Chongjian Ge, Wei Liu, Ping Luo", + "published": "2021-03-08", + "updated": "2021-03-09", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2010.02502v4", + "title": "Denoising Diffusion Implicit Models", + "abstract": "Denoising diffusion probabilistic models (DDPMs) have achieved high quality\nimage generation without adversarial training, yet they require simulating a\nMarkov chain for many steps to produce a sample. To accelerate sampling, we\npresent denoising diffusion implicit models (DDIMs), a more efficient class of\niterative implicit probabilistic models with the same training procedure as\nDDPMs. In DDPMs, the generative process is defined as the reverse of a\nMarkovian diffusion process. We construct a class of non-Markovian diffusion\nprocesses that lead to the same training objective, but whose reverse process\ncan be much faster to sample from. We empirically demonstrate that DDIMs can\nproduce high quality samples $10 \\times$ to $50 \\times$ faster in terms of\nwall-clock time compared to DDPMs, allow us to trade off computation for sample\nquality, and can perform semantically meaningful image interpolation directly\nin the latent space.", + "authors": "Jiaming Song, Chenlin Meng, Stefano Ermon", + "published": "2020-10-06", + "updated": "2022-10-05", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/2208.01618v1", + "title": "An Image is Worth One Word: Personalizing Text-to-Image Generation using Textual Inversion", + "abstract": "Text-to-image models offer unprecedented freedom to guide creation through\nnatural language. Yet, it is unclear how such freedom can be exercised to\ngenerate images of specific unique concepts, modify their appearance, or\ncompose them in new roles and novel scenes. In other words, we ask: how can we\nuse language-guided models to turn our cat into a painting, or imagine a new\nproduct based on our favorite toy? Here we present a simple approach that\nallows such creative freedom. Using only 3-5 images of a user-provided concept,\nlike an object or a style, we learn to represent it through new \"words\" in the\nembedding space of a frozen text-to-image model. These \"words\" can be composed\ninto natural language sentences, guiding personalized creation in an intuitive\nway. Notably, we find evidence that a single word embedding is sufficient for\ncapturing unique and varied concepts. We compare our approach to a wide range\nof baselines, and demonstrate that it can more faithfully portray the concepts\nacross a range of applications and tasks.\n Our code, data and new words will be available at:\nhttps://textual-inversion.github.io", + "authors": "Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H. 
Bermano, Gal Chechik, Daniel Cohen-Or", + "published": "2022-08-02", + "updated": "2022-08-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CL", + "cs.GR", + "cs.LG" + ], + "label": "Related Work" + }, + { + "url": "http://arxiv.org/abs/1911.11645v1", + "title": "Effects of different discretisations of the Laplacian upon stochastic simulations of reaction-diffusion systems on both static and growing domains", + "abstract": "By discretising space into compartments and letting system dynamics be\ngoverned by the reaction-diffusion master equation, it is possible to derive\nand simulate a stochastic model of reaction and diffusion on an arbitrary\ndomain. However, there are many implementation choices involved in this\nprocess, such as the choice of discretisation and method of derivation of the\ndiffusive jump rates, and it is not clear a priori how these affect model\npredictions. To shed light on this issue, in this work we explore how a variety\nof discretisations and method for derivation of the diffusive jump rates affect\nthe outputs of stochastic simulations of reaction-diffusion models, in\nparticular using Turing's model of pattern formation as a key example. We\nconsider both static and uniformly growing domains and demonstrate that, while\nonly minor differences are observed for simple reaction-diffusion systems,\nthere can be vast differences in model predictions for systems that include\ncomplicated reaction kinetics, such as Turing's model of pattern formation. Our\nwork highlights that care must be taken in using the reaction-diffusion master\nequation to make predictions as to the dynamics of stochastic\nreaction-diffusion systems.", + "authors": "Bartosz J. Bartmanski, Ruth E. Baker", + "published": "2019-11-26", + "updated": "2019-11-26", + "primary_cat": "physics.comp-ph", + "cats": [ + "physics.comp-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2210.07677v1", + "title": "TransFusion: Transcribing Speech with Multinomial Diffusion", + "abstract": "Diffusion models have shown exceptional scaling properties in the image\nsynthesis domain, and initial attempts have shown similar benefits for applying\ndiffusion to unconditional text synthesis. Denoising diffusion models attempt\nto iteratively refine a sampled noise signal until it resembles a coherent\nsignal (such as an image or written sentence). In this work we aim to see\nwhether the benefits of diffusion models can also be realized for speech\nrecognition. To this end, we propose a new way to perform speech recognition\nusing a diffusion model conditioned on pretrained speech features.\nSpecifically, we propose TransFusion: a transcribing diffusion model which\niteratively denoises a random character sequence into coherent text\ncorresponding to the transcript of a conditioning utterance. We demonstrate\ncomparable performance to existing high-performing contrastive models on the\nLibriSpeech speech recognition benchmark. To the best of our knowledge, we are\nthe first to apply denoising diffusion to speech recognition. We also propose\nnew techniques for effectively sampling and decoding multinomial diffusion\nmodels. These are required because traditional methods of sampling from\nacoustic models are not possible with our new discrete diffusion approach. 
Code\nand trained models are available: https://github.com/RF5/transfusion-asr", + "authors": "Matthew Baas, Kevin Eloff, Herman Kamper", + "published": "2022-10-14", + "updated": "2022-10-14", + "primary_cat": "eess.AS", + "cats": [ + "eess.AS", + "cs.AI", + "cs.SD" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2206.12327v1", + "title": "Source Localization of Graph Diffusion via Variational Autoencoders for Graph Inverse Problems", + "abstract": "Graph diffusion problems such as the propagation of rumors, computer viruses,\nor smart grid failures are ubiquitous and societal. Hence it is usually crucial\nto identify diffusion sources according to the current graph diffusion\nobservations. Despite its tremendous necessity and significance in practice,\nsource localization, as the inverse problem of graph diffusion, is extremely\nchallenging as it is ill-posed: different sources may lead to the same graph\ndiffusion patterns. Different from most traditional source localization\nmethods, this paper focuses on a probabilistic manner to account for the\nuncertainty of different candidate sources. Such endeavors require overcoming\nchallenges including 1) the uncertainty in graph diffusion source localization\nis hard to be quantified; 2) the complex patterns of the graph diffusion\nsources are difficult to be probabilistically characterized; 3) the\ngeneralization under any underlying diffusion patterns is hard to be imposed.\nTo solve the above challenges, this paper presents a generic framework: Source\nLocalization Variational AutoEncoder (SL-VAE) for locating the diffusion\nsources under arbitrary diffusion patterns. Particularly, we propose a\nprobabilistic model that leverages the forward diffusion estimation model along\nwith deep generative models to approximate the diffusion source distribution\nfor quantifying the uncertainty. SL-VAE further utilizes prior knowledge of the\nsource-observation pairs to characterize the complex patterns of diffusion\nsources by a learned generative prior. Lastly, a unified objective that\nintegrates the forward diffusion estimation model is derived to enforce the\nmodel to generalize under arbitrary diffusion patterns. Extensive experiments\nare conducted on 7 real-world datasets to demonstrate the superiority of SL-VAE\nin reconstructing the diffusion sources by excelling other methods on average\n20% in AUC score.", + "authors": "Chen Ling, Junji Jiang, Junxiang Wang, Liang Zhao", + "published": "2022-06-24", + "updated": "2022-06-24", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.IT", + "math.IT" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/0801.3436v1", + "title": "Model for Diffusion-Induced Ramsey Narrowing", + "abstract": "Diffusion-induced Ramsey narrowing that appears when atoms can leave the\ninteraction region and repeatedly return without lost of coherence is\ninvestigated using strong collisions approximation. 
The effective diffusion\nequation is obtained and solved for low-dimensional model configurations and\nthree-dimensional real one.", + "authors": "Alexander Romanenko, Leonid Yatsenko", + "published": "2008-01-22", + "updated": "2008-01-22", + "primary_cat": "quant-ph", + "cats": [ + "quant-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1705.07063v1", + "title": "Double diffusivity model under stochastic forcing", + "abstract": "The \"double diffusivity\" model was proposed in the late 1970s, and reworked\nin the early 1980s, as a continuum counterpart to existing discrete models of\ndiffusion corresponding to high diffusivity paths, such as grain boundaries and\ndislocation lines. Technically, the model pans out as a system of coupled {\\it\nFick type} diffusion equations to represent \"regular\" and \"high\" diffusivity\npaths with \"source terms\" accounting for the mass exchange between the two\npaths. The model remit was extended by analogy to describe flow in porous media\nwith double porosity, as well as to model heat conduction in media with two\nnon-equilibrium local temperature baths e.g. ion and electron baths. Uncoupling\nof the two partial differential equations leads to a higher-ordered diffusion\nequation, solutions of which could be obtained in terms of classical diffusion\nequation solutions. Similar equations could also be derived within an \"internal\nlength\" gradient (ILG) mechanics formulation applied to diffusion problems,\n{\\it i.e.}, by introducing nonlocal effects, together with inertia and\nviscosity, in a mechanics based formulation of diffusion theory. This issue\nbecomes particularly important in the case of diffusion in nanopolycrystals\nwhose deterministic ILG based theoretical calculations predict a relaxation\ntime that is only about one-tenth of the actual experimentally verified\ntimescale. This article provides the \"missing link\" in this estimation by\nadding a vital element in the ILG structure, that of stochasticity, that takes\ninto account all boundary layer fluctuations. Our stochastic-ILG diffusion\ncalculation confirms rapprochement between theory and experiment, thereby\nbenchmarking a new generation of gradient-based continuum models that conform\ncloser to real life fluctuating environments.", + "authors": "Amit K Chattopadhyay, Elias C Aifantis", + "published": "2017-05-19", + "updated": "2017-05-19", + "primary_cat": "cond-mat.soft", + "cats": [ + "cond-mat.soft", + "cond-mat.mtrl-sci", + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2402.01965v2", + "title": "Analyzing Neural Network-Based Generative Diffusion Models through Convex Optimization", + "abstract": "Diffusion models are becoming widely used in state-of-the-art image, video\nand audio generation. Score-based diffusion models stand out among these\nmethods, necessitating the estimation of score function of the input data\ndistribution. In this study, we present a theoretical framework to analyze\ntwo-layer neural network-based diffusion models by reframing score matching and\ndenoising score matching as convex optimization. Though existing diffusion\ntheory is mainly asymptotic, we characterize the exact predicted score function\nand establish the convergence result for neural network-based diffusion models\nwith finite data. 
This work contributes to understanding what neural\nnetwork-based diffusion model learns in non-asymptotic settings.", + "authors": "Fangzhao Zhang, Mert Pilanci", + "published": "2024-02-03", + "updated": "2024-02-06", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "math.OC" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2212.10805v1", + "title": "Beyond Information Exchange: An Approach to Deploy Network Properties for Information Diffusion", + "abstract": "Information diffusion in Online Social Networks is a new and crucial problem\nin social network analysis field and requires significant research attention.\nEfficient diffusion of information are of critical importance in diverse\nsituations such as; pandemic prevention, advertising, marketing etc. Although\nseveral mathematical models have been developed till date, but previous works\nlacked systematic analysis and exploration of the influence of neighborhood for\ninformation diffusion. In this paper, we have proposed Common Neighborhood\nStrategy (CNS) algorithm for information diffusion that demonstrates the role\nof common neighborhood in information propagation throughout the network. The\nperformance of CNS algorithm is evaluated on several real-world datasets in\nterms of diffusion speed and diffusion outspread and compared with several\nwidely used information diffusion models. Empirical results show CNS algorithm\nenables better information diffusion both in terms of diffusion speed and\ndiffusion outspread.", + "authors": "Soumita Das, Anupam Biswas, Ravi Kishore Devarapalli", + "published": "2022-12-21", + "updated": "2022-12-21", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI", + "cs.CV", + "cs.IR", + "J.4; G.4; I.6" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1812.07249v1", + "title": "A unifying approach to first-passage time distributions in diffusing diffusivity and switching diffusion models", + "abstract": "We propose a unifying theoretical framework for the analysis of first-passage\ntime distributions in two important classes of stochastic processes in which\nthe diffusivity of a particle evolves randomly in time. In the first class of\n\"diffusing diffusivity\" models, the diffusivity changes continuously via a\nprescribed stochastic equation. In turn, the diffusivity switches randomly\nbetween discrete values in the second class of \"switching diffusion\" models.\nFor both cases, we quantify the impact of the diffusivity dynamics onto the\nfirst-passage time distribution of a particle via the moment-generating\nfunction of the integrated diffusivity. We provide general formulas and some\nexplicit solutions for some particular cases of practical interest.", + "authors": "D. S. Grebenkov", + "published": "2018-12-18", + "updated": "2018-12-18", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech", + "physics.bio-ph", + "physics.chem-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1709.05336v1", + "title": "Cs diffusion in SiC high-energy grain boundaries", + "abstract": "Cesium (Cs) is a radioactive fission product whose release is of concern for\nTristructural-Isotropic (TRISO) fuel particles. In this work, Cs diffusion\nthrough high energy grain boundaries (HEGBs) of cubic-SiC is studied using an\nab-initio based kinetic Monte Carlo (kMC) model. 
The HEGB environment was\nmodeled as an amorphous SiC (a-SiC), and Cs defect energies were calculated\nusing density functional theory (DFT). From defect energies, it was suggested\nthat the fastest diffusion mechanism as Cs interstitial in an amorphous SiC.\nThe diffusion of Cs interstitial was simulated using a kMC, based on the site\nand transition state energies sampled from the DFT. The Cs HEGB diffusion\nexhibited an Arrhenius type diffusion in the range of 1200-1600{\\deg}C. The\ncomparison between HEGB results and the other studies suggests not only that\nthe GB diffusion dominates the bulk diffusion, but also that the HEGB is one of\nthe fastest grain boundary paths for the Cs diffusion. The diffusion\ncoefficients in HEGB are clearly a few orders of magnitude lower than the\nreported diffusion coefficients from in- and out-of- pile samples, suggesting\nthat other contributions are responsible, such as a radiation enhanced\ndiffusion.", + "authors": "Hyunseok Ko, Izabela Szlufarska, Dane Morgan", + "published": "2017-09-11", + "updated": "2017-09-11", + "primary_cat": "cond-mat.mtrl-sci", + "cats": [ + "cond-mat.mtrl-sci", + "nucl-th" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1705.01542v2", + "title": "A Spatial Structural Derivative Model for Ultraslow Diffusion", + "abstract": "This study investigates the ultraslow diffusion by a spatial structural\nderivative, in which the exponential function exp(x)is selected as the\nstructural function to construct the local structural derivative diffusion\nequation model. The analytical solution of the diffusion equation is a form of\nBiexponential distribution. Its corresponding mean squared displacement is\nnumerically calculated, and increases more slowly than the logarithmic function\nof time. The local structural derivative diffusion equation with the structural\nfunction exp(x)in space is an alternative physical and mathematical modeling\nmodel to characterize a kind of ultraslow diffusion.", + "authors": "Wei Xu, Wen Chen, Yingjie Liang, Jose Weberszpil", + "published": "2017-05-03", + "updated": "2017-06-13", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2305.13122v1", + "title": "Policy Representation via Diffusion Probability Model for Reinforcement Learning", + "abstract": "Popular reinforcement learning (RL) algorithms tend to produce a unimodal\npolicy distribution, which weakens the expressiveness of complicated policy and\ndecays the ability of exploration. The diffusion probability model is powerful\nto learn complicated multimodal distributions, which has shown promising and\npotential applications to RL. In this paper, we formally build a theoretical\nfoundation of policy representation via the diffusion probability model and\nprovide practical implementations of diffusion policy for online model-free RL.\nConcretely, we character diffusion policy as a stochastic process, which is a\nnew approach to representing a policy. Then we present a convergence guarantee\nfor diffusion policy, which provides a theory to understand the multimodality\nof diffusion policy. Furthermore, we propose the DIPO which is an\nimplementation for model-free online RL with DIffusion POlicy. To the best of\nour knowledge, DIPO is the first algorithm to solve model-free online RL\nproblems with the diffusion model. 
Finally, extensive empirical results show\nthe effectiveness and superiority of DIPO on the standard continuous control\nMujoco benchmark.", + "authors": "Long Yang, Zhixiong Huang, Fenghao Lei, Yucun Zhong, Yiming Yang, Cong Fang, Shiting Wen, Binbin Zhou, Zhouchen Lin", + "published": "2023-05-22", + "updated": "2023-05-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2403.01742v2", + "title": "Diffusion-TS: Interpretable Diffusion for General Time Series Generation", + "abstract": "Denoising diffusion probabilistic models (DDPMs) are becoming the leading\nparadigm for generative models. It has recently shown breakthroughs in audio\nsynthesis, time series imputation and forecasting. In this paper, we propose\nDiffusion-TS, a novel diffusion-based framework that generates multivariate\ntime series samples of high quality by using an encoder-decoder transformer\nwith disentangled temporal representations, in which the decomposition\ntechnique guides Diffusion-TS to capture the semantic meaning of time series\nwhile transformers mine detailed sequential information from the noisy model\ninput. Different from existing diffusion-based approaches, we train the model\nto directly reconstruct the sample instead of the noise in each diffusion step,\ncombining a Fourier-based loss term. Diffusion-TS is expected to generate time\nseries satisfying both interpretablity and realness. In addition, it is shown\nthat the proposed Diffusion-TS can be easily extended to conditional generation\ntasks, such as forecasting and imputation, without any model changes. This also\nmotivates us to further explore the performance of Diffusion-TS under irregular\nsettings. Finally, through qualitative and quantitative experiments, results\nshow that Diffusion-TS achieves the state-of-the-art results on various\nrealistic analyses of time series.", + "authors": "Xinyu Yuan, Yan Qiao", + "published": "2024-03-04", + "updated": "2024-03-14", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2211.07804v3", + "title": "Diffusion Models for Medical Image Analysis: A Comprehensive Survey", + "abstract": "Denoising diffusion models, a class of generative models, have garnered\nimmense interest lately in various deep-learning problems. A diffusion\nprobabilistic model defines a forward diffusion stage where the input data is\ngradually perturbed over several steps by adding Gaussian noise and then learns\nto reverse the diffusion process to retrieve the desired noise-free data from\nnoisy data samples. Diffusion models are widely appreciated for their strong\nmode coverage and quality of the generated samples despite their known\ncomputational burdens. Capitalizing on the advances in computer vision, the\nfield of medical imaging has also observed a growing interest in diffusion\nmodels. To help the researcher navigate this profusion, this survey intends to\nprovide a comprehensive overview of diffusion models in the discipline of\nmedical image analysis. Specifically, we introduce the solid theoretical\nfoundation and fundamental concepts behind diffusion models and the three\ngeneric diffusion modelling frameworks: diffusion probabilistic models,\nnoise-conditioned score networks, and stochastic differential equations. 
Then,\nwe provide a systematic taxonomy of diffusion models in the medical domain and\npropose a multi-perspective categorization based on their application, imaging\nmodality, organ of interest, and algorithms. To this end, we cover extensive\napplications of diffusion models in the medical domain. Furthermore, we\nemphasize the practical use case of some selected approaches, and then we\ndiscuss the limitations of the diffusion models in the medical domain and\npropose several directions to fulfill the demands of this field. Finally, we\ngather the overviewed studies with their available open-source implementations\nat\nhttps://github.com/amirhossein-kz/Awesome-Diffusion-Models-in-Medical-Imaging.", + "authors": "Amirhossein Kazerouni, Ehsan Khodapanah Aghdam, Moein Heidari, Reza Azad, Mohsen Fayyaz, Ilker Hacihaliloglu, Dorit Merhof", + "published": "2022-11-14", + "updated": "2023-06-03", + "primary_cat": "eess.IV", + "cats": [ + "eess.IV", + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2402.13144v1", + "title": "Neural Network Diffusion", + "abstract": "Diffusion models have achieved remarkable success in image and video\ngeneration. In this work, we demonstrate that diffusion models can also\n\\textit{generate high-performing neural network parameters}. Our approach is\nsimple, utilizing an autoencoder and a standard latent diffusion model. The\nautoencoder extracts latent representations of a subset of the trained network\nparameters. A diffusion model is then trained to synthesize these latent\nparameter representations from random noise. It then generates new\nrepresentations that are passed through the autoencoder's decoder, whose\noutputs are ready to use as new subsets of network parameters. Across various\narchitectures and datasets, our diffusion process consistently generates models\nof comparable or improved performance over trained networks, with minimal\nadditional cost. Notably, we empirically find that the generated models perform\ndifferently with the trained networks. Our results encourage more exploration\non the versatile use of diffusion models.", + "authors": "Kai Wang, Zhaopan Xu, Yukun Zhou, Zelin Zang, Trevor Darrell, Zhuang Liu, Yang You", + "published": "2024-02-20", + "updated": "2024-02-20", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/astro-ph/0012545v1", + "title": "Diffusion and the occurrence of hydrogen shell flashes in helium white dwarf stars", + "abstract": "We investigate the effects of element diffusion on the structure and\nevolution of low-mass helium white dwarfs (WD). Attention is focused on the\noccurrence of hydrogen shell flashes induced by diffusion processes during\ncooling phases. Initial models from 0.406 to 0.161 solar masses are constructed\nby applying mass loss rates at different stages of the RGB evolution of a solar\nmodel. The multicomponent flow equations describing gravitational settling, and\nchemical and thermal diffusion are solved and the diffusion calculations are\ncoupled to an evolutionary code. In addition, the same sequences are computed\nbut neglecting diffusion. We find that element diffusion strongly affects the\nstructure and cooling history of helium WD. In particular, diffusion induces\nthe occurrence of hydrogen shell flashes in models with masses ranging from\n0.18 to 0.41 solar masses, which is in sharp contrast from the situation when\ndiffusion is neglected. 
In connection with the further evolution, these\ndiffusion-induced flashes lead to much thinner hydrogen envelopes, preventing\nstable nuclear burning from being an appreciable energy source at advanced\nstages of evolution. This implies much shorter cooling ages than in the case\nwhen diffusion is neglected. These new WD models are discussed in light of\nrecent observational data of some millisecond pulsar systems with WD\ncompanions. We find that age discrepancies between the predictions of standard\nevolutionary models and such observations appear to be the result of ignoring\nelement diffusion in such models. Indeed, such discrepancies vanish when\naccount is made of diffusion.", + "authors": "L. G. Althaus, A. M. Serenelli, O. G. Benvenuto", + "published": "2000-12-29", + "updated": "2000-12-29", + "primary_cat": "astro-ph", + "cats": [ + "astro-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1611.06202v2", + "title": "Brownian yet non-Gaussian diffusion: from superstatistics to subordination of diffusing diffusivities", + "abstract": "A growing number of biological, soft, and active matter systems are observed\nto exhibit normal diffusive dynamics with a linear growth of the mean squared\ndisplacement, yet with a non-Gaussian distribution of increments. Based on the\nChubinsky-Slater idea of a diffusing diffusivity we here establish and analyze\na minimal model framework of diffusion processes with fluctuating diffusivity.\nIn particular, we demonstrate the equivalence of the diffusing diffusivity\nprocess with a superstatistical approach with a distribution of diffusivities,\nat times shorter than the diffusivity correlation time. At longer times a\ncrossover to a Gaussian distribution with an effective diffusivity emerges.\nSpecifically, we establish a subordination picture of Brownian but non-Gaussian\ndiffusion processes, that can be used for a wide class of diffusivity\nfluctuation statistics. Our results are shown to be in excellent agreement with\nsimulations and numerical evaluations.", + "authors": "A. V. Chechkin, F. Seno, R. Metzler, I. M. Sokolov", + "published": "2016-11-18", + "updated": "2017-03-30", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech", + "physics.bio-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2106.04745v2", + "title": "Evaluation of diffuse mismatch model for phonon scattering at disordered interfaces", + "abstract": "Diffuse phonon scattering strongly affects the phonon transport through a\ndisordered interface. The often-used diffuse mismatch model assumes that\nphonons lose memory of their origin after being scattered by the interface.\nUsing mode-resolved atomic Green's function simulation, we demonstrate that\ndiffuse phonon scattering by a single disordered interface cannot make a phonon\nlose its memory and thus the applicability of diffusive mismatch model is\nlimited. 
An analytical expression for diffuse scattering probability based on\nthe continuum approximation is also derived and shown to work reasonably well\nat low frequencies.", + "authors": "Qichen Song, Gang Chen", + "published": "2021-06-09", + "updated": "2021-08-04", + "primary_cat": "cond-mat.mes-hall", + "cats": [ + "cond-mat.mes-hall" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2307.13949v1", + "title": "How Does Diffusion Influence Pretrained Language Models on Out-of-Distribution Data?", + "abstract": "Transformer-based pretrained language models (PLMs) have achieved great\nsuccess in modern NLP. An important advantage of PLMs is good\nout-of-distribution (OOD) robustness. Recently, diffusion models have attracted\na lot of work to apply diffusion to PLMs. It remains under-explored how\ndiffusion influences PLMs on OOD data. The core of diffusion models is a\nforward diffusion process which gradually applies Gaussian noise to inputs, and\na reverse denoising process which removes noise. The noised input\nreconstruction is a fundamental ability of diffusion models. We directly\nanalyze OOD robustness by measuring the reconstruction loss, including testing\nthe abilities to reconstruct OOD data, and to detect OOD samples. Experiments\nare conducted by analyzing different training parameters and data statistical\nfeatures on eight datasets. It shows that finetuning PLMs with diffusion\ndegrades the reconstruction ability on OOD data. The comparison also shows that\ndiffusion models can effectively detect OOD samples, achieving state-of-the-art\nperformance in most of the datasets with an absolute accuracy improvement up to\n18%. These results indicate that diffusion reduces OOD robustness of PLMs.", + "authors": "Huazheng Wang, Daixuan Cheng, Haifeng Sun, Jingyu Wang, Qi Qi, Jianxin Liao, Jing Wang, Cong Liu", + "published": "2023-07-26", + "updated": "2023-07-26", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.AI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1009.5965v1", + "title": "Sensitivity of a Babcock-Leighton Flux-Transport Dynamo to Magnetic Diffusivity Profiles", + "abstract": "We study the influence of various magnetic diffusivity profiles on the\nevolution of the poloidal and toroidal magnetic fields in a kinematic flux\ntransport dynamo model for the Sun. The diffusivity is a poorly understood\ningredient in solar dynamo models. 
We mathematically construct various\ntheoretical profiles of the depth-dependent diffusivity, based on constraints\nfrom mixing length theory and turbulence, and on comparisons of poloidal field\nevolution on the Sun with that from the flux-transport dynamo model.\n We then study the effect of each diffusivity profile in the cyclic evolution\nof the magnetic fields in the Sun, by solving the mean-field dynamo equations.\nWe investigate effects on the solar cycle periods, the maximum tachocline field\nstrengths, and the evolution of the toroidal and poloidal field structures\ninside the convection zone, due to different diffusivity profiles.\n We conduct three experiments: (I) comparing very different magnetic\ndiffusivity profiles; (II) comparing different locations of diffusivity\ngradient near the tachocline for the optimal profile; and (III) comparing\ndifferent slopes of diffusivity gradient for an optimal profile.\n Based on these simulations, we discuss which aspects of depth-dependent\ndiffusivity profiles may be most relevant for magnetic flux evolution in the\nSun, and how certain observations could help improve knowledge of this dynamo\ningredient.", + "authors": "E. J. Zita", + "published": "2010-09-29", + "updated": "2010-09-29", + "primary_cat": "astro-ph.SR", + "cats": [ + "astro-ph.SR", + "physics.flu-dyn" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2403.05794v2", + "title": "Privacy-Preserving Diffusion Model Using Homomorphic Encryption", + "abstract": "In this paper, we introduce a privacy-preserving stable diffusion framework\nleveraging homomorphic encryption, called HE-Diffusion, which primarily focuses\non protecting the denoising phase of the diffusion process. HE-Diffusion is a\ntailored encryption framework specifically designed to align with the unique\narchitecture of stable diffusion, ensuring both privacy and functionality. To\naddress the inherent computational challenges, we propose a novel\nmin-distortion method that enables efficient partial image encryption,\nsignificantly reducing the overhead without compromising the model's output\nquality. Furthermore, we adopt a sparse tensor representation to expedite\ncomputational operations, enhancing the overall efficiency of the\nprivacy-preserving diffusion process. We successfully implement HE-based\nprivacy-preserving stable diffusion inference. The experimental results show\nthat HE-Diffusion achieves 500 times speedup compared with the baseline method,\nand reduces time cost of the homomorphically encrypted inference to the minute\nlevel. Both the performance and accuracy of the HE-Diffusion are on par with\nthe plaintext counterpart. Our approach marks a significant step towards\nintegrating advanced cryptographic techniques with state-of-the-art generative\nmodels, paving the way for privacy-preserving and efficient image generation in\ncritical applications.", + "authors": "Yaojian Chen, Qiben Yan", + "published": "2024-03-09", + "updated": "2024-05-02", + "primary_cat": "cs.CR", + "cats": [ + "cs.CR", + "cs.AI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2304.05060v2", + "title": "SPIRiT-Diffusion: Self-Consistency Driven Diffusion Model for Accelerated MRI", + "abstract": "Diffusion models have emerged as a leading methodology for image generation\nand have proven successful in the realm of magnetic resonance imaging (MRI)\nreconstruction. 
However, existing reconstruction methods based on diffusion\nmodels are primarily formulated in the image domain, making the reconstruction\nquality susceptible to inaccuracies in coil sensitivity maps (CSMs). k-space\ninterpolation methods can effectively address this issue but conventional\ndiffusion models are not readily applicable in k-space interpolation. To\novercome this challenge, we introduce a novel approach called SPIRiT-Diffusion,\nwhich is a diffusion model for k-space interpolation inspired by the iterative\nself-consistent SPIRiT method. Specifically, we utilize the iterative solver of\nthe self-consistent term (i.e., k-space physical prior) in SPIRiT to formulate\na novel stochastic differential equation (SDE) governing the diffusion process.\nSubsequently, k-space data can be interpolated by executing the diffusion\nprocess. This innovative approach highlights the optimization model's role in\ndesigning the SDE in diffusion models, enabling the diffusion process to align\nclosely with the physics inherent in the optimization model, a concept referred\nto as model-driven diffusion. We evaluated the proposed SPIRiT-Diffusion method\nusing a 3D joint intracranial and carotid vessel wall imaging dataset. The\nresults convincingly demonstrate its superiority over image-domain\nreconstruction methods, achieving high reconstruction quality even at a\nsubstantial acceleration rate of 10.", + "authors": "Zhuo-Xu Cui, Chentao Cao, Yue Wang, Sen Jia, Jing Cheng, Xin Liu, Hairong Zheng, Dong Liang, Yanjie Zhu", + "published": "2023-04-11", + "updated": "2024-04-20", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1411.2007v1", + "title": "On large time behavior and selection principle for a diffusive Carr-Penrose Model", + "abstract": "This paper is concerned with the study of a diffusive perturbation of the\nlinear LSW model introduced by Carr and Penrose. A main subject of interest is\nto understand how the presence of diffusion acts as a selection principle,\nwhich singles out a particular self-similar solution of the linear LSW model as\ndetermining the large time behavior of the diffusive model. A selection\nprinciple is rigorously proven for a model which is a semi-classical\napproximation to the diffusive model. Upper bounds on the rate of coarsening\nare also obtained for the full diffusive model.", + "authors": "Joseph G. Conlon, Michael Dabkowski, Jingchen Wu", + "published": "2014-11-07", + "updated": "2014-11-07", + "primary_cat": "math.AP", + "cats": [ + "math.AP", + "35F05" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2404.07771v1", + "title": "An Overview of Diffusion Models: Applications, Guided Generation, Statistical Rates and Optimization", + "abstract": "Diffusion models, a powerful and universal generative AI technology, have\nachieved tremendous success in computer vision, audio, reinforcement learning,\nand computational biology. In these applications, diffusion models provide\nflexible high-dimensional data modeling, and act as a sampler for generating\nnew samples under active guidance towards task-desired properties. Despite the\nsignificant empirical success, theory of diffusion models is very limited,\npotentially slowing down principled methodological innovations for further\nharnessing and improving diffusion models. 
In this paper, we review emerging\napplications of diffusion models, understanding their sample generation under\nvarious controls. Next, we overview the existing theories of diffusion models,\ncovering their statistical properties and sampling capabilities. We adopt a\nprogressive routine, beginning with unconditional diffusion models and\nconnecting to conditional counterparts. Further, we review a new avenue in\nhigh-dimensional structured optimization through conditional diffusion models,\nwhere searching for solutions is reformulated as a conditional sampling problem\nand solved by diffusion models. Lastly, we discuss future directions about\ndiffusion models. The purpose of this paper is to provide a well-rounded\ntheoretical exposure for stimulating forward-looking theories and methods of\ndiffusion models.", + "authors": "Minshuo Chen, Song Mei, Jianqing Fan, Mengdi Wang", + "published": "2024-04-11", + "updated": "2024-04-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "math.ST", + "stat.ML", + "stat.TH" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2012.06816v1", + "title": "Evaluation and Comparison of Diffusion Models with Motif Features", + "abstract": "Diffusion models simulate the propagation of influence in networks. The\ndesign and evaluation of diffusion models have been subjective and empirical.\nWhen being applied to a network represented by a graph, the diffusion model\ngenerates a sequence of edges on which the influence flows; such a sequence forms\na temporal network. In most scenarios, the statistical properties or the\ncharacteristics of a network are inferred by analyzing the temporal networks\ngenerated by diffusion models. To analyze real temporal networks, the motif has\nbeen proposed as a reliable feature. However, it is unclear how the network\ntopology and the diffusion model affect the motif feature of a generated\ntemporal network. In this paper, we adopt the motif feature to evaluate the\ntemporal graph generated by a diffusion model, thence the diffusion model\nitself. Two benchmarks for quantitatively evaluating diffusion models with motif,\nstability and separability, are proposed and measured on numerous diffusion\nmodels. One motif-based metric is proposed to measure the similarity between\ndiffusion models. The experiments suggest that the motif of a generated\ntemporal network is dominated by the diffusion model, while the network\ntopology is almost ignored. This result indicates that more practical and\nreliable diffusion models have to be designed with delicacy in order to capture\nthe propagation patterns of real temporal networks.", + "authors": "Fangqi Li", + "published": "2020-12-12", + "updated": "2020-12-12", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI", + "cs.NI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1210.5101v1", + "title": "Global well-posedness and zero-diffusion limit of classical solutions to the 3D conservation laws arising in chemotaxis", + "abstract": "In this paper, we study the relationship between a diffusive model and a\nnon-diffusive model which are both derived from the well-known Keller-Segel\nmodel, as a coefficient of diffusion $\\varepsilon$ goes to zero. First, we\nestablish the global well-posedness of classical solutions to the Cauchy\nproblem for the diffusive model with smooth initial data which is of small\n$L^2$ norm, together with some {\\it a priori} estimates uniform for $t$ and\n$\\varepsilon$.
Then we investigate the zero-diffusion limit, and get the global\nwell-posedness of classical solutions to the Cauchy problem for the\nnon-diffusive model. Finally, we derive the convergence rate of the diffusive\nmodel toward the non-diffusive model. It is shown that the convergence rate in\n$L^\\infty$ norm is of the order $O(\\varepsilon^{1/2})$. It should be noted that\nthe initial data is small in $L^2$-norm but can be of large oscillations with\nconstant state at far field. As a byproduct, we improve the corresponding\nresult on the well-posedness of the non-diffusive model which requires small\noscillations.", + "authors": "Hongyun Peng, Huanyao Wen, Changjiang Zhu", + "published": "2012-10-18", + "updated": "2012-10-18", + "primary_cat": "math.AP", + "cats": [ + "math.AP" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/math/0204289v1", + "title": "On diffusion approximation with discontinuous coefficients", + "abstract": "Convergence of stochastic processes with jumps to diffusion processes is\ninvestigated in the case when the limit process has discontinuous coefficients.\n An example is given in which the diffusion approximation of a queueing model\nyields a diffusion process with discontinuous diffusion and drift coefficients.", + "authors": "N. V. Krylov, R. Liptser", + "published": "2002-04-24", + "updated": "2002-04-24", + "primary_cat": "math.PR", + "cats": [ + "math.PR", + "math.SG", + "60B10; 60K25}" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/cond-mat/0210703v1", + "title": "Membrane bound protein diffusion viewed by fluorescence recovery after bleaching experiments : models analysis", + "abstract": "Diffusion processes in biological membranes are of interest to understand the\nmacromolecular organisation and function of several molecules. Fluorescence\nRecovery After Photobleaching (FRAP) has been widely used as a method to\nanalyse these processes using the classical Brownian diffusion model. In the first\npart of this work, the analytical expression of the fluorescence recovery as a\nfunction of time has been established for anomalous diffusion due to long\nwaiting times. Then, experimental fluorescence recoveries recorded in living\ncells on a membrane-bound protein have been analysed using three different\nmodels : normal Brownian diffusion, Brownian diffusion with an immobile\nfraction and anomalous diffusion due to long waiting times.", + "authors": "C. Favard, N. Olivi-Tran, J. -L. Meunier", + "published": "2002-10-31", + "updated": "2002-10-31", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech", + "physics.bio-ph", + "q-bio.BM" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2303.06574v2", + "title": "Diffusion Models for Non-autoregressive Text Generation: A Survey", + "abstract": "Non-autoregressive (NAR) text generation has attracted much attention in the\nfield of natural language processing, which greatly reduces the inference\nlatency but has to sacrifice the generation accuracy. Recently, diffusion\nmodels, a class of latent variable generative models, have been introduced into\nNAR text generation, showing an improved text generation quality. In this\nsurvey, we review the recent progress in diffusion models for NAR text\ngeneration. As the background, we first present the general definition of\ndiffusion models and the text diffusion models, and then discuss their merits\nfor NAR generation.
As the core content, we further introduce two mainstream\ndiffusion models in existing work of text diffusion, and review the key designs\nof the diffusion process. Moreover, we discuss the utilization of pre-trained\nlanguage models (PLMs) for text diffusion models and introduce optimization\ntechniques for text data. Finally, we discuss several promising directions and\nconclude this paper. Our survey aims to provide researchers with a systematic\nreference of related research on text diffusion models for NAR generation. We\npresent our collection of text diffusion models at\nhttps://github.com/RUCAIBox/Awesome-Text-Diffusion-Models.", + "authors": "Yifan Li, Kun Zhou, Wayne Xin Zhao, Ji-Rong Wen", + "published": "2023-03-12", + "updated": "2023-05-13", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2308.06342v2", + "title": "Mirror Diffusion Models", + "abstract": "Diffusion models have successfully been applied to generative tasks in\nvarious continuous domains. However, applying diffusion to discrete categorical\ndata remains a non-trivial task. Moreover, generation in continuous domains\noften requires clipping in practice, which motivates the need for a theoretical\nframework for adapting diffusion to constrained domains. Inspired by the mirror\nLangevin algorithm for the constrained sampling problem, in this theoretical\nreport we propose Mirror Diffusion Models (MDMs). We demonstrate MDMs in the\ncontext of simplex diffusion and propose natural extensions to popular domains\nsuch as image and text generation.", + "authors": "Jaesung Tae", + "published": "2023-08-11", + "updated": "2023-08-18", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2301.00527v1", + "title": "Diffusion Probabilistic Models for Scene-Scale 3D Categorical Data", + "abstract": "In this paper, we learn a diffusion model to generate 3D data on a\nscene-scale. Specifically, our model crafts a 3D scene consisting of multiple\nobjects, while recent diffusion research has focused on a single object. To\nrealize our goal, we represent a scene with discrete class labels, i.e.,\ncategorical distribution, to assign multiple objects into semantic categories.\nThus, we extend discrete diffusion models to learn scene-scale categorical\ndistributions. In addition, we validate that a latent diffusion model can\nreduce computation costs for training and deploying. To the best of our\nknowledge, our work is the first to apply discrete and latent diffusion for 3D\ncategorical data on a scene-scale. We further propose to perform semantic scene\ncompletion (SSC) by learning a conditional distribution using our diffusion\nmodel, where the condition is a partial observation in a sparse point cloud. In\nexperiments, we empirically show that our diffusion models not only generate\nreasonable scenes, but also perform the scene completion task better than a\ndiscriminative model. 
Our code and models are available at\nhttps://github.com/zoomin-lee/scene-scale-diffusion", + "authors": "Jumin Lee, Woobin Im, Sebin Lee, Sung-Eui Yoon", + "published": "2023-01-02", + "updated": "2023-01-02", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2306.03436v2", + "title": "Intellectual Property Protection of Diffusion Models via the Watermark Diffusion Process", + "abstract": "Diffusion models have rapidly become a vital part of deep generative\narchitectures, given today's increasing demands. Obtaining large,\nhigh-performance diffusion models demands significant resources, highlighting\ntheir importance as intellectual property worth protecting. However, existing\nwatermarking techniques for ownership verification are insufficient when\napplied to diffusion models. Very recent research in watermarking diffusion\nmodels either exposes watermarks during task generation, which harms the\nimperceptibility, or is developed for conditional diffusion models that require\nprompts to trigger the watermark. This paper introduces WDM, a novel\nwatermarking solution for diffusion models without imprinting the watermark\nduring task generation. It involves training a model to concurrently learn a\nWatermark Diffusion Process (WDP) for embedding watermarks alongside the\nstandard diffusion process for task generation. We provide a detailed\ntheoretical analysis of WDP training and sampling, relating it to a shifted\nGaussian diffusion process via the same reverse noise. Extensive experiments\nare conducted to validate the effectiveness and robustness of our approach in\nvarious trigger and watermark data configurations.", + "authors": "Sen Peng, Yufei Chen, Cong Wang, Xiaohua Jia", + "published": "2023-06-06", + "updated": "2023-11-29", + "primary_cat": "cs.CR", + "cats": [ + "cs.CR", + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2302.07261v2", + "title": "Where to Diffuse, How to Diffuse, and How to Get Back: Automated Learning for Multivariate Diffusions", + "abstract": "Diffusion-based generative models (DBGMs) perturb data to a target noise\ndistribution and reverse this process to generate samples. The choice of\nnoising process, or inference diffusion process, affects both likelihoods and\nsample quality. For example, extending the inference process with auxiliary\nvariables leads to improved sample quality. While there are many such\nmultivariate diffusions to explore, each new one requires significant\nmodel-specific analysis, hindering rapid prototyping and evaluation. In this\nwork, we study Multivariate Diffusion Models (MDMs). For any number of\nauxiliary variables, we provide a recipe for maximizing a lower-bound on the\nMDMs likelihood without requiring any model-specific analysis. We then\ndemonstrate how to parameterize the diffusion for a specified target noise\ndistribution; these two points together enable optimizing the inference\ndiffusion process. Optimizing the diffusion expands easy experimentation from\njust a few well-known processes to an automatic search over all linear\ndiffusions. To demonstrate these ideas, we introduce two new specific\ndiffusions as well as learn a diffusion process on the MNIST, CIFAR10, and\nImageNet32 datasets. 
We show learned MDMs match or surpass bits-per-dims (BPDs)\nrelative to fixed choices of diffusions for a given dataset and model\narchitecture.", + "authors": "Raghav Singhal, Mark Goldstein, Rajesh Ranganath", + "published": "2023-02-14", + "updated": "2023-03-03", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2310.01221v2", + "title": "Nonlocal diffusion model with maximum principle", + "abstract": "In this paper, we propose nonlocal diffusion models with Dirichlet boundary.\nThese nonlocal diffusion models preserve the maximum principle and also have a\ncorresponding variational form. With these good properties, it is relatively\neasy to prove the well-posedness and the vanishing nonlocality convergence.\nFurthermore, by a specifically designed weight function, we can get a nonlocal\ndiffusion model with second-order convergence, which is optimal for nonlocal\ndiffusion models.", + "authors": "Zuoqiang Shi", + "published": "2023-10-02", + "updated": "2023-10-12", + "primary_cat": "math.AP", + "cats": [ + "math.AP", + "cs.NA", + "math.NA" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/0912.3770v1", + "title": "SLE(6) and the geometry of diffusion fronts", + "abstract": "We study the diffusion front for a natural two-dimensional model where many\nparticles starting at the origin diffuse independently. It turns out that this\nmodel can be described using properties of near-critical percolation, and\nprovides a natural example where critical fractal geometries spontaneously\narise.", + "authors": "Pierre Nolin", + "published": "2009-12-18", + "updated": "2009-12-18", + "primary_cat": "math.PR", + "cats": [ + "math.PR" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1404.3573v1", + "title": "\"Diffusing diffusivity\": A model for anomalous and \"anomalous yet Brownian\" diffusion", + "abstract": "Wang et al. [PNAS 106 (2009) 15160] have found that in several systems the\nlinear time dependence of the mean-square displacement (MSD) of diffusing\ncolloidal particles, typical of normal diffusion, is accompanied by a\nnon-Gaussian displacement distribution (DisD), with roughly exponential tails\nat short times, a situation they termed \"anomalous yet Brownian\" diffusion. The\ndiversity of systems in which this is observed calls for a generic model. We\npresent such a model where there is \"diffusivity memory\" but no \"direction\nmemory\" in the particle trajectory, and we show that it leads to both a linear\nMSD and a non-Gaussian DisD at short times. In our model, the diffusivity is\nundergoing a (perhaps biased) random walk, hence the expression \"diffusing\ndiffusivity\". The DisD is predicted to be exactly exponential at short times if\nthe distribution of diffusivities is itself exponential, but an exponential\nremains a good fit to the DisD for a variety of diffusivity distributions.\nMoreover, our generic model can be modified to produce subdiffusion.", + "authors": "Mykyta V. Chubynsky, Gary W. 
Slater", + "published": "2014-04-14", + "updated": "2014-04-14", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech", + "cond-mat.soft" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1905.04004v2", + "title": "Well-posedness of a cross-diffusion population model with nonlocal diffusion", + "abstract": "We prove the existence and uniqueness of solution of a nonlocal\ncross-diffusion competitive population model for two species. The model may be\nconsidered as a version, or even an approximation, of the paradigmatic\nShigesada-Kawasaki-Teramoto cross-diffusion model, in which the usual diffusion\ndifferential operator is replaced by an integral diffusion operator. The proof\nof existence of solutions is based on a compactness argument, while the\nuniqueness of solution is achieved through a duality technique.", + "authors": "Gonzalo Galiano, Juli\u00e1n Velasco", + "published": "2019-05-10", + "updated": "2024-01-24", + "primary_cat": "math.AP", + "cats": [ + "math.AP" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1503.03201v2", + "title": "Fractional Diffusion Equations for Lattice and Continuum: Grunwald-Letnikov Differences and Derivatives Approach", + "abstract": "Fractional diffusion equations for three-dimensional lattice models based on\nfractional-order differences of the Grunwald-Letnikov type are suggested. These\nlattice fractional diffusion equations contain difference operators that\ndescribe long-range jumps from one lattice site to other. In continuum limit,\nthe suggested lattice diffusion equations with non-integer order differences\ngive the diffusion equations with the Grunwald-Letnikov fractional derivatives\nfor continuum. We propose a consistent derivation of the fractional diffusion\nequation with the fractional derivatives of Grunwald-Letnikov type. The\nsuggested lattice diffusion equations can be considered as a new\nmicrostructural basis of space-fractional diffusion in nonlocal media.", + "authors": "Vasily E. Tarasov", + "published": "2015-03-11", + "updated": "2015-03-12", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/nlin/0212039v2", + "title": "Front dynamics in reaction-diffusion systems with Levy flights: a fractional diffusion approach", + "abstract": "The use of reaction-diffusion models rests on the key assumption that the\nunderlying diffusive process is Gaussian. However, a growing number of studies\nhave pointed out the prevalence of anomalous diffusion, and there is a need to\nunderstand the dynamics of reactive systems in the presence of this type of\nnon-Gaussian diffusion. Here we present a study of front dynamics in\nreaction-diffusion systems where anomalous diffusion is due to the presence of\nasymmetric Levy flights. Our approach consists of replacing the Laplacian\ndiffusion operator by a fractional diffusion operator, whose fundamental\nsolutions are Levy $\\alpha$-stable distributions. Numerical simulation of the\nfractional Fisher-Kolmogorov equation, and analytical arguments show that\nanomalous diffusion leads to the exponential acceleration of fronts and a\nuniversal power law decay, $x^{-\\alpha}$, of the tail, where $\\alpha$, the\nindex of the Levy distribution, is the order of the fractional derivative.", + "authors": "D. del-Castillo-Negrete, B. A. Carreras, V. E. 
Lynch", + "published": "2002-12-17", + "updated": "2003-06-30", + "primary_cat": "nlin.PS", + "cats": [ + "nlin.PS", + "nlin.CD" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2301.00059v2", + "title": "Describing NMR chemical exchange by effective phase diffusion approach", + "abstract": "This paper proposes an effective phase diffusion method to analyze chemical\nexchange in nuclear magnetic resonance (NMR). The chemical exchange involves\nspin jumps around different sites where the spin angular frequencies vary,\nwhich leads to a random phase walk viewed from the rotating frame reference.\nTherefore, the random walk in phase space can be treated by the effective phase\ndiffusion method. Both the coupled and uncoupled phase diffusions are\nconsidered; additionally, it includes normal diffusion as well as fractional\ndiffusion. Based on these phase diffusion equations, the line shape of NMR\nexchange spectrum can be analyzed. By comparing these theoretical results with\nthe conventional theory, this phase diffusion approach works for fast exchange,\nranging from slightly faster than intermediate exchange to very fast exchange.\nFor normal diffusion models, the theoretically predicted curves agree with\nthose predicted from traditional models in the literature, and the\ncharacteristic exchange time obtained from phase diffusion with a fixed jump\ntime is the same as that obtained from the conventional model. However, the\nphase diffusion with a monoexponential time distribution gives a characteristic\nexchange time constant which is half of that obtained from the traditional\nmodel. Additionally, the fractional diffusion obtains a significantly different\nline shape than that predicted based on normal diffusion.", + "authors": "Guoxing Lin", + "published": "2022-12-30", + "updated": "2023-05-17", + "primary_cat": "physics.chem-ph", + "cats": [ + "physics.chem-ph", + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1609.04658v1", + "title": "Analyzing Signal Attenuation in PFG Anomalous Diffusion via a Modified Gaussian Phase Distribution Approximation Based on Fractal Derivative Model", + "abstract": "Pulsed field gradient (PFG) has been increasingly employed to study anomalous\ndiffusions in Nuclear Magnetic Resonance (NMR) and Magnetic Resonance Imaging\n(MRI). However, the analysis of PFG anomalous diffusion is complicated. In this\npaper, a fractal derivative model based modified Gaussian phase distribution\nmethod is proposed to describe PFG anomalous diffusion. By using the phase\ndistribution obtained from the effective phase shift diffusion method based on\nfractal derivatives, and employing some of the traditional Gaussian phase\ndistribution approximation techniques, a general signal attenuation expression\nfor free fractional diffusion is derived. This expression describes a stretched\nexponential function based attenuation, which is distinct from both the\nexponential attenuation for normal diffusion obtained from conventional\nGaussian phase distribution approximation, and the Mittag-Leffler function\nbased attenuation for anomalous diffusion obtained from fractional derivative.\nThe obtained signal attenuation expression can analyze the finite gradient\npulse width (FGPW) effect. Additionally, it can generally be applied to all\nthree types of PFG fractional diffusions classified based on time derivative\norder alpha and space derivative order beta. 
These three types of fractional\ndiffusions include time-fractional diffusion, space-fractional diffusion, and\ngeneral fractional diffusion. The results in this paper are consistent with\nreported results based on effective phase shift diffusion equation method and\ninstantaneous signal attenuation method. This method provides a new, convenient\napproximation formalism for analyzing PFG anomalous diffusion experiments.", + "authors": "Guoxing Lin", + "published": "2016-09-15", + "updated": "2016-09-15", + "primary_cat": "physics.chem-ph", + "cats": [ + "physics.chem-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2303.16203v3", + "title": "Your Diffusion Model is Secretly a Zero-Shot Classifier", + "abstract": "The recent wave of large-scale text-to-image diffusion models has\ndramatically increased our text-based image generation abilities. These models\ncan generate realistic images for a staggering variety of prompts and exhibit\nimpressive compositional generalization abilities. Almost all use cases thus\nfar have solely focused on sampling; however, diffusion models can also provide\nconditional density estimates, which are useful for tasks beyond image\ngeneration. In this paper, we show that the density estimates from large-scale\ntext-to-image diffusion models like Stable Diffusion can be leveraged to\nperform zero-shot classification without any additional training. Our\ngenerative approach to classification, which we call Diffusion Classifier,\nattains strong results on a variety of benchmarks and outperforms alternative\nmethods of extracting knowledge from diffusion models. Although a gap remains\nbetween generative and discriminative approaches on zero-shot recognition\ntasks, our diffusion-based approach has significantly stronger multimodal\ncompositional reasoning ability than competing discriminative approaches.\nFinally, we use Diffusion Classifier to extract standard classifiers from\nclass-conditional diffusion models trained on ImageNet. Our models achieve\nstrong classification performance using only weak augmentations and exhibit\nqualitatively better \"effective robustness\" to distribution shift. Overall, our\nresults are a step toward using generative over discriminative models for\ndownstream tasks. Results and visualizations at\nhttps://diffusion-classifier.github.io/", + "authors": "Alexander C. Li, Mihir Prabhudesai, Shivam Duggal, Ellis Brown, Deepak Pathak", + "published": "2023-03-28", + "updated": "2023-09-13", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI", + "cs.CV", + "cs.NE", + "cs.RO" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2305.08379v2", + "title": "TESS: Text-to-Text Self-Conditioned Simplex Diffusion", + "abstract": "Diffusion models have emerged as a powerful paradigm for generation,\nobtaining strong performance in various continuous domains. However, applying\ncontinuous diffusion models to natural language remains challenging due to its\ndiscrete nature and the need for a large number of diffusion steps to generate\ntext, making diffusion-based generation expensive. In this work, we propose\nText-to-text Self-conditioned Simplex Diffusion (TESS), a text diffusion model\nthat is fully non-autoregressive, employs a new form of self-conditioning, and\napplies the diffusion process on the logit simplex space rather than the\nlearned embedding space. 
Through extensive experiments on natural language\nunderstanding and generation tasks including summarization, text\nsimplification, paraphrase generation, and question generation, we demonstrate\nthat TESS outperforms state-of-the-art non-autoregressive models, requires\nfewer diffusion steps with minimal drop in performance, and is competitive with\npretrained autoregressive sequence-to-sequence models. We publicly release our\ncodebase at https://github.com/allenai/tess-diffusion.", + "authors": "Rabeeh Karimi Mahabadi, Hamish Ivison, Jaesung Tae, James Henderson, Iz Beltagy, Matthew E. Peters, Arman Cohan", + "published": "2023-05-15", + "updated": "2024-02-21", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2403.15766v1", + "title": "BEND: Bagging Deep Learning Training Based on Efficient Neural Network Diffusion", + "abstract": "Bagging has achieved great success in the field of machine learning by\nintegrating multiple base classifiers to build a single strong classifier to\nreduce model variance. The performance improvement of bagging mainly relies on\nthe number and diversity of base classifiers. However, traditional deep\nlearning model training methods are expensive to train individually and\ndifficult to train multiple models with low similarity in a restricted dataset.\nRecently, diffusion models, which have been tremendously successful in the\nfields of imaging and vision, have been found to be effective in generating\nneural network model weights and biases with diversity. We creatively propose a\nBagging deep learning training algorithm based on Efficient Neural network\nDiffusion (BEND). The originality of BEND comes from the first use of a neural\nnetwork diffusion model to efficiently build base classifiers for bagging. Our\napproach is simple but effective, first using multiple trained model weights\nand biases as inputs to train an autoencoder and a latent diffusion model to realize\na diffusion model from noise to valid neural network parameters. Subsequently,\nwe generate several base classifiers using the trained diffusion model.\nFinally, we integrate these base classifiers for various inference tasks using\nthe Bagging method. Resulting experiments on multiple models and datasets show\nthat our proposed BEND algorithm can consistently outperform the mean and\nmedian accuracies of both the original trained model and the diffused model. At\nthe same time, new models diffused using the diffusion model have higher\ndiversity and lower cost than multiple models trained using traditional\nmethods. The BEND approach successfully introduces diffusion models into the\nnew deep learning training domain and provides a new paradigm for future deep\nlearning training and inference.", + "authors": "Jia Wei, Xingjun Zhang, Witold Pedrycz", + "published": "2024-03-23", + "updated": "2024-03-23", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1304.0925v1", + "title": "A new approach to multi-modal diffusions with applications to protein folding", + "abstract": "This article demonstrates that flexible and statistically tractable\nmulti-modal diffusion models can be attained by transformation of simple\nwell-known diffusion models such as the Ornstein-Uhlenbeck model, or more\ngenerally a Pearson diffusion.
The transformed diffusion inherits many\nproperties of the underlying simple diffusion including its mixing rates and\ndistributions of first passage times. Likelihood inference and martingale\nestimating functions are considered in the case of a discretely observed\nbimodal diffusion. It is further demonstrated that model parameters can be\nidentified and estimated when the diffusion is observed with additional\nmeasurement error. The new approach is applied to molecular dynamics data in\nform of a reaction coordinate of the small Trp-zipper protein, for which the\nfolding and unfolding rates are estimated. The new models provide a better fit\nto this type of protein folding data than previous models because the diffusion\ncoefficient is state-dependent.", + "authors": "Julie Forman, Michael S\u00f8rensen", + "published": "2013-04-03", + "updated": "2013-04-03", + "primary_cat": "stat.ME", + "cats": [ + "stat.ME" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2002.02101v1", + "title": "Trace of anomalous diffusion in a biased quenched trap model", + "abstract": "Diffusion on a quenched heterogeneous environment in the presence of bias is\nconsidered analytically. The first-passage-time statistics can be applied to\nobtain the drift and the diffusion coefficient in periodic quenched\nenvironments. We show several transition points at which sample-to-sample\nfluctuations of the drift or the diffusion coefficient remain large even when\nthe system size becomes large, i.e., non-self-averaging. Moreover, we find that\nthe disorder average of the diffusion coefficient diverges or becomes zero when\nthe corresponding annealed model generates superdiffusion or subdiffusion,\nrespectively. This result implies that anomalous diffusion in an annealed model\nis traced by anomaly of the diffusion coefficients in the corresponding\nquenched model.", + "authors": "Takuma Akimoto, Keiji Saito", + "published": "2020-02-06", + "updated": "2020-02-06", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2005.00562v1", + "title": "Unexpected crossovers in correlated random-diffusivity processes", + "abstract": "The passive and active motion of micron-sized tracer particles in crowded\nliquids and inside living biological cells is ubiquitously characterised by\n\"viscoelastic\" anomalous diffusion, in which the increments of the motion\nfeature long-ranged negative and positive correlations. While viscoelastic\nanomalous diffusion is typically modelled by a Gaussian process with correlated\nincrements, so-called fractional Gaussian noise, an increasing number of\nsystems are reported, in which viscoelastic anomalous diffusion is paired with\nnon-Gaussian displacement distributions. Following recent advances in Brownian\nyet non-Gaussian diffusion we here introduce and discuss several possible\nversions of random-diffusivity models with long-ranged correlations. While all\nthese models show a crossover from non-Gaussian to Gaussian distributions\nbeyond some correlation time, their mean squared displacements exhibit\nstrikingly different behaviours: depending on the model crossovers from\nanomalous to normal diffusion are observed, as well as unexpected dependencies\nof the effective diffusion coefficient on the correlation exponent. 
Our\nobservations of the strong non-universality of random-diffusivity viscoelastic\nanomalous diffusion are important for the analysis of experiments and a better\nunderstanding of the physical origins of \"viscoelastic yet non-Gaussian\"\ndiffusion.", + "authors": "Wei Wang, Flavio Seno, Igor M. Sokolov, Aleksei V. Chechkin, Ralf Metzler", + "published": "2020-05-01", + "updated": "2020-05-01", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech", + "physics.bio-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2104.13565v2", + "title": "Generalisation of continuous time random walk to anomalous diffusion MRI models with an age-related evaluation of human corpus callosum", + "abstract": "Diffusion MRI measures of the human brain provide key insight into\nmicrostructural variations across individuals and into the impact of central\nnervous system diseases and disorders. One approach to extract information from\ndiffusion signals has been to use biologically relevant analytical models to\nlink millimetre scale diffusion MRI measures with microscale influences. The\nother approach has been to represent diffusion as an anomalous transport\nprocess and infer microstructural information from the different anomalous\ndiffusion equation parameters. In this study, we investigated how parameters of\nvarious anomalous diffusion models vary with age in the human brain white\nmatter, particularly focusing on the corpus callosum. We first unified several\nestablished anomalous diffusion models (the super-diffusion, sub-diffusion,\nquasi-diffusion and fractional Bloch-Torrey models) under the continuous time\nrandom walk modelling framework. This unification allows a consistent parameter\nfitting strategy to be applied from which meaningful model parameter\ncomparisons can be made. We then provided a novel way to derive the diffusional\nkurtosis imaging (DKI) model, which is shown to be a degree two approximation\nof the sub-diffusion model. This link between the DKI and sub-diffusion models\nled to a new robust technique for generating maps of kurtosis and diffusivity\nusing the sub-diffusion parameters \\b{eta}_SUB and D_SUB. Superior tissue\ncontrast is achieved in kurtosis maps based on the sub-diffusion model. 7T\ndiffusion weighted MRI data for 65 healthy participants in the age range 19-78\nyears was used in this study. Results revealed that anomalous diffusion model\nparameters {\\alpha} and \\b{eta} have shown consistent positive correlation with\nage in the corpus callosum, indicating {\\alpha} and \\b{eta} are sensitive to\ntissue microstructural changes in aging.", + "authors": "Qianqian Yang, David C. Reutens, Viktor Vegh", + "published": "2021-04-28", + "updated": "2022-01-17", + "primary_cat": "physics.med-ph", + "cats": [ + "physics.med-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2302.05737v2", + "title": "A Reparameterized Discrete Diffusion Model for Text Generation", + "abstract": "This work studies discrete diffusion probabilistic models with applications\nto natural language generation. We derive an alternative yet equivalent\nformulation of the sampling from discrete diffusion processes and leverage this\ninsight to develop a family of reparameterized discrete diffusion models. The\nderived generic framework is highly flexible, offers a fresh perspective of the\ngeneration process in discrete diffusion models, and features more effective\ntraining and decoding techniques. 
We conduct extensive experiments to evaluate\nthe text generation capability of our model, demonstrating significant\nimprovements over existing diffusion models.", + "authors": "Lin Zheng, Jianbo Yuan, Lei Yu, Lingpeng Kong", + "published": "2023-02-11", + "updated": "2024-02-03", + "primary_cat": "cs.CL", + "cats": [ + "cs.CL", + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2305.12377v1", + "title": "The vanishing diffusion limit for an Oldroyd-B model in $\\mathbb{R}^2_+$", + "abstract": "We consider the initial-boundary value problem for an incompressible\nOldroyd-B model with stress diffusion in two-dimensional upper half plane which\ndescribes the motion of viscoelastic polymeric fluids. From the physical point\nof view, the diffusive coefficient is several orders of magnitude smaller than\nother parameters in the model, and is usually assumed to be zero. However, the\nlink between the diffusive model and the standard one (zero diffusion) via\nvanishing diffusion limit is still unknown from the mathematical point of view,\nin particular for the problem with boundary. Some numerical results [13]\nsuggest that this should be true. In this work, we provide a rigorous\njustification for the vanishing diffusion in $L^\\infty$-norm.", + "authors": "Yinghui Wang, Huanyao Wen", + "published": "2023-05-21", + "updated": "2023-05-21", + "primary_cat": "math.AP", + "cats": [ + "math.AP", + "35Q35, 76A10, 76D10" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2305.10028v1", + "title": "Pyramid Diffusion Models For Low-light Image Enhancement", + "abstract": "Recovering noise-covered details from low-light images is challenging, and\nthe results given by previous methods leave room for improvement. Recent\ndiffusion models show realistic and detailed image generation through a\nsequence of denoising refinements and motivate us to introduce them to\nlow-light image enhancement for recovering realistic details. However, we found\ntwo problems when doing this, i.e., 1) diffusion models keep constant\nresolution in one reverse process, which limits the speed; 2) diffusion models\nsometimes result in global degradation (e.g., RGB shift). To address the above\nproblems, this paper proposes a Pyramid Diffusion model (PyDiff) for low-light\nimage enhancement. PyDiff uses a novel pyramid diffusion method to perform\nsampling in a pyramid resolution style (i.e., progressively increasing\nresolution in one reverse process). Pyramid diffusion makes PyDiff much faster\nthan vanilla diffusion models and introduces no performance degradation.\nFurthermore, PyDiff uses a global corrector to alleviate the global degradation\nthat may occur in the reverse process, significantly improving the performance\nand making the training of diffusion models easier with little additional\ncomputational consumption. Extensive experiments on popular benchmarks show\nthat PyDiff achieves superior performance and efficiency. 
Moreover, PyDiff can\ngeneralize well to unseen noise and illumination distributions.", + "authors": "Dewei Zhou, Zongxin Yang, Yi Yang", + "published": "2023-05-17", + "updated": "2023-05-17", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/0910.2253v1", + "title": "Linearized Kompaneetz equation as a relativistic diffusion", + "abstract": "We show that Kompaneetz equation describing photon diffusion in an\nenvironment of an electron gas, when linearized around its equilibrium\ndistribution, coincides with the relativistic diffusion discussed in recent\npublications. The model of the relativistic diffusion is related to soluble\nmodels of imaginary time quantum mechanics. We suggest some non-linear\ngeneralizations of the relativistic diffusion equation and their astrophysical\napplications (in particular to the Sunyaev-Zeldovich effect).", + "authors": "Z. Haba", + "published": "2009-10-12", + "updated": "2009-10-12", + "primary_cat": "astro-ph.CO", + "cats": [ + "astro-ph.CO" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2110.14851v1", + "title": "Behavior of Spiral Wave Spectra with a Rank-Deficient Diffusion Matrix", + "abstract": "Spiral waves emerge in numerous pattern forming systems and are commonly\nmodeled with reaction-diffusion systems. Some systems used to model biological\nprocesses, such as ion-channel models, fall under the reaction-diffusion\ncategory and often have one or more non-diffusing species which results in a\nrank-deficient diffusion matrix. Previous theoretical research focused on\nspiral spectra for strictly positive diffusion matrices. In this paper, we use\na general two-variable reaction-diffusion system to compare the essential and\nabsolute spectra of spiral waves for strictly positive and rank-deficient\ndiffusion matrices. We show that the essential spectrum is not continuous in\nthe limit of vanishing diffusion in one component. Moreover, we predict\nlocations for the absolute spectrum in the case of a non-diffusing slow\nvariable. Predictions are confirmed numerically for the Barkley and Karma\nmodels.", + "authors": "Stephanie Dodson, Bjorn Sandstede", + "published": "2021-10-28", + "updated": "2021-10-28", + "primary_cat": "math.DS", + "cats": [ + "math.DS" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2111.03914v2", + "title": "A systematic approach for modeling a nonlocal eddy diffusivity", + "abstract": "This study considers advective and diffusive transport of passive scalar\nfields by spatially-varying incompressible flows. Prior studies have shown that\nthe eddy diffusivities governing the mean field transport in such systems can\ngenerally be nonlocal in space and time. While for many flows nonlocal eddy\ndiffusivities are more accurate than commonly-used Boussinesq eddy\ndiffusivities, nonlocal eddy diffusivities are often computationally\ncost-prohibitive to obtain and difficult to implement in practice. We develop a\nsystematic and more cost-effective approach for modeling nonlocal eddy\ndiffusivities using matched moment inverse (MMI) operators. These operators are\nconstructed using only a few leading-order moments of the exact nonlocal eddy\ndiffusivity kernel, which can be easily computed using the inverse macroscopic\nforcing method (IMFM) (Mani and Park (2021)). 
The resulting reduced-order\nmodels for the mean fields that incorporate the modeled eddy diffusivities\noften improve Boussinesq-limit models since they capture leading-order nonlocal\neffects. But more importantly, these models can be expressed as partial\ndifferential equations that are readily solvable using existing computational\nfluid dynamics capabilities rather than as integro-partial differential\nequations.", + "authors": "Jessie Liu, Hannah Williams, Ali Mani", + "published": "2021-11-06", + "updated": "2023-06-28", + "primary_cat": "physics.flu-dyn", + "cats": [ + "physics.flu-dyn" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2305.16269v1", + "title": "UDPM: Upsampling Diffusion Probabilistic Models", + "abstract": "In recent years, Denoising Diffusion Probabilistic Models (DDPM) have caught\nsignificant attention. By composing a Markovian process that starts in the data\ndomain and then gradually adds noise until reaching pure white noise, they\nachieve superior performance in learning data distributions. Yet, these models\nrequire a large number of diffusion steps to produce aesthetically pleasing\nsamples, which is inefficient. In addition, unlike common generative\nadversarial networks, the latent space of diffusion models is not\ninterpretable. In this work, we propose to generalize the denoising diffusion\nprocess into an Upsampling Diffusion Probabilistic Model (UDPM), in which we\nreduce the latent variable dimension in addition to the traditional noise level\naddition. As a result, we are able to sample images of size $256\\times 256$\nwith only 7 diffusion steps, which is less than two orders of magnitude\ncompared to standard DDPMs. We formally develop the Markovian diffusion\nprocesses of the UDPM, and demonstrate its generation capabilities on the\npopular FFHQ, LSUN horses, ImageNet, and AFHQv2 datasets. Another favorable\nproperty of UDPM is that it is very easy to interpolate its latent space, which\nis not the case with standard diffusion models. Our code is available online\n\\url{https://github.com/shadyabh/UDPM}", + "authors": "Shady Abu-Hussein, Raja Giryes", + "published": "2023-05-25", + "updated": "2023-05-25", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.LG", + "eess.IV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1711.09967v2", + "title": "CO diffusion and desorption kinetics in CO$_2$ ices", + "abstract": "Diffusion of species in icy dust grain mantles is a fundamental process that\nshapes the chemistry of interstellar regions; yet measurements of diffusion in\ninterstellar ice analogs are scarce. Here we present measurements of CO\ndiffusion into CO$_2$ ice at low temperatures (T=11--23~K) using CO$_2$\nlongitudinal optical (LO) phonon modes to monitor the level of mixing of\ninitially layered ices. We model the diffusion kinetics using Fick's second law\nand find the temperature dependent diffusion coefficients are well fit by an\nArrhenius equation giving a diffusion barrier of 300 $\\pm$ 40 K. The low\nbarrier along with the diffusion kinetics through isotopically labeled layers\nsuggest that CO diffuses through CO$_2$ along pore surfaces rather than through\nbulk diffusion. 
In complementary experiments, we measure the desorption energy\nof CO from CO$_2$ ices deposited at 11-50 K by temperature-programmed\ndesorption (TPD) and find that the desorption barrier ranges from 1240 $\\pm$ 90\nK to 1410 $\\pm$ 70 K depending on the CO$_2$ deposition temperature and\nresultant ice porosity. The measured CO-CO$_2$ desorption barriers demonstrate\nthat CO binds equally well to CO$_2$ and H$_2$O ices when both are compact. The\nCO-CO$_2$ diffusion-desorption barrier ratio ranges from 0.21-0.24 dependent on\nthe binding environment during diffusion. The diffusion-desorption ratio is\nconsistent with the above hypothesis that the observed diffusion is a surface\nprocess and adds to previous experimental evidence on diffusion in water ice\nthat suggests surface diffusion is important to the mobility of molecules\nwithin interstellar ices.", + "authors": "Ilsa R. Cooke, Karin I. \u00d6berg, Edith C. Fayolle, Zoe Peeler, Jennifer B. Bergner", + "published": "2017-11-27", + "updated": "2017-12-18", + "primary_cat": "astro-ph.GA", + "cats": [ + "astro-ph.GA" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2307.06272v1", + "title": "Exposing the Fake: Effective Diffusion-Generated Images Detection", + "abstract": "Image synthesis has seen significant advancements with the advent of\ndiffusion-based generative models like Denoising Diffusion Probabilistic Models\n(DDPM) and text-to-image diffusion models. Despite their efficacy, there is a\ndearth of research dedicated to detecting diffusion-generated images, which\ncould pose potential security and privacy risks. This paper addresses this gap\nby proposing a novel detection method called Stepwise Error for\nDiffusion-generated Image Detection (SeDID). Comprising statistical-based\n$\\text{SeDID}_{\\text{Stat}}$ and neural network-based\n$\\text{SeDID}_{\\text{NNs}}$, SeDID exploits the unique attributes of diffusion\nmodels, namely deterministic reverse and deterministic denoising computation\nerrors. Our evaluations demonstrate SeDID's superior performance over existing\nmethods when applied to diffusion models. Thus, our work makes a pivotal\ncontribution to distinguishing diffusion model-generated images, marking a\nsignificant step in the domain of artificial intelligence security.", + "authors": "Ruipeng Ma, Jinhao Duan, Fei Kong, Xiaoshuang Shi, Kaidi Xu", + "published": "2023-07-12", + "updated": "2023-07-12", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV", + "cs.CR", + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2404.12761v1", + "title": "Universality of giant diffusion in tilted periodic potentials", + "abstract": "Giant diffusion, where the diffusion coefficient of a Brownian particle in a\nperiodic potential with an external force is significantly enhanced by the\nexternal force, is a non-trivial non-equilibrium phenomenon. We propose a\nsimple stochastic model of giant diffusion, which is based on a biased\ncontinuous-time random walk (CTRW). In this model, we introduce a flight time\nin the biased CTRW. We derive the diffusion coefficients of this model by the\nrenewal theory and find that there is a maximum diffusion coefficient when the\nbias is changed. Giant diffusion is universally observed in the sense that\nthere is a peak of the diffusion coefficient for any tilted periodic potentials\nand the degree of the diffusivity is greatly enhanced especially for\nlow-temperature regimes. 
The biased CTRW models with flight times are applied\nto diffusion under three tilted periodic potentials. Furthermore, the\ntemperature dependence of the maximum diffusion coefficient and the external\nforce that attains the maximum are presented for diffusion under a tilted\nsawtooth potential.", + "authors": "Kento Iida, Andreas Dechant, Takuma Akimoto", + "published": "2024-04-19", + "updated": "2024-04-19", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2207.09786v1", + "title": "Non-Uniform Diffusion Models", + "abstract": "Diffusion models have emerged as one of the most promising frameworks for\ndeep generative modeling. In this work, we explore the potential of non-uniform\ndiffusion models. We show that non-uniform diffusion leads to multi-scale\ndiffusion models which have similar structure to this of multi-scale\nnormalizing flows. We experimentally find that in the same or less training\ntime, the multi-scale diffusion model achieves better FID score than the\nstandard uniform diffusion model. More importantly, it generates samples $4.4$\ntimes faster in $128\\times 128$ resolution. The speed-up is expected to be\nhigher in higher resolutions where more scales are used. Moreover, we show that\nnon-uniform diffusion leads to a novel estimator for the conditional score\nfunction which achieves on par performance with the state-of-the-art\nconditional denoising estimator. Our theoretical and experimental findings are\naccompanied by an open source library MSDiff which can facilitate further\nresearch of non-uniform diffusion models.", + "authors": "Georgios Batzolis, Jan Stanczuk, Carola-Bibiane Sch\u00f6nlieb, Christian Etmann", + "published": "2022-07-20", + "updated": "2022-07-20", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1708.06890v1", + "title": "Collaborative Inference of Coexisting Information Diffusions", + "abstract": "Recently, \\textit{diffusion history inference} has become an emerging\nresearch topic due to its great benefits for various applications, whose\npurpose is to reconstruct the missing histories of information diffusion traces\naccording to incomplete observations. The existing methods, however, often\nfocus only on single information diffusion trace, while in a real-world social\nnetwork, there often coexist multiple information diffusions over the same\nnetwork. In this paper, we propose a novel approach called Collaborative\nInference Model (CIM) for the problem of the inference of coexisting\ninformation diffusions. By exploiting the synergism between the coexisting\ninformation diffusions, CIM holistically models multiple information diffusions\nas a sparse 4th-order tensor called Coexisting Diffusions Tensor (CDT) without\nany prior assumption of diffusion models, and collaboratively infers the\nhistories of the coexisting information diffusions via a low-rank approximation\nof CDT with a fusion of heterogeneous constraints generated from additional\ndata sources. To improve the efficiency, we further propose an optimal\nalgorithm called Time Window based Parallel Decomposition Algorithm (TWPDA),\nwhich can speed up the inference without compromise on the accuracy by\nutilizing the temporal locality of information diffusions. 
The extensive\nexperiments conducted on real world datasets and synthetic datasets verify the\neffectiveness and efficiency of CIM and TWPDA.", + "authors": "Yanchao Sun, Cong Qian, Ning Yang, Philip S. Yu", + "published": "2017-08-23", + "updated": "2017-08-23", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2212.10777v4", + "title": "Hierarchically branched diffusion models leverage dataset structure for class-conditional generation", + "abstract": "Class-labeled datasets, particularly those common in scientific domains, are\nrife with internal structure, yet current class-conditional diffusion models\nignore these relationships and implicitly diffuse on all classes in a flat\nfashion. To leverage this structure, we propose hierarchically branched\ndiffusion models as a novel framework for class-conditional generation.\nBranched diffusion models rely on the same diffusion process as traditional\nmodels, but learn reverse diffusion separately for each branch of a hierarchy.\nWe highlight several advantages of branched diffusion models over the current\nstate-of-the-art methods for class-conditional diffusion, including extension\nto novel classes in a continual-learning setting, a more sophisticated form of\nanalogy-based conditional generation (i.e. transmutation), and a novel\ninterpretability into the generation process. We extensively evaluate branched\ndiffusion models on several benchmark and large real-world scientific datasets\nspanning many data modalities.", + "authors": "Alex M. Tseng, Max Shen, Tommaso Biancalani, Gabriele Scalia", + "published": "2022-12-21", + "updated": "2024-02-01", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2304.01565v1", + "title": "A Survey on Graph Diffusion Models: Generative AI in Science for Molecule, Protein and Material", + "abstract": "Diffusion models have become a new SOTA generative modeling method in various\nfields, for which there are multiple survey works that provide an overall\nsurvey. With the number of articles on diffusion models increasing\nexponentially in the past few years, there is an increasing need for surveys of\ndiffusion models on specific fields. In this work, we are committed to\nconducting a survey on the graph diffusion models. Even though our focus is to\ncover the progress of diffusion models in graphs, we first briefly summarize\nhow other generative modeling methods are used for graphs. After that, we\nintroduce the mechanism of diffusion models in various forms, which facilitates\nthe discussion on the graph diffusion models. The applications of graph\ndiffusion models mainly fall into the category of AI-generated content (AIGC)\nin science, for which we mainly focus on how graph diffusion models are\nutilized for generating molecules and proteins but also cover other cases,\nincluding materials design. 
Moreover, we discuss the issue of evaluating\ndiffusion models in the graph domain and the existing challenges.", + "authors": "Mengchun Zhang, Maryam Qamar, Taegoo Kang, Yuna Jung, Chenshuang Zhang, Sung-Ho Bae, Chaoning Zhang", + "published": "2023-04-04", + "updated": "2023-04-04", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2305.09605v1", + "title": "Expressiveness Remarks for Denoising Diffusion Models and Samplers", + "abstract": "Denoising diffusion models are a class of generative models which have\nrecently achieved state-of-the-art results across many domains. Gradual noise\nis added to the data using a diffusion process, which transforms the data\ndistribution into a Gaussian. Samples from the generative model are then\nobtained by simulating an approximation of the time reversal of this diffusion\ninitialized by Gaussian samples. Recent research has explored adapting\ndiffusion models for sampling and inference tasks. In this paper, we leverage\nknown connections to stochastic control akin to the F\\\"ollmer drift to extend\nestablished neural network approximation results for the F\\\"ollmer drift to\ndenoising diffusion models and samplers.", + "authors": "Francisco Vargas, Teodora Reu, Anna Kerekes", + "published": "2023-05-16", + "updated": "2023-05-16", + "primary_cat": "stat.ML", + "cats": [ + "stat.ML", + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1308.3393v2", + "title": "Cosmology with matter diffusion", + "abstract": "We construct a viable cosmological model based on velocity diffusion of\nmatter particles. In order to ensure the conservation of the total\nenergy-momentum tensor in the presence of diffusion, we include a cosmological\nscalar field $\\phi$ which we identify with the dark energy component of the\nUniverse. The model is characterized by only one new degree of freedom, the\ndiffusion parameter $\\sigma$. The standard $\\Lambda$CDM model can be recovered\nby setting $\\sigma=0$. If diffusion takes place ($\\sigma >0$) the dynamics of\nthe matter and of the dark energy fields are coupled. We argue that the\nexistence of a diffusion mechanism in the Universe can serve as a theoretical\nmotivation for interacting models. We constrain the background dynamics of the\ndiffusion model with Supernovae, H(z) and BAO data. We also perform a\nperturbative analysis of this model in order to understand structure formation\nin the Universe. We calculate the impact of diffusion both on the CMB spectrum,\nwith particular attention to the integrated Sachs-Wolfe signal, and on the\nmatter power spectrum $P(k)$. The latter analysis places strong constraints on\nthe magnitude of the diffusion mechanism but does not rule out the model.", + "authors": "Simone Calogero, Hermano Velten", + "published": "2013-08-15", + "updated": "2013-10-29", + "primary_cat": "astro-ph.CO", + "cats": [ + "astro-ph.CO" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2202.05830v1", + "title": "Learning Fast Samplers for Diffusion Models by Differentiating Through Sample Quality", + "abstract": "Diffusion models have emerged as an expressive family of generative models\nrivaling GANs in sample quality and autoregressive models in likelihood scores.\nStandard diffusion models typically require hundreds of forward passes through\nthe model to generate a single high-fidelity sample. 
We introduce\nDifferentiable Diffusion Sampler Search (DDSS): a method that optimizes fast\nsamplers for any pre-trained diffusion model by differentiating through sample\nquality scores. We also present Generalized Gaussian Diffusion Models (GGDM), a\nfamily of flexible non-Markovian samplers for diffusion models. We show that\noptimizing the degrees of freedom of GGDM samplers by maximizing sample quality\nscores via gradient descent leads to improved sample quality. Our optimization\nprocedure backpropagates through the sampling process using the\nreparametrization trick and gradient rematerialization. DDSS achieves strong\nresults on unconditional image generation across various datasets (e.g., FID\nscores on LSUN church 128x128 of 11.6 with only 10 inference steps, and 4.82\nwith 20 steps, compared to 51.1 and 14.9 with strongest DDPM/DDIM baselines).\nOur method is compatible with any pre-trained diffusion model without\nfine-tuning or re-training required.", + "authors": "Daniel Watson, William Chan, Jonathan Ho, Mohammad Norouzi", + "published": "2022-02-11", + "updated": "2022-02-11", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1409.3132v1", + "title": "Front propagation in reaction-diffusion systems with anomalous diffusion", + "abstract": "A numerical study of the role of anomalous diffusion in front propagation in\nreaction-diffusion systems is presented. Three models of anomalous diffusion\nare considered: fractional diffusion, tempered fractional diffusion, and a\nmodel that combines fractional diffusion and regular diffusion. The reaction\nkinetics corresponds to a Fisher-Kolmogorov nonlinearity. The numerical method\nis based on a finite-difference operator splitting algorithm with an explicit\nEuler step for the time advance of the reaction kinetics, and a Crank-Nicholson\nsemi-implicit time step for the transport operator. The anomalous diffusion\noperators are discretized using an upwind, flux-conserving, Grunwald-Letnikov\nfinite-difference scheme applied to the regularized fractional derivatives.\nWith fractional diffusion of order $\\alpha$, fronts exhibit exponential\nacceleration, $a_L(t) \\sim e^{\\gamma t/\\alpha}$, and develop algebraic decaying\ntails, $\\phi \\sim 1/x^{\\alpha}$. In the case of tempered fractional diffusion,\nthis phenomenology prevails in the intermediate asymptotic regime\n $\\left(\\chi t \\right)^{1/\\alpha} \\ll x \\ll 1/\\lambda$, where $1/\\lambda$ is\nthe scale of the tempering. Outside this regime, i.e. for $x > 1/\\lambda$, the\ntail exhibits the tempered decay $\\phi \\sim e^{-\\lambda x}/x^{\\alpha+1}$, and\nthe front velocity approaches the terminal speed $v_*=\n\\left(\\gamma-\\lambda^\\alpha \\chi\\right)/ \\lambda$. Of particular interest is\nthe study of the interplay of regular and fractional diffusion. It is shown\nthat the main role of regular diffusion is to delay the onset of front\nacceleration. In particular, the crossover time, $t_c$, to transition to the\naccelerated fractional regime exhibits a logarithmic scaling of the form $t_c\n\\sim \\log \\left(\\chi_d/\\chi_f\\right)$ where $\\chi_d$ and $\\chi_f$ are the\nregular and fractional diffusivities.", + "authors": "D. 
del-Castillo-Negrete", + "published": "2014-09-10", + "updated": "2014-09-10", + "primary_cat": "nlin.PS", + "cats": [ + "nlin.PS", + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1906.02856v1", + "title": "Diffusion on dynamic contact networks with indirect transmission links", + "abstract": "Modelling diffusion processes on dynamic contact networks is an important\nresearch area for epidemiology, marketing, cybersecurity, and ecology. However,\ncurrent diffusion models cannot capture transmissions occurring for indirect\ninteractions. For example, an airborne infected individual releases infectious\nparticles at locations that can suspend in the air and infect susceptible\nindividuals arriving even after the infected individual left. Thus, current\ndiffusion models miss transmissions during indirect interactions. In this\nthesis, a novel diffusion model called the same place different time\ntransmission based diffusion (SPDT) is introduced to take into account the\ntransmissions through indirect interactions. The behaviour of SPDT diffusion is\nanalysed on real dynamic contact networks and a significant amplification in\ndiffusion dynamics is observed. The SPDT model also introduces some novel\nbehaviours different from current diffusion models. In this work, a new SPDT\ngraph model is also developed to generate synthetic traces to explore SPDT\ndiffusion in several scenarios. The analysis shows that the emergence of new\ndiffusion becomes common thanks to the inclusion of indirect transmissions\nwithin the SPDT model. This work finally investigates how diffusion can be\ncontrolled and develops new methods to hinder diffusion.", + "authors": "Md Shahzamal", + "published": "2019-06-07", + "updated": "2019-06-07", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI", + "physics.soc-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1202.6521v1", + "title": "Coherence transition in degenerate diffusion equations with mean field coupling", + "abstract": "We introduce non-linear diffusion in a classical diffusion advection model\nwith non local aggregative coupling on the circle, that exhibits a transition\nfrom an uncoherent state to a coherent one when the coupling strength is\nincreased. We show first that all solutions of the equation converge to the set\nof equilibria, second that the set of equilibria undergoes a bifurcation\nrepresenting the transition to coherence when the coupling strength is\nincreased. These two properties are similar to the situation with linear\ndiffusion. Nevertheless nonlinear diffusion alters the transition scenarios,\nwhich are different when the diffusion is sub-quadratic and when the diffusion\nis super-quadratic. When the diffusion is super-quadratic, it results in a\nmultistability region that precedes the pitchfork bifurcation at which the\nuncoherent equilibrium loses stability.
When the diffusion is quadratic the\npitchfork bifurcation at the onset of coherence is infinitely degenerate and a\ndisk of equilibria exists for the critical value of the coupling strength.\nAnother impact of nonlinear diffusion is that coherent equilibria become\nlocalized when advection is strong enough, a phenomenon that is precluded when\nthe diffusion is linear.", + "authors": "Khashayar Pakdaman, Xavier Pellegrin", + "published": "2012-02-29", + "updated": "2012-02-29", + "primary_cat": "nlin.AO", + "cats": [ + "nlin.AO", + "37N25, 92B25, 35Q35, 35K55, 37B25, 82C26" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/0805.0647v1", + "title": "Scaling of Rough Surfaces: Effects of Surface Diffusion on Growth and Roughness Exponents", + "abstract": "Random deposition model with surface diffusion over several next nearest\nneighbours is studied. The results agree with the results obtained by Family\nfor the case of nearest neighbour diffusion [F. Family, J. Phys. A 19(8), L441,\n1986]. However for larger diffusion steps, the growth exponent and the\nroughness exponent show interesting dependence on diffusion length.", + "authors": "Baisakhi Mal, Subhankar Ray, J. Shamanna", + "published": "2008-05-06", + "updated": "2008-05-06", + "primary_cat": "cond-mat.soft", + "cats": [ + "cond-mat.soft", + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2404.04629v1", + "title": "DifFUSER: Diffusion Model for Robust Multi-Sensor Fusion in 3D Object Detection and BEV Segmentation", + "abstract": "Diffusion models have recently gained prominence as powerful deep generative\nmodels, demonstrating unmatched performance across various domains. However,\ntheir potential in multi-sensor fusion remains largely unexplored. In this\nwork, we introduce DifFUSER, a novel approach that leverages diffusion models\nfor multi-modal fusion in 3D object detection and BEV map segmentation.\nBenefiting from the inherent denoising property of diffusion, DifFUSER is able\nto refine or even synthesize sensor features in case of sensor malfunction,\nthereby improving the quality of the fused output. In terms of architecture,\nour DifFUSER blocks are chained together in a hierarchical BiFPN fashion,\ntermed cMini-BiFPN, offering an alternative architecture for latent diffusion.\nWe further introduce a Gated Self-conditioned Modulated (GSM) latent diffusion\nmodule together with a Progressive Sensor Dropout Training (PSDT) paradigm,\ndesigned to add stronger conditioning to the diffusion process and robustness\nto sensor failures. Our extensive evaluations on the Nuscenes dataset reveal\nthat DifFUSER not only achieves state-of-the-art performance with a 69.1% mIOU\nin BEV map segmentation tasks but also competes effectively with leading\ntransformer-based fusion techniques in 3D object detection.", + "authors": "Duy-Tho Le, Hengcan Shi, Jianfei Cai, Hamid Rezatofighi", + "published": "2024-04-06", + "updated": "2024-04-06", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2312.14589v1", + "title": "Non-Denoising Forward-Time Diffusions", + "abstract": "The scope of this paper is generative modeling through diffusion processes.\nAn approach falling within this paradigm is the work of Song et al. (2021),\nwhich relies on a time-reversal argument to construct a diffusion process\ntargeting the desired data distribution.
We show that the time-reversal\nargument, common to all denoising diffusion probabilistic modeling proposals,\nis not necessary. We obtain diffusion processes targeting the desired data\ndistribution by taking appropriate mixtures of diffusion bridges. The resulting\ntransport is exact by construction, allows for greater flexibility in choosing\nthe dynamics of the underlying diffusion, and can be approximated by means of a\nneural network via novel training objectives. We develop a unifying view of the\ndrift adjustments corresponding to our and to time-reversal approaches and make\nuse of this representation to inspect the inner workings of diffusion-based\ngenerative models. Finally, we leverage on scalable simulation and inference\ntechniques common in spatial statistics to move beyond fully factorial\ndistributions in the underlying diffusion dynamics. The methodological advances\ncontained in this work contribute toward establishing a general framework for\ngenerative modeling based on diffusion processes.", + "authors": "Stefano Peluchetti", + "published": "2023-12-22", + "updated": "2023-12-22", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "stat.ML" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2006.00003v1", + "title": "Coupling particle-based reaction-diffusion simulations with reservoirs mediated by reaction-diffusion PDEs", + "abstract": "Open biochemical systems of interacting molecules are ubiquitous in\nlife-related processes. However, established computational methodologies, like\nmolecular dynamics, are still mostly constrained to closed systems and\ntimescales too small to be relevant for life processes. Alternatively,\nparticle-based reaction-diffusion models are currently the most accurate and\ncomputationally feasible approach at these scales. Their efficiency lies in\nmodeling entire molecules as particles that can diffuse and interact with each\nother. In this work, we develop modeling and numerical schemes for\nparticle-based reaction-diffusion in an open setting, where the reservoirs are\nmediated by reaction-diffusion PDEs. We derive two important theoretical\nresults. The first one is the mean-field for open systems of diffusing\nparticles; the second one is the mean-field for a particle-based\nreaction-diffusion system with second-order reactions. We employ these two\nresults to develop a numerical scheme that consistently couples particle-based\nreaction-diffusion processes with reaction-diffusion PDEs. This allows modeling\nopen biochemical systems in contact with reservoirs that are time-dependent and\nspatially inhomogeneous, as in many relevant real-world applications.", + "authors": "Margarita Kostr\u00e9, Christof Sch\u00fctte, Frank No\u00e9, Mauricio J. del Razo", + "published": "2020-05-29", + "updated": "2020-05-29", + "primary_cat": "q-bio.QM", + "cats": [ + "q-bio.QM", + "physics.chem-ph", + "physics.comp-ph", + "92C40, 92C45, 60J70, 60Gxx, 70Lxx" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2211.08892v2", + "title": "Fast Graph Generation via Spectral Diffusion", + "abstract": "Generating graph-structured data is a challenging problem, which requires\nlearning the underlying distribution of graphs. Various models such as graph\nVAE, graph GANs, and graph diffusion models have been proposed to generate\nmeaningful and reliable graphs, among which the diffusion models have achieved\nstate-of-the-art performance. 
In this paper, we argue that running full-rank\ndiffusion SDEs on the whole graph adjacency matrix space hinders diffusion\nmodels from learning graph topology generation, and hence significantly\ndeteriorates the quality of generated graph data. To address this limitation,\nwe propose an efficient yet effective Graph Spectral Diffusion Model (GSDM),\nwhich is driven by low-rank diffusion SDEs on the graph spectrum space. Our\nspectral diffusion model is further proven to enjoy a substantially stronger\ntheoretical guarantee than standard diffusion models. Extensive experiments\nacross various datasets demonstrate that, our proposed GSDM turns out to be the\nSOTA model, by exhibiting both significantly higher generation quality and much\nless computational consumption than the baselines.", + "authors": "Tianze Luo, Zhanfeng Mo, Sinno Jialin Pan", + "published": "2022-11-16", + "updated": "2022-11-19", + "primary_cat": "cs.LG", + "cats": [ + "cs.LG", + "cs.AI" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1907.09989v1", + "title": "Rogue Heat and Diffusion Waves", + "abstract": "In this paper, we numerically show and discuss the existence and\ncharacteristics of rogue heat and diffusion waves. More specifically, we use\ntwo different nonlinear heat (diffusion) models and show that modulation\ninstability leads to the generation of unexpected and large fluctuations in the\nframe of these models. These fluctuations can be named as rogue heat\n(diffusion) waves. We discuss the properties and statistics of such rogue\nwaves. Our results can find many important applications in many branches such\nas the nonlinear heat transfer, turbulence, financial mathematics, chemical or\nbiological diffusion, nuclear reactions, subsurface water infiltration, and\npore water pressure diffusion modeled in the frame of nonlinear Terzaghi\nconsolidation models, just to name a few.", + "authors": "Cihan Bayindir", + "published": "2019-07-18", + "updated": "2019-07-18", + "primary_cat": "nlin.PS", + "cats": [ + "nlin.PS", + "physics.flu-dyn" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2306.07491v2", + "title": "Exact sharp-fronted solutions for nonlinear diffusion on evolving domains", + "abstract": "Models of diffusive processes that occur on evolving domains are frequently\nemployed to describe biological and physical phenomena, such as diffusion\nwithin expanding tissues or substrates. Previous investigations into these\nmodels either report numerical solutions or require an assumption of linear\ndiffusion to determine exact solutions. Unfortunately, numerical solutions do\nnot reveal the relationship between the model parameters and the solution\nfeatures. Additionally, experimental observations typically report the presence\nof sharp fronts, which are not captured by linear diffusion. Here we address\nboth limitations by presenting exact sharp-fronted solutions to a model of\ndegenerate nonlinear diffusion on a growing domain. We obtain the solution by\nidentifying a series of transformations that converts the model of a nonlinear\ndiffusive process on an evolving domain to a nonlinear diffusion equation on a\nfixed domain, which admits known exact solutions for certain choices of\ndiffusivity functions. We determine expressions for critical time scales and\ndomain growth rates such that the diffusive population never reaches the domain\nboundaries and hence the solution remains valid.", + "authors": "Stuart T. Johnston, Matthew J. 
Simpson", + "published": "2023-06-13", + "updated": "2023-10-06", + "primary_cat": "q-bio.PE", + "cats": [ + "q-bio.PE" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/0907.0417v1", + "title": "Microscopic origin of the jump diffusion model", + "abstract": "The present paper is aimed at studying the microscopic origin of the jump\ndiffusion. Starting from the $N$-body Liouville equation and making only the\nassumption that molecular reorientation is overdamped, we derive and solve the\nnew (hereafter generalized diffusion) equation. This is the most general\nequation which governs orientational relaxation of an equilibrium molecular\nensemble in the hindered rotation limit and in the long time limit. The\ngeneralized diffusion equation is an extension of the small-angle diffusion\nequation beyond the impact approximation. We establish the conditions under\nwhich the generalized diffusion equation can be identified with the jump\ndiffusion equation, and also discuss the similarities and differences between\nthe two approaches.", + "authors": "M. F. Gelin, D. S. Kosov", + "published": "2009-07-02", + "updated": "2009-07-02", + "primary_cat": "cond-mat.stat-mech", + "cats": [ + "cond-mat.stat-mech" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1712.02290v2", + "title": "Effects of nongaussian diffusion on \"isotropic diffusion measurements'': an ex-vivo microimaging and simulation study", + "abstract": "Designing novel diffusion-weighted pulse sequences to probe tissue\nmicrostructure beyond the conventional Stejskal-Tanner family is currently of\nbroad interest. One such technique, multidimensional diffusion MRI, has been\nrecently proposed to afford model-free decomposition of diffusion signal\nkurtosis into terms originating from either ensemble variance of isotropic\ndiffusivity or microscopic diffusion anisotropy. This ability rests on the\nassumption that diffusion can be described as a sum of multiple Gaussian\ncompartments, but this is often not strictly fulfilled. The effects of\nnongaussian diffusion on single shot isotropic diffusion sequences were first\nconsidered in detail by de Swiet and Mitra in 1996. They showed theoretically\nthat anisotropic compartments lead to anisotropic time dependence of the\ndiffusion tensors, which causes the measured isotropic diffusivity to depend on\ngradient frame orientation. Here we show how such deviations from the multiple\nGaussian compartments assumption conflates orientation dispersion with ensemble\nvariance in isotropic diffusivity. Second, we consider additional contributions\nto the apparent variance in isotropic diffusivity arising due to\nintracompartmental kurtosis. These will likewise depend on gradient frame\norientation. 
We illustrate the potential importance of these confounds with\nanalytical expressions, numerical simulations in simple model geometries, and\nmicroimaging experiments in fixed spinal cord using isotropic diffusion\nencoding waveforms with 7.5 ms duration and 3000 mT/m maximum amplitude.", + "authors": "Sune N\u00f8rh\u00f8j Jespersen, Jonas Lynge Olesen, Andrada Ianu\u015f, Noam Shemesh", + "published": "2017-12-06", + "updated": "2019-02-04", + "primary_cat": "physics.bio-ph", + "cats": [ + "physics.bio-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1807.03744v2", + "title": "Enhanced Diffusivity in Perturbed Senile Reinforced Random Walk Models", + "abstract": "We consider diffusivity of random walks with transition probabilities\ndepending on the number of consecutive traversals of the last traversed edge,\nthe so called senile reinforced random walk (SeRW). In one dimension, the walk\nis known to be sub-diffusive with identity reinforcement function. We perturb\nthe model by introducing a small probability $\\delta$ of escaping the last\ntraversed edge at each step. The perturbed SeRW model is diffusive for any\n$\\delta >0 $, with enhanced diffusivity ($\\gg O(\\delta^2)$) in the small\n$\\delta$ regime. We further study stochastically perturbed SeRW models by\nhaving the last edge escape probability of the form $\\delta\\, \\xi_n$ with\n$\\xi_n$'s being independent random variables. Enhanced diffusivity in such\nmodels are logarithmically close to the so called residual diffusivity\n(positive in the zero $\\delta$ limit), with diffusivity between\n$O\\left(\\frac{1}{|\\log\\delta |}\\right)$ and\n$O\\left(\\frac{1}{\\log|\\log\\delta|}\\right)$. Finally, we generalize our results\nto higher dimensions where the unperturbed model is already diffusive. The\nenhanced diffusivity can be as much as $O(\\log^{-2}\\delta)$.", + "authors": "Thu Dinh, Jack Xin", + "published": "2018-07-10", + "updated": "2020-03-16", + "primary_cat": "math.PR", + "cats": [ + "math.PR", + "60G50, 60H30, 58J37" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/1906.02405v1", + "title": "Indirect interactions influence contact network structure and diffusion dynamics", + "abstract": "Interaction patterns at the individual level influence the behaviour of\ndiffusion over contact networks. Most of the current diffusion models only\nconsider direct interactions among individuals to build underlying infectious\nitems transmission networks. However, delayed indirect interactions, where a\nsusceptible individual interacts with infectious items after the infected\nindividual has left the interaction space, can also cause transmission events.\nWe define a diffusion model called the same place different time transmission\n(SPDT) based diffusion that considers transmission links for these indirect\ninteractions. Our SPDT model changes the network dynamics where the\nconnectivity among individuals varies with the decay rates of link infectivity.\nWe investigate SPDT diffusion behaviours by simulating airborne disease\nspreading on data-driven contact networks. The SPDT model significantly\nincreases diffusion dynamics (particularly for networks with low link densities\nwhere indirect interactions create new infection pathways) and is capable of\nproducing realistic disease reproduction number. Our results show that the SPDT\nmodel is significantly more likely to lead to outbreaks compared to current\ndiffusion models with direct interactions. 
We find that the diffusion dynamics\nwith including indirect links are not reproducible by the current models,\nhighlighting the importance of the indirect links for predicting outbreaks.", + "authors": "Md Shahzamal, Raja Jurdak, Bernard Mans, Frank de Hoog", + "published": "2019-06-06", + "updated": "2019-06-06", + "primary_cat": "cs.SI", + "cats": [ + "cs.SI", + "physics.soc-ph" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2303.09295v1", + "title": "DIRE for Diffusion-Generated Image Detection", + "abstract": "Diffusion models have shown remarkable success in visual synthesis, but have\nalso raised concerns about potential abuse for malicious purposes. In this\npaper, we seek to build a detector for telling apart real images from\ndiffusion-generated images. We find that existing detectors struggle to detect\nimages generated by diffusion models, even if we include generated images from\na specific diffusion model in their training data. To address this issue, we\npropose a novel image representation called DIffusion Reconstruction Error\n(DIRE), which measures the error between an input image and its reconstruction\ncounterpart by a pre-trained diffusion model. We observe that\ndiffusion-generated images can be approximately reconstructed by a diffusion\nmodel while real images cannot. It provides a hint that DIRE can serve as a\nbridge to distinguish generated and real images. DIRE provides an effective way\nto detect images generated by most diffusion models, and it is general for\ndetecting generated images from unseen diffusion models and robust to various\nperturbations. Furthermore, we establish a comprehensive diffusion-generated\nbenchmark including images generated by eight diffusion models to evaluate the\nperformance of diffusion-generated image detectors. Extensive experiments on\nour collected benchmark demonstrate that DIRE exhibits superiority over\nprevious generated-image detectors. The code and dataset are available at\nhttps://github.com/ZhendongWang6/DIRE.", + "authors": "Zhendong Wang, Jianmin Bao, Wengang Zhou, Weilun Wang, Hezhen Hu, Hong Chen, Houqiang Li", + "published": "2023-03-16", + "updated": "2023-03-16", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Diffusion AND Model" + }, + { + "url": "http://arxiv.org/abs/2404.08926v2", + "title": "Diffusion Models Meet Remote Sensing: Principles, Methods, and Perspectives", + "abstract": "As a newly emerging advance in deep generative models, diffusion models have\nachieved state-of-the-art results in many fields, including computer vision,\nnatural language processing, and molecule design. The remote sensing community\nhas also noticed the powerful ability of diffusion models and quickly applied\nthem to a variety of tasks for image processing. Given the rapid increase in\nresearch on diffusion models in the field of remote sensing, it is necessary to\nconduct a comprehensive review of existing diffusion model-based remote sensing\npapers, to help researchers recognize the potential of diffusion models and\nprovide some directions for further exploration. Specifically, this paper first\nintroduces the theoretical background of diffusion models, and then\nsystematically reviews the applications of diffusion models in remote sensing,\nincluding image generation, enhancement, and interpretation. 
Finally, the\nlimitations of existing remote sensing diffusion models and worthy research\ndirections for further exploration are discussed and summarized.", + "authors": "Yidan Liu, Jun Yue, Shaobo Xia, Pedram Ghamisi, Weiying Xie, Leyuan Fang", + "published": "2024-04-13", + "updated": "2024-04-17", + "primary_cat": "cs.CV", + "cats": [ + "cs.CV" + ], + "category": "Diffusion AND Model" + } +] \ No newline at end of file