svjack committed on
Commit fc791e3 · verified · 1 parent: 3df8437

Upload image_netayume_lumina_t2i.json

Files changed (1)
  1. image_netayume_lumina_t2i.json +597 -0
image_netayume_lumina_t2i.json ADDED
@@ -0,0 +1,597 @@
+ {
+ "id": "9ae6082b-c7f4-433c-9971-7a8f65a3ea65",
+ "revision": 0,
+ "last_node_id": 22,
+ "last_link_id": 21,
+ "nodes": [
+ {
+ "id": 13,
+ "type": "EmptySD3LatentImage",
+ "pos": [
+ 70,
+ 390
+ ],
+ "size": [
+ 315,
+ 106
+ ],
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "slot_index": 0,
+ "links": [
+ 17
+ ]
+ }
+ ],
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "EmptySD3LatentImage"
+ },
+ "widgets_values": [
+ 1024,
+ 1024,
+ 1
+ ]
+ },
+ {
+ "id": 17,
+ "type": "MarkdownNote",
+ "pos": [
+ 60,
+ 560
+ ],
+ "size": [
+ 340,
+ 100
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "title": "Note: Image size",
+ "properties": {},
+ "widgets_values": [
+ "This model is best at 1024x1024, but you can try higher like 1024x1536"
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 11,
+ "type": "ModelSamplingAuraFlow",
+ "pos": [
+ 890,
+ 170
+ ],
+ "size": [
+ 310,
+ 60
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 21
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "slot_index": 0,
+ "links": [
+ 13
+ ]
+ }
+ ],
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "ModelSamplingAuraFlow"
+ },
+ "widgets_values": [
+ 4
+ ]
+ },
+ {
+ "id": 3,
+ "type": "KSampler",
+ "pos": [
+ 890,
+ 270
+ ],
+ "size": [
+ 315,
+ 262
+ ],
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 13
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "link": 4
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "link": 6
+ },
+ {
+ "name": "latent_image",
+ "type": "LATENT",
+ "link": 17
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "slot_index": 0,
+ "links": [
+ 14
+ ]
+ }
+ ],
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "KSampler"
+ },
+ "widgets_values": [
+ 45043951788451,
+ "randomize",
+ 30,
+ 4,
+ "res_multistep",
+ "simple",
+ 1
+ ]
+ },
+ {
+ "id": 8,
+ "type": "VAEDecode",
+ "pos": [
+ 890,
+ 580
+ ],
+ "size": [
+ 210,
+ 46
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "samples",
+ "type": "LATENT",
+ "link": 14
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 8
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "slot_index": 0,
+ "links": [
+ 16
+ ]
+ }
+ ],
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "VAEDecode"
+ },
+ "widgets_values": []
+ },
+ {
+ "id": 9,
+ "type": "SaveImage",
+ "pos": [
+ 1230,
+ 170
+ ],
+ "size": [
+ 820,
+ 850
+ ],
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 16
+ }
+ ],
+ "outputs": [],
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64"
+ },
+ "widgets_values": [
+ "NetaYume_Lumina_3.5"
+ ]
+ },
+ {
+ "id": 19,
+ "type": "MarkdownNote",
+ "pos": [
+ 1240,
+ 30
+ ],
+ "size": [
+ 450,
+ 88
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "title": "Note: Output files",
+ "properties": {},
+ "widgets_values": [
+ "The output image will be saved in the `ComfyUI/output` folder."
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 7,
+ "type": "CLIPTextEncode",
+ "pos": [
+ 430.2906415183897,
+ 432.95603007427013
+ ],
+ "size": [
+ 425.27801513671875,
+ 180.6060791015625
+ ],
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 5
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "slot_index": 0,
+ "links": [
+ 6
+ ]
+ }
+ ],
+ "title": "CLIP Text Encode (Negative Prompt)",
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "CLIPTextEncode"
+ },
+ "widgets_values": [
+ "You are an assistant designed to generate low-quality images based on textual prompts <Prompt Start> blurry, worst quality, low quality, jpeg artifacts, signature, watermark, username, error, deformed hands, bad anatomy, extra limbs, poorly drawn hands, poorly drawn face, mutation, deformed, extra eyes, extra arms, extra legs, malformed limbs, fused fingers, too many fingers, long neck, cross-eyed, bad proportions, missing arms, missing legs, extra digit, fewer digits, cropped"
+ ],
+ "color": "#223",
+ "bgcolor": "#335"
+ },
+ {
+ "id": 18,
+ "type": "MarkdownNote",
+ "pos": [
+ 776.2325267759479,
+ -97.3529427516946
+ ],
+ "size": [
+ 440,
+ 120
+ ],
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "title": "Note: Prompt",
+ "properties": {},
+ "widgets_values": [
+ "Check the prompt book [here](https://nieta-art.feishu.cn/wiki/RY3GwpT59icIQlkWXEfcCqIMnQd)\n\nYou should keep the prefix part fixed until the **Prompt Start** tag\n\n@whatever in the prompt is for artist tags, such as @comfyanonymous\n\nYou can find more artist tags [here](https://gumgum10.github.io/gumgum.github.io/)\n"
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 4,
+ "type": "CheckpointLoaderSimple",
+ "pos": [
+ -76.02943362457621,
+ -70.03856573282903
+ ],
+ "size": [
+ 315,
+ 98
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "slot_index": 0,
+ "links": [
+ 21
+ ]
+ },
+ {
+ "name": "CLIP",
+ "type": "CLIP",
+ "slot_index": 1,
+ "links": [
+ 3,
+ 5
+ ]
+ },
+ {
+ "name": "VAE",
+ "type": "VAE",
+ "slot_index": 2,
+ "links": [
+ 8
+ ]
+ }
+ ],
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "CheckpointLoaderSimple",
+ "models": [
+ {
+ "name": "NetaYumev35_pretrained_all_in_one.safetensors",
+ "url": "https://huggingface.co/duongve/NetaYume-Lumina-Image-2.0/resolve/main/NetaYumev35_pretrained_all_in_one.safetensors",
+ "directory": "checkpoints"
+ }
+ ]
+ },
+ "widgets_values": [
+ "NetaYumev35_pretrained_all_in_one.safetensors"
+ ]
+ },
+ {
+ "id": 16,
+ "type": "MarkdownNote",
+ "pos": [
+ -380,
+ 160
+ ],
+ "size": [
+ 420,
+ 400
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [],
+ "title": "Note: About NetaYume Lumina",
+ "properties": {},
+ "widgets_values": [
+ "## About NetaYume Lumina\n\n[NetaYume Lumina](https://civitai.com/models/1790792) is a **text-to-image** model fine-tuned from [Neta Lumina](https://huggingface.co/neta-art/Neta-Lumina), a high-quality anime-style image generation model developed by [**Neta.art Lab**](https://huggingface.co/neta-art). It builds upon [**Lumina-Image-2.0**](https://huggingface.co/Alpha-VLLM/Lumina-Image-2.0), an open-source base model released by the [**Alpha-VLLM team**](https://huggingface.co/Alpha-VLLM) at Shanghai AI Laboratory.\n\n\n- [Style Reference Sheet](https://neta-lumina-style.tz03.xyz/)\n- [Prompt book](https://nieta-art.feishu.cn/wiki/RY3GwpT59icIQlkWXEfcCqIMnQd)\n\n\n**Key Features:**\n\n- **High-Quality Anime Generation:** Generates detailed anime-style images with sharp outlines, vibrant colors, and smooth shading.\n- **Improved Character Understanding:** Better captures characters, especially those from the Danbooru dataset, resulting in more coherent and accurate character representations.\n- **Enhanced Fine Details:** Accurately generates accessories, clothing textures, hairstyles, and background elements with greater clarity.\n\n\n## Model link\n\n\nDownload this file: [NetaYumev35_pretrained_all_in_one.safetensors](https://huggingface.co/duongve/NetaYume-Lumina-Image-2.0/resolve/main/NetaYumev35_pretrained_all_in_one.safetensors) \n\nThen place it in the **ComfyUI/models/checkpoints** folder."
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 6,
+ "type": "CLIPTextEncode",
+ "pos": [
+ 420,
+ 200
+ ],
+ "size": [
+ 423.83001708984375,
+ 177.11770629882812
+ ],
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 3
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "slot_index": 0,
+ "links": [
+ 4
+ ]
+ }
+ ],
+ "title": "CLIP Text Encode (Positive Prompt)",
+ "properties": {
+ "cnr_id": "comfy-core",
+ "ver": "0.3.64",
+ "Node name for S&R": "CLIPTextEncode"
+ },
+ "widgets_values": [
+ "You are an assistant designed to generate high quality anime images based on textual prompts. <Prompt Start>\n\nanime style, 1boy,\n\nIn the image, Xiang is seated at a wooden table, deeply engaged in writing or studying. Xiang is wearing a light gray, long-sleeved sweater that appears comfortable and casual, suitable for a relaxed indoor setting. The sweater has a simple, round neckline, adding to its understated elegance.\n\nXiang is holding a pen in their right hand, poised over an open notebook or book that lies on the table. The notebook has blank pages, suggesting that Xiang might be taking notes, writing, or perhaps brainstorming ideas. The focus on the notebook and the pen indicates a moment of concentration and creativity.\n\nThe background of the image provides a warm and inviting atmosphere. Behind Xiang, there is a wooden dresser or cabinet, which adds a touch of rustic charm to the scene. On top of the dresser, there is a vase filled with vibrant red flowers, likely roses, which bring a splash of color and life to the setting. The flowers not only enhance the aesthetic appeal but also suggest a connection to nature and beauty.\n\nTo the left side of the image, there is a window with sheer curtains that allow soft, natural light to filter into the room. The light creates a serene and peaceful ambiance, ideal for activities that require focus and calmness. The curtains are gently draped, adding a sense of softness and tranquility to the environment.\n\nOn the wall above the dresser, there is a framed picture or artwork, though the details of the image are not clear. This piece of wall decor contributes to the overall homely and personalized feel of the space.\n\nOverall, the image captures a moment of quiet reflection and intellectual engagement, with Xiang immersed in their work amidst a cozy and aesthetically pleasing environment. "
+ ],
+ "color": "#232",
+ "bgcolor": "#353"
+ }
+ ],
+ "links": [
+ [
+ 3,
+ 4,
+ 1,
+ 6,
+ 0,
+ "CLIP"
+ ],
+ [
+ 4,
+ 6,
+ 0,
+ 3,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 5,
+ 4,
+ 1,
+ 7,
+ 0,
+ "CLIP"
+ ],
+ [
+ 6,
+ 7,
+ 0,
+ 3,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 8,
+ 4,
+ 2,
+ 8,
+ 1,
+ "VAE"
+ ],
+ [
+ 13,
+ 11,
+ 0,
+ 3,
+ 0,
+ "MODEL"
+ ],
+ [
+ 14,
+ 3,
+ 0,
+ 8,
+ 0,
+ "LATENT"
+ ],
+ [
+ 16,
+ 8,
+ 0,
+ 9,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 17,
+ 13,
+ 0,
+ 3,
+ 3,
+ "LATENT"
+ ],
+ [
+ 21,
+ 4,
+ 0,
+ 11,
+ 0,
+ "MODEL"
+ ]
+ ],
+ "groups": [
+ {
+ "id": 1,
+ "title": "Step1 - Load model",
+ "bounding": [
+ 60,
+ 130,
+ 340,
+ 180
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ },
+ {
+ "id": 2,
+ "title": "Step2 - Image size",
+ "bounding": [
+ 60,
+ 320,
+ 340,
+ 190
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ },
+ {
+ "id": 3,
+ "title": "Step3 - Prompt",
+ "bounding": [
+ 410,
+ 130,
+ 445.27801513671875,
+ 474.2060852050781
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ }
+ ],
+ "config": {},
+ "extra": {
+ "ds": {
+ "scale": 0.9469714447490578,
+ "offset": [
+ 67.33113179192428,
+ -57.64869734958739
+ ]
+ },
+ "frontendVersion": "1.28.7",
+ "VHS_latentpreview": false,
+ "VHS_latentpreviewrate": 0,
+ "VHS_MetadataImage": true,
+ "VHS_KeepIntermediate": true
+ },
+ "version": 0.4
+ }
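
To run this workflow, the checkpoint referenced by the CheckpointLoaderSimple node must exist under `ComfyUI/models/checkpoints` (see the "About NetaYume Lumina" note in the workflow). Below is a minimal download sketch using the `huggingface_hub` package; the repo id and filename come from the workflow's model metadata, while the local path assumes a ComfyUI install in the current directory and should be adjusted to your setup.

```python
# Sketch: fetch the checkpoint used by this workflow into ComfyUI's checkpoints folder.
# Assumes `pip install huggingface_hub` and a ComfyUI checkout at ./ComfyUI (adjust as needed).
from huggingface_hub import hf_hub_download

hf_hub_download(
    repo_id="duongve/NetaYume-Lumina-Image-2.0",
    filename="NetaYumev35_pretrained_all_in_one.safetensors",
    local_dir="ComfyUI/models/checkpoints",  # hypothetical path; match your installation
)
```

Once the file is in place, load `image_netayume_lumina_t2i.json` by dragging it onto the ComfyUI canvas (or opening it from the workflow menu) and queue a prompt.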