openfree committed
Commit 0e9722e · verified · 1 Parent(s): 8360e3e

Delete app-backup.py

Files changed (1)
  1. app-backup.py +0 -1625
app-backup.py DELETED
@@ -1,1625 +0,0 @@
- import gradio as gr
- from huggingface_hub import HfApi, create_repo
- from git import Repo
- import uuid
- from slugify import slugify
- import os
- import sys
- import json
- import argparse
- import subprocess
- import tempfile
- import textwrap
- import requests
- import shutil
- import time
- from pathlib import Path
- from typing import Optional, Dict, List, Tuple
-
- # ========== LFS handling functions (from the first script) ========== #
- def is_lfs_pointer_file(filepath):
-     """Check if a file is a Git LFS pointer file."""
-     try:
-         with open(filepath, 'rb') as f:
-             header = f.read(100)
-             return header.startswith(b'version https://git-lfs.github.com/spec/v1')
-     except Exception:
-         return False
-
- def remove_lfs_files(folder):
-     """Remove all LFS pointer files from the repository."""
-     removed_files = []
-     for root, dirs, files in os.walk(folder):
-         # Skip .git directory
-         if '.git' in root:
-             continue
-
-         for file in files:
-             filepath = os.path.join(root, file)
-             if is_lfs_pointer_file(filepath):
-                 os.remove(filepath)
-                 removed_files.append(filepath.replace(folder + os.sep, ''))
-
-     return removed_files
-
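- # For reference, a Git LFS pointer file is a small text stub of the form:
- #   version https://git-lfs.github.com/spec/v1
- #   oid sha256:<64-hex digest>
- #   size <byte count>
- # which is why inspecting the first 100 bytes is sufficient above.
-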
- # ========== Repository analysis functions (from the second script) ========== #
- def analyze_repository(src_path: Path) -> Dict:
-     """Analyze the repository structure and contents to extract key information."""
-     analysis = {
-         "has_requirements": False,
-         "has_readme": False,
-         "has_setup_py": False,
-         "main_language": "python",
-         "key_files": [],
-         "dependencies": [],
-         "description": "",
-         "installation_steps": [],
-         "usage_examples": [],
-         "model_files": [],
-         "data_files": [],
-         "config_files": [],
-         "entry_points": []
-     }
-
-     # Analyze requirements.txt
-     req_file = src_path / "requirements.txt"
-     if req_file.exists():
-         analysis["has_requirements"] = True
-         try:
-             reqs = req_file.read_text(encoding="utf-8").strip().split("\n")
-             # Filter and clean up the dependencies
-             cleaned_deps = []
-             for r in reqs:
-                 r = r.strip()
-                 if r and not r.startswith("#"):
-                     # Fix known-bad versions
-                     if "opencv-python==4.10.0" in r:
-                         r = "opencv-python>=4.10.0.82"
-                     elif "opencv-python==4.10" in r:
-                         r = "opencv-python>=4.10.0.82"
-
-                     # Relax overly strict version pins
-                     if "==" in r and not r.startswith("git+"):
-                         pkg_name = r.split("==")[0]
-                         # Keep pinned versions for critical packages; relax the rest to >=
-                         if pkg_name.lower() in ["torch", "tensorflow", "transformers", "numpy"]:
-                             cleaned_deps.append(r)
-                         else:
-                             version = r.split("==")[1]
-                             # Normalize x.y versions to x.y.0
-                             if version.count('.') == 1:
-                                 version = version + ".0"
-                             cleaned_deps.append(f"{pkg_name}>={version}")
-                     else:
-                         cleaned_deps.append(r)
-             analysis["dependencies"] = cleaned_deps
-         except Exception:
-             analysis["dependencies"] = []
-
-     # Analyze the README
-     for readme_name in ["README.md", "readme.md", "README.rst", "README.txt"]:
-         readme_file = src_path / readme_name
-         if readme_file.exists():
-             analysis["has_readme"] = True
-             try:
-                 readme_content = readme_file.read_text(encoding="utf-8")
-                 analysis["readme_content"] = readme_content[:5000]  # first 5000 characters only
-
-                 # Extract a short description
-                 lines = readme_content.split("\n")
-                 for i, line in enumerate(lines[:10]):
-                     if line.strip() and not line.startswith("#") and not line.startswith("!"):
-                         analysis["description"] = line.strip()
-                         break
-
-                 # Locate installation and usage sections
-                 install_section = False
-                 usage_section = False
-                 for line in lines:
-                     if "install" in line.lower() and "#" in line:
-                         install_section = True
-                         usage_section = False
-                         continue
-                     elif "usage" in line.lower() and "#" in line:
-                         usage_section = True
-                         install_section = False
-                         continue
-                     elif "#" in line:
-                         install_section = False
-                         usage_section = False
-
-                     if install_section and line.strip():
-                         analysis["installation_steps"].append(line.strip())
-                     elif usage_section and line.strip():
-                         analysis["usage_examples"].append(line.strip())
-             except Exception:
-                 pass
-
-     # Find the main Python files
-     py_files = list(src_path.glob("**/*.py"))
-     for py_file in py_files[:20]:  # analyze at most 20 files
-         if "__pycache__" not in str(py_file) and ".git" not in str(py_file):
-             relative_path = py_file.relative_to(src_path)
-
-             # Look for entry-point candidates
-             if any(name in py_file.name for name in ["main.py", "app.py", "demo.py", "run.py", "server.py", "streamlit_app.py"]):
-                 analysis["entry_points"].append(str(relative_path))
-
-             # Take a quick look at the file contents
-             try:
-                 content = py_file.read_text(encoding="utf-8")[:1000]
-                 if "if __name__" in content and "main" in content:
-                     analysis["entry_points"].append(str(relative_path))
-
-                 # Check for key imports
-                 if any(lib in content for lib in ["torch", "tensorflow", "transformers", "numpy", "pandas", "cv2", "PIL"]):
-                     analysis["key_files"].append({
-                         "path": str(relative_path),
-                         "preview": content[:500]
-                     })
-             except Exception:
-                 pass
-
-     # Find model files
-     model_extensions = [".pth", ".pt", ".ckpt", ".h5", ".pb", ".onnx", ".safetensors"]
-     for ext in model_extensions:
-         model_files = list(src_path.glob(f"**/*{ext}"))
-         for mf in model_files[:5]:
-             if ".git" not in str(mf):
-                 analysis["model_files"].append(str(mf.relative_to(src_path)))
-
-     # Find config files
-     config_patterns = ["config.json", "config.yaml", "config.yml", "*.json", "*.yaml"]
-     for pattern in config_patterns:
-         config_files = list(src_path.glob(pattern))
-         for cf in config_files[:5]:
-             if ".git" not in str(cf):
-                 analysis["config_files"].append(str(cf.relative_to(src_path)))
-
-     return analysis
-
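- # Example (hypothetical values): analyze_repository(Path("repo")) might return
- #   {"has_requirements": True, "dependencies": ["numpy>=1.24.0"],
- #    "entry_points": ["app.py"], "model_files": ["weights/model.pt"], ...}
-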
- # ========== Brave Search helper (from the second script) ========== #
- def search_repo_info(repo_url: str) -> str:
-     """Collect repository information via Brave Search."""
-     api_key = os.getenv("BAPI_TOKEN")
-     if not api_key:
-         return ""
-
-     api_key = api_key.strip()
-     headers = {"X-Subscription-Token": api_key, "Accept": "application/json"}
-
-     # Extract the repository name
-     repo_parts = repo_url.rstrip("/").split("/")
-     if len(repo_parts) >= 2:
-         repo_name = f"{repo_parts[-2]}/{repo_parts[-1]}"
-     else:
-         return ""
-
-     # Search queries
-     queries = [
-         f'"{repo_name}" github tutorial',
-         f'"{repo_name}" usage example',
-         f'"{repo_name}" gradio streamlit demo'
-     ]
-
-     search_results = []
-     for query in queries:
-         params = {"q": query, "count": 3}
-         try:
-             resp = requests.get(
-                 "https://api.search.brave.com/res/v1/web/search",
-                 headers=headers,
-                 params=params,
-                 timeout=10
-             )
-             if resp.status_code == 200:
-                 results = resp.json().get("web", {}).get("results", [])
-                 for r in results:
-                     search_results.append({
-                         "title": r.get("title", ""),
-                         "description": r.get("description", ""),
-                         "url": r.get("url", "")
-                     })
-         except Exception:
-             continue
-
-     # Convert the search results to text
-     search_text = f"Search results for {repo_name}:\n"
-     for r in search_results[:5]:
-         search_text += f"\n- {r['title']}: {r['description']}\n"
-
-     return search_text
-
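- # The returned text ("Search results for <owner>/<repo>:" followed by
- # "- <title>: <description>" lines) is appended to the LLM context below.
-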
- # ========== AI generation helpers (from the second script) ========== #
- def generate_gradio_app(repo_url: str, analysis: Dict, search_info: str = "") -> Optional[Dict]:
-     """Generate a working Gradio app with AI."""
-
-     # Prepare the context
-     context = f"""Repository URL: {repo_url}
-
- Repository Analysis:
- - Description: {analysis.get('description', 'N/A')}
- - Main Dependencies: {', '.join(analysis['dependencies'][:10])}
- - Entry Points: {', '.join(analysis['entry_points'][:5])}
- - Model Files: {', '.join(analysis['model_files'][:3])}
- - Config Files: {', '.join(analysis['config_files'][:3])}
-
- Key Files Found:
- """
-
-     for kf in analysis.get('key_files', [])[:3]:
-         context += f"\n--- {kf['path']} ---\n{kf['preview']}\n"
-
-     if analysis.get('readme_content'):
-         context += f"\n--- README.md (excerpt) ---\n{analysis['readme_content'][:2000]}\n"
-
-     if search_info:
-         context += f"\n--- Web Search Results ---\n{search_info}\n"
-
-     # Installation steps
-     if analysis['installation_steps']:
-         context += f"\nInstallation Steps:\n"
-         for step in analysis['installation_steps'][:5]:
-             context += f"- {step}\n"
-
-     # Usage examples
-     if analysis['usage_examples']:
-         context += f"\nUsage Examples:\n"
-         for ex in analysis['usage_examples'][:5]:
-             context += f"- {ex}\n"
-
-     # System prompt
-     system_prompt = """You are an expert at creating Gradio apps from GitHub repositories.
- Your task is to generate a complete, working Gradio interface that demonstrates the main functionality of the repository.
-
- CRITICAL REQUIREMENTS:
- 1. The app.py must be FULLY FUNCTIONAL and runnable
- 2. DO NOT use 'from agent import' or any repository-specific imports that won't exist
- 3. Handle errors gracefully with clear user feedback
- 4. Include API key inputs when external services are required
- 5. Create intuitive UI components for the main features
- 6. Include helpful descriptions and examples
- 7. Always use gradio>=5.35.0
- 8. If the project requires external APIs (OpenAI, Anthropic, etc), include:
- - API key input fields
- - Clear instructions on how to obtain keys
- - Environment variable setup guidance
- - Graceful handling when keys are missing
-
- Return ONLY valid JSON with these exact keys:
- - app_py: Complete Gradio app code
- - requirements_txt: All necessary dependencies including gradio>=5.35.0
- - summary: Brief description of what the app does"""
-
-     # Try OpenAI
-     openai_key = os.getenv("OPENAI_API_KEY")
-     if openai_key:
-         try:
-             headers = {
-                 "Authorization": f"Bearer {openai_key.strip()}",
-                 "Content-Type": "application/json"
-             }
-
-             payload = {
-                 "model": "gpt-4o-mini",
-                 "messages": [
-                     {"role": "system", "content": system_prompt},
-                     {"role": "user", "content": f"Create a fully functional Gradio app for this repository:\n\n{context[:8000]}"}
-                 ],
-                 "temperature": 0.3,
-                 "max_tokens": 4000
-             }
-
-             r = requests.post(
-                 "https://api.openai.com/v1/chat/completions",
-                 json=payload,
-                 headers=headers,
-                 timeout=30
-             )
-
-             if r.status_code == 200:
-                 response_text = r.json()["choices"][0]["message"]["content"]
-                 print("✅ Smart app generated via the OpenAI API")
-
-                 # Parse the JSON response
-                 try:
-                     if "```json" in response_text:
-                         start = response_text.find("```json") + 7
-                         end = response_text.find("```", start)
-                         response_text = response_text[start:end].strip()
-                     elif "```" in response_text:
-                         start = response_text.find("```") + 3
-                         end = response_text.find("```", start)
-                         response_text = response_text[start:end].strip()
-
-                     result = json.loads(response_text)
-
-                     if not all(key in result for key in ["app_py", "requirements_txt", "summary"]):
-                         raise ValueError("Missing required keys in response")
-
-                     if "gradio" not in result.get("requirements_txt", "").lower():
-                         result["requirements_txt"] = "gradio>=5.35.0\n" + result.get("requirements_txt", "")
-
-                     return result
-
-                 except (json.JSONDecodeError, ValueError) as e:
-                     print(f"⚠️ JSON parsing error: {e}")
-                     return None
-         except Exception as e:
-             print(f"⚠️ OpenAI API error: {e}")
-
-     # Try Friendli
-     friendli_token = os.getenv("FRIENDLI_TOKEN")
-     if friendli_token:
-         try:
-             headers = {
-                 "Authorization": f"Bearer {friendli_token.strip()}",
-                 "Content-Type": "application/json"
-             }
-
-             payload = {
-                 "model": "meta-llama-3.1-70b-instruct",
-                 "messages": [
-                     {"role": "system", "content": system_prompt},
-                     {"role": "user", "content": f"Create a Gradio app:\n{context[:6000]}"}
-                 ],
-                 "max_tokens": 4000,
-                 "temperature": 0.3
-             }
-
-             for endpoint in [
-                 "https://api.friendli.ai/v1/chat/completions",
-                 "https://api.friendli.ai/dedicated/v1/chat/completions"
-             ]:
-                 r = requests.post(endpoint, json=payload, headers=headers, timeout=30)
-                 if r.status_code == 200:
-                     response_text = r.json()["choices"][0]["message"]["content"]
-                     print("✅ Smart app generated via the Friendli API")
-
-                     if "```json" in response_text:
-                         start = response_text.find("```json") + 7
-                         end = response_text.find("```", start)
-                         response_text = response_text[start:end].strip()
-
-                     result = json.loads(response_text)
-
-                     if "gradio" not in result.get("requirements_txt", "").lower():
-                         result["requirements_txt"] = "gradio>=5.35.0\n" + result.get("requirements_txt", "")
-
-                     return result
-         except Exception as e:
-             print(f"⚠️ Friendli API error: {e}")
-
-     # Fall back to the smart default template
-     print("ℹ️ No AI API key available; generating the smart default template.")
-     return create_smart_template(repo_url, analysis)
-
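- # Generation order: OpenAI (gpt-4o-mini), then Friendli, then the local smart
- # template when no AI API key is configured. Note that a malformed OpenAI JSON
- # response returns None to the caller rather than falling through to Friendli.
-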
- def create_smart_template(repo_url: str, analysis: Dict) -> Dict:
-     """Build a smart default template from the analysis results."""
-
-     repo_name = Path(repo_url.rstrip("/")).name
-     description = analysis.get("description", "A project deployed from GitHub") if analysis else "A project deployed from GitHub"
-
-     # Decide the app type from the dependencies
-     deps = " ".join(analysis.get("dependencies", [])) if analysis else ""
-     has_ml = any(lib in deps for lib in ["torch", "tensorflow", "transformers", "scikit-learn"])
-     has_cv = any(lib in deps for lib in ["cv2", "PIL", "pillow", "opencv"])
-     has_nlp = any(lib in deps for lib in ["transformers", "nltk", "spacy"])
-     has_audio = any(lib in deps for lib in ["librosa", "soundfile", "pyaudio"])
-     has_3d = any(lib in deps for lib in ["gaussian", "rasterizer", "plyfile", "trimesh"])
-
-     # Base requirements - git dependencies excluded
-     requirements = ["gradio>=5.35.0"]
-     if analysis and analysis.get("dependencies"):
-         # Exclude git+ and local dependencies
-         filtered_deps = []
-         for dep in analysis["dependencies"][:15]:
-             if not dep.startswith("git+") and not dep.startswith("-e") and not dep.startswith("file:"):
-                 # Relax overly strict version pins
-                 if "==" in dep and dep.split("==")[0].lower() not in ["torch", "tensorflow", "numpy"]:
-                     pkg_name = dep.split("==")[0]
-                     version = dep.split("==")[1]
-                     filtered_deps.append(f"{pkg_name}>={version}")
-                 else:
-                     filtered_deps.append(dep)
-         requirements.extend(filtered_deps)
-
-     # Generate a template for each app type
-     if has_3d or "gaussian" in repo_name.lower():
-         # 3D / Gaussian Splatting app
-         app_code = f'''import gradio as gr
- import os
- import sys
-
- # Repository: {repo_url}
- # {description}
-
- # Note: This project requires CUDA-enabled GPU and complex build dependencies
- # The original repository uses custom CUDA extensions that need compilation
-
- def process_3d(input_file):
-     """3D processing function - placeholder for actual implementation"""
-     if input_file is None:
-         return "Please upload a 3D file or image"
-
-     info = """
- ## ⚠️ Build Requirements Notice
-
- This project requires:
- 1. CUDA-enabled GPU
- 2. Custom C++/CUDA extensions compilation
- 3. Specific versions of PyTorch with CUDA support
-
- The git dependencies in requirements.txt need PyTorch to be installed first.
-
- For full functionality:
- 1. Install PyTorch with CUDA: `pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118`
- 2. Install build tools: `apt-get install build-essential python3-dev ninja-build`
- 3. Then install other requirements
-
- Original repository: {repo_url}
- """
-
-     return info
-
- # Gradio interface
- with gr.Blocks(title="{repo_name}") as demo:
-     gr.Markdown(f"""
-     # {repo_name.replace("-", " ").title()}
-
-     {description}
-
-     This space was created from: [{repo_url}]({repo_url})
-
-     **Note**: This project has complex build requirements. See below for details.
-     """)
-
-     with gr.Row():
-         with gr.Column():
-             input_file = gr.File(label="Upload 3D File or Image")
-             process_btn = gr.Button("Process", variant="primary")
-
-         with gr.Column():
-             output_info = gr.Markdown()
-
-     process_btn.click(
-         fn=process_3d,
-         inputs=input_file,
-         outputs=output_info
-     )
-
- if __name__ == "__main__":
-     demo.launch()
- '''
-     elif has_cv:
-         app_code = f'''import gradio as gr
- from PIL import Image
- import numpy as np
-
- # Repository: {repo_url}
- # {description}
-
- def process_image(image):
-     """Image processing function - replace with the real implementation"""
-     if image is None:
-         return None, "Please upload an image"
-
-     # Implement the actual image processing logic here
-     # e.g. model loading, preprocessing, inference, postprocessing
-
-     # Simple demo processing
-     img_array = np.array(image)
-     processed = Image.fromarray(img_array)
-
-     info = f"Image shape: {{img_array.shape}}"
-     return processed, info
-
- # Build the Gradio interface
- with gr.Blocks(title="{repo_name}") as demo:
-     gr.Markdown(f"""
-     # {repo_name.replace("-", " ").title()}
-
-     {description}
-
-     This space was created from: [{repo_url}]({repo_url})
-     """)
-
-     with gr.Row():
-         with gr.Column():
-             input_image = gr.Image(label="Input Image", type="pil")
-             process_btn = gr.Button("Process Image", variant="primary")
-
-         with gr.Column():
-             output_image = gr.Image(label="Output Image")
-             output_info = gr.Textbox(label="Information")
-
-     process_btn.click(
-         fn=process_image,
-         inputs=input_image,
-         outputs=[output_image, output_info]
-     )
-
- if __name__ == "__main__":
-     demo.launch()
- '''
-
-     elif has_nlp:
-         app_code = f'''import gradio as gr
-
- # Repository: {repo_url}
- # {description}
-
- def process_text(text, max_length=100):
-     """Text processing function - replace with the real implementation"""
-     if not text:
-         return "Please enter some text"
-
-     # Implement the actual NLP logic here
-
-     # Simple demo processing
-     word_count = len(text.split())
-     char_count = len(text)
-
-     result = f"""
- **Analysis Results:**
- - Word count: {{word_count}}
- - Character count: {{char_count}}
- - Average word length: {{char_count/max(word_count, 1):.1f}}
- """
-
-     return result
-
- # Build the Gradio interface
- with gr.Blocks(title="{repo_name}") as demo:
-     gr.Markdown(f"""
-     # {repo_name.replace("-", " ").title()}
-
-     {description}
-
-     This space was created from: [{repo_url}]({repo_url})
-     """)
-
-     with gr.Row():
-         with gr.Column():
-             input_text = gr.Textbox(
-                 label="Input Text",
-                 placeholder="Enter your text here...",
-                 lines=5
-             )
-             max_length = gr.Slider(
-                 minimum=10,
-                 maximum=500,
-                 value=100,
-                 label="Max Length"
-             )
-             process_btn = gr.Button("Process Text", variant="primary")
-
-         with gr.Column():
-             output_text = gr.Markdown(label="Results")
-
-     process_btn.click(
-         fn=process_text,
-         inputs=[input_text, max_length],
-         outputs=output_text
-     )
-
- if __name__ == "__main__":
-     demo.launch()
- '''
-
-     else:
-         app_code = f'''import gradio as gr
-
- # Repository: {repo_url}
- # {description}
-
- def main_function(input_data):
-     """Main processing function - replace with the real implementation"""
-     if not input_data:
-         return "Please provide input"
-
-     # Implement the actual processing logic here
-
-     result = f"Processed successfully! Input received: {{input_data}}"
-     return result
-
- # Build the Gradio interface
- with gr.Blocks(title="{repo_name}") as demo:
-     gr.Markdown(f"""
-     # {repo_name.replace("-", " ").title()}
-
-     {description}
-
-     This space was created from: [{repo_url}]({repo_url})
-     """)
-
-     with gr.Row():
-         with gr.Column():
-             input_data = gr.Textbox(
-                 label="Input",
-                 placeholder="Enter your input here...",
-                 lines=3
-             )
-             process_btn = gr.Button("Process", variant="primary")
-
-         with gr.Column():
-             output_data = gr.Textbox(label="Output")
-
-     process_btn.click(
-         fn=main_function,
-         inputs=input_data,
-         outputs=output_data
-     )
-
- if __name__ == "__main__":
-     demo.launch()
- '''
-
-     return {
-         "app_py": app_code,
-         "requirements_txt": "\n".join(requirements),
-         "summary": f"Smart template created for {repo_name}"
-     }
-
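- # Note on the templates above: braces doubled as {{ }} are escaped so they survive
- # into the generated app.py as literal f-string placeholders (e.g. {img_array.shape}),
- # while single braces such as {repo_url} are filled in at template-build time.
-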
- # ========== Unified main clone function ========== #
- def clone(repo_git, repo_hf, sdk_type, skip_lfs, enable_smart_generation):
-     """Clone a GitHub repository into a Hugging Face Space and generate a smart app.py."""
-     folder = str(uuid.uuid4())
-
-     # Read HF_TOKEN from the environment
-     hf_token = os.getenv("HF_TOKEN")
-     if not hf_token:
-         yield "❌ Error: HF_TOKEN not found in environment variables. Please set it in the Space settings."
-         return
-
-     try:
-         # Initialize progress messages
-         yield "🔄 Starting clone process..."
-
-         # Get user info
-         api = HfApi(token=hf_token)
-         try:
-             user_info = api.whoami()
-             username = user_info["name"]
-             yield f"✅ Authenticated as: {username}"
-         except Exception as e:
-             yield f"❌ Authentication failed: {str(e)}"
-             return
-
-         # Clone the repository
-         yield f"📥 Cloning repository from {repo_git}..."
-
-         env = os.environ.copy()
-
-         # Always skip LFS download initially to avoid errors
-         env['GIT_LFS_SKIP_SMUDGE'] = '1'
-         clone_cmd = ['git', 'clone', '--recurse-submodules', repo_git, folder]
-         subprocess.run(clone_cmd, check=True, env=env)
-
-         if not skip_lfs:
-             # Try to pull LFS files
-             yield "📦 Attempting to download LFS files..."
-             try:
-                 subprocess.run(['git', 'lfs', 'install'], cwd=folder, check=True)
-                 lfs_result = subprocess.run(['git', 'lfs', 'pull'], cwd=folder, capture_output=True, text=True)
-
-                 if lfs_result.returncode != 0:
-                     yield f"⚠️ Warning: LFS download failed: {lfs_result.stderr}"
-                     yield "⚠️ Will remove LFS pointer files to prevent upload errors..."
-                     skip_lfs = True  # Force LFS skip
-                 else:
-                     yield "✅ LFS files downloaded successfully"
-             except Exception as e:
-                 yield f"⚠️ LFS error: {str(e)}"
-                 yield "⚠️ Will remove LFS pointer files to prevent upload errors..."
-                 skip_lfs = True  # Force LFS skip
-
-         # If we're skipping LFS, remove all LFS pointer files
-         if skip_lfs:
-             yield "🧹 Removing LFS pointer files..."
-             removed_files = remove_lfs_files(folder)
-             if removed_files:
-                 yield f"📝 Removed {len(removed_files)} LFS pointer files"
-                 # Show first few removed files
-                 for file in removed_files[:5]:
-                     yield f" - {file}"
-                 if len(removed_files) > 5:
-                     yield f" ... and {len(removed_files) - 5} more files"
-
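-         # GIT_LFS_SKIP_SMUDGE=1 above makes git check out LFS entries as small pointer
-         # stubs instead of downloading the binaries, so the clone step does not stall on
-         # missing LFS objects; the stubs are then either hydrated or stripped here.
-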
-         # If smart generation is enabled
-         if enable_smart_generation:
-             yield "🔍 Analyzing repository structure..."
-             folder_path = Path(folder)
-             analysis = analyze_repository(folder_path)
-
-             yield "🔍 Searching for additional information..."
-             search_info = search_repo_info(repo_git)
-
-             yield "🤖 Generating smart Gradio app..."
-             generated = generate_gradio_app(repo_git, analysis, search_info)
-
-             if generated and isinstance(generated, dict) and "app_py" in generated:
-                 # Create/overwrite app.py
-                 app_path = folder_path / "app.py"
-                 app_path.write_text(generated["app_py"], encoding="utf-8")
-                 yield "✅ Smart app.py generated"
-
-                 # Update requirements.txt - optimize the dependency order
-                 req_path = folder_path / "requirements.txt"
-                 existing_reqs = []
-                 if req_path.exists():
-                     try:
-                         existing_reqs = req_path.read_text(encoding="utf-8").strip().split("\n")
-                     except Exception:
-                         existing_reqs = []
-
-                 new_reqs = generated["requirements_txt"].strip().split("\n") if generated["requirements_txt"] else []
-
-                 # Clean up dependencies and optimize their order
-                 git_reqs = []
-                 torch_reqs = []
-                 regular_reqs = []
-
-                 for req in existing_reqs + new_reqs:
-                     req = req.strip()
-                     if not req or req.startswith("#"):
-                         continue
-
-                     # Track git+ dependencies separately
-                     if req.startswith("git+"):
-                         git_reqs.append(req)
-                     # torch-related dependencies must install first
-                     elif "torch" in req.lower() or "cuda" in req.lower():
-                         torch_reqs.append(req)
-                     else:
-                         regular_reqs.append(req)
-
-                 # Check for gradio and add it if missing
-                 has_gradio = any("gradio" in req for req in regular_reqs)
-                 if not has_gradio:
-                     regular_reqs.append("gradio>=5.35.0")
-
-                 # Build the final requirements.txt (order matters)
-                 final_reqs = []
-
-                 # 1. torch-related packages first
-                 if torch_reqs:
-                     final_reqs.extend(sorted(set(torch_reqs)))
-                     final_reqs.append("")  # blank line
-
-                 # 2. Regular dependencies
-                 final_reqs.extend(sorted(set(regular_reqs)))
-
-                 # 3. Git dependencies last (they often need torch already installed)
-                 if git_reqs:
-                     final_reqs.append("")  # blank line
-                     final_reqs.append("# Git dependencies (installed last)")
-                     final_reqs.extend(sorted(set(git_reqs)))
-
-                 req_content = "\n".join(final_reqs)
-                 req_path.write_text(req_content, encoding="utf-8")
-                 yield "✅ Requirements.txt updated with optimized dependency order"
-
-                 # Update README.md - always regenerate it to guarantee a valid format
-                 readme_path = folder_path / "README.md"
-                 readme_content = f"""---
- title: {repo_hf.replace("-", " ").title()}
- emoji: 🚀
- colorFrom: blue
- colorTo: green
- sdk: {sdk_type}
- sdk_version: "5.35.0"
- app_file: app.py
- pinned: false
- ---
-
- # {repo_hf.replace("-", " ").title()}
-
- {analysis.get('description', 'Deployed from GitHub repository')}
-
- Deployed from: {repo_git}
-
- ## Features
- This Space provides a Gradio interface for the repository's main functionality.
- The app.py was automatically generated based on repository analysis.
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
- """
-                 readme_path.write_text(readme_content, encoding="utf-8")
-                 yield "✅ README.md created/updated"
-         else:
-             # Even with smart generation disabled, make sure README.md exists
-             readme_path = Path(folder) / "README.md"
-             if not readme_path.exists():
-                 # Create a default README.md
-                 readme_content = f"""---
- title: {repo_hf.replace("-", " ").title()}
- emoji: 🚀
- colorFrom: blue
- colorTo: green
- sdk: {sdk_type}
- sdk_version: "5.35.0"
- app_file: app.py
- pinned: false
- ---
-
- # {repo_hf.replace("-", " ").title()}
-
- Deployed from: {repo_git}
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
- """
-                 readme_path.write_text(readme_content, encoding="utf-8")
-                 yield "✅ README.md created with required configuration"
-
-         # Check requirements.txt and fix common problems
-         req_path = Path(folder) / "requirements.txt"
-         if req_path.exists():
-             try:
-                 req_content = req_path.read_text(encoding="utf-8")
-                 lines = req_content.strip().split("\n")
-
-                 # Classify dependencies and remove duplicates
-                 torch_deps = []
-                 git_deps = []
-                 regular_deps = []
-                 problem_git_deps = []  # git dependencies that need torch
-                 seen_packages = {}  # package name -> full dependency spec
-
-                 for line in lines:
-                     line = line.strip()
-                     if not line or line.startswith("#"):
-                         continue
-
-                     if line.startswith("git+"):
-                         # Flag git dependencies that need CUDA/compilation
-                         cuda_keywords = ["gaussian", "rasterizer", "diff-", "cuda", "nvdiffrast", "tiny-cuda"]
-                         if any(keyword in line.lower() for keyword in cuda_keywords):
-                             problem_git_deps.append(line)
-                         else:
-                             git_deps.append(line)
-                     else:
-                         # Extract the package name (strip version specifiers)
-                         pkg_name = line.split("==")[0].split(">=")[0].split("<=")[0].split(">")[0].split("<")[0].split("~=")[0].split("[")[0].strip()
-
-                         # Packages that require special installation
-                         special_install_packages = ["pytorch3d", "torch-scatter", "torch-sparse", "torch-geometric", "tiny-cuda-nn"]
-
-                         if pkg_name in special_install_packages:
-                             problem_git_deps.append(f"# {line} # Requires special installation")
-                             yield f" → Marked {pkg_name} for special handling"
-                             continue
-
-                         # Fix known-bad versions of specific packages
-                         if pkg_name == "opencv-python":
-                             if "==4.10.0" in line or "==4.10" in line:
-                                 line = "opencv-python>=4.10.0.82"
-                                 yield "📝 Fixed opencv-python version (4.10.0 → 4.10.0.82)"
-
-                         # Duplicate check
-                         if pkg_name in seen_packages:
-                             # If we've seen this package, compare versions
-                             existing = seen_packages[pkg_name]
-                             # Prefer the more specific version (== over >= over unpinned)
-                             if "==" in line and "==" not in existing:
-                                 seen_packages[pkg_name] = line
-                             elif "==" not in existing and ">=" in line and ">=" not in existing:
-                                 seen_packages[pkg_name] = line
-                             # At the same specificity, prefer the newer version
-                             elif "==" in line and "==" in existing:
-                                 try:
-                                     new_ver = line.split("==")[1]
-                                     old_ver = existing.split("==")[1]
-                                     # Version comparison (naive string comparison)
-                                     if new_ver > old_ver:
-                                         seen_packages[pkg_name] = line
-                                 except Exception:
-                                     pass
-                             yield f" → Resolved duplicate: {pkg_name} - using {seen_packages[pkg_name]}"
-                         else:
-                             seen_packages[pkg_name] = line
-
-                 # Rebuild from the classified dependencies
-                 for pkg_name, dep_line in seen_packages.items():
-                     # pkg_name already has its version specifiers stripped, so match bare names
-                     if pkg_name.lower() in ("torch", "torchvision", "torchaudio"):
-                         torch_deps.append(dep_line)
-                     else:
-                         regular_deps.append(dep_line)
-
-                 # Check for gradio
-                 has_gradio = any("gradio" in pkg for pkg in seen_packages.keys())
-                 if not has_gradio:
-                     regular_deps.append("gradio>=5.35.0")
-                     seen_packages["gradio"] = "gradio>=5.35.0"
-
-                 # Add torch if missing (needed when CUDA/git dependencies are present)
-                 torch_packages = [p for p in seen_packages.keys() if p == "torch"]
-                 if not torch_packages and (problem_git_deps or any("torch" in dep for dep in git_deps)):
-                     torch_deps.append("torch>=2.0.0")
-                     yield "⚠️ Added torch dependency for git packages"
-
-                 # Swap in the CPU build of torch where a CUDA build is pinned
-                 cpu_torch_suggested = False
-                 for i, dep in enumerate(torch_deps):
-                     if "torch==" in dep or "torch>=" in dep:
-                         # If a CUDA build is pinned, fall back to the CPU build
-                         if "+cu" in dep:
-                             torch_deps[i] = dep.split("+cu")[0]
-                             cpu_torch_suggested = True
-
-                 if cpu_torch_suggested:
-                     yield "ℹ️ Converted torch to CPU version for HuggingFace Spaces compatibility"
-
-                 # Write the reordered requirements.txt
-                 new_lines = []
-
-                 # 1. Install torch first
-                 if torch_deps:
-                     new_lines.append("# PyTorch - Must be installed first")
-                     new_lines.extend(sorted(set(torch_deps)))
-                     new_lines.append("")
-
-                 # 2. Regular dependencies
-                 if regular_deps:
-                     # Packages that require special installation
-                     special_packages = {
-                         "pytorch3d": "# pytorch3d requires special installation from https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md",
-                         "torch-scatter": "# torch-scatter requires matching torch version",
-                         "torch-sparse": "# torch-sparse requires matching torch version",
-                         "torch-geometric": "# torch-geometric requires special installation"
-                     }
-
-                     # Additional version validation and fixes
-                     validated_deps = []
-                     problematic_versions = {
-                         "opencv-python": {
-                             "4.10.0": "4.10.0.84",
-                             "4.10": "4.10.0.84",
-                             "4.9.0": "4.9.0.80",
-                             "4.8.0": "4.8.0.76"
-                         },
-                         "pillow": {
-                             "10.0": "10.0.0",
-                             "9.5": "9.5.0"
-                         }
-                     }
-
-                     skipped_packages = []
-
-                     for dep in regular_deps:
-                         pkg_name = dep.split("==")[0].split(">=")[0].split("[")[0].strip()
-
-                         # Comment out packages that require special installation
-                         if pkg_name in special_packages:
-                             skipped_packages.append(f"# {dep} {special_packages[pkg_name]}")
-                             yield f" → Commented out {pkg_name} (requires special installation)"
-                             continue
-
-                         # Handle packages whose versions need fixing
-                         if pkg_name in problematic_versions and "==" in dep:
-                             version = dep.split("==")[1].strip()
-                             if version in problematic_versions[pkg_name]:
-                                 new_version = problematic_versions[pkg_name][version]
-                                 new_dep = f"{pkg_name}>={new_version}"
-                                 validated_deps.append(new_dep)
-                                 yield f" → Fixed version: {dep} → {new_dep}"
-                             else:
-                                 validated_deps.append(dep)
-                         else:
-                             validated_deps.append(dep)
-
-                     new_lines.append("# Core dependencies")
-                     # Deduplicate by package name (e.g. opencv-python)
-                     deduped_regular = []
-                     seen = set()
-                     for dep in sorted(validated_deps):
-                         pkg_name = dep.split("==")[0].split(">=")[0].split("<=")[0].split(">")[0].split("<")[0].split("~=")[0].split("[")[0].strip()
-                         if pkg_name not in seen:
-                             deduped_regular.append(dep)
-                             seen.add(pkg_name)
-                     new_lines.extend(deduped_regular)
-                     new_lines.append("")
-
-                     # Append the specially-installed packages as comments
-                     if skipped_packages:
-                         new_lines.append("# ⚠️ The following packages require special installation:")
-                         new_lines.extend(skipped_packages)
-                         new_lines.append("")
-
-                 # 3. Regular git dependencies
-                 if git_deps:
-                     new_lines.append("# Git dependencies")
-                     new_lines.extend(sorted(set(git_deps)))
-                     new_lines.append("")
-
-                 # Comment out the problematic git dependencies and special packages
-                 if problem_git_deps:
-                     new_lines.append("")
-                     new_lines.append("# ⚠️ CUDA-dependent packages and special installations")
-                     new_lines.append("# These packages require special installation methods:")
-                     new_lines.append("# - pytorch3d: Install from https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md")
-                     new_lines.append("# - CUDA packages: Require CUDA toolkit and GPU environment")
-                     new_lines.append("#")
-                     for dep in problem_git_deps:
-                         if not dep.startswith("#"):
-                             new_lines.append(f"# {dep}")
-                         else:
-                             new_lines.append(dep)
-
-                     # Emit a warning
-                     yield f"⚠️ Commented out {len(problem_git_deps)} packages requiring special installation"
-
-                 # Tidy up blank lines
-                 final_lines = []
-                 for i, line in enumerate(new_lines):
-                     # Keep content lines, and drop a blank line that directly follows a comment
-                     if line.strip() and not (i > 0 and new_lines[i-1].startswith("#") and line == ""):
-                         final_lines.append(line)
-                     elif line == "" and i < len(new_lines) - 1:  # keep interior blank lines
-                         final_lines.append(line)
-
-                 req_path.write_text("\n".join(final_lines), encoding="utf-8")
-
-                 # Dependency count statistics
-                 total_deps = len(torch_deps) + len(regular_deps) + len(git_deps) + len(problem_git_deps)
-                 yield f"✅ Reorganized requirements.txt - Total {total_deps} dependencies (duplicates removed)"
-                 if torch_deps:
-                     yield f" - PyTorch packages: {len(torch_deps)}"
-                 if regular_deps:
-                     yield f" - Regular packages: {len(set(regular_deps))}"
-                 if git_deps or problem_git_deps:
-                     yield f" - Git dependencies: {len(git_deps + problem_git_deps)} ({len(problem_git_deps)} commented)"
-
-                 # pre-requirements.txt is no longer needed (the unified requirements.txt is used)
-                 # packages.txt is also unnecessary in the default HF Spaces environment
-
-                 # Add a local-run guide to README.md
-                 if problem_git_deps:
-                     readme_path = Path(folder) / "README.md"
-                     if readme_path.exists():
-                         try:
-                             existing_readme = readme_path.read_text(encoding="utf-8")
-
-                             # Insert the local-run guide after the YAML header
-                             if "---" in existing_readme:
-                                 parts = existing_readme.split("---", 2)
-                                 if len(parts) >= 3:
-                                     yaml_header = parts[1]
-                                     content = parts[2]
-
-                                     # repo_id is not defined yet at this point, so build it from username and repo_hf
-                                     repo_id = f"{username}/{slugify(repo_hf)}"
-
-                                     local_guide = f"""
- ## ⚠️ GPU/CUDA Requirements
-
- This project contains CUDA-dependent packages that cannot run on standard HuggingFace Spaces (CPU environment).
-
- ### Running Locally with GPU
-
- ```bash
- # Install CUDA Toolkit (if not installed)
- # Visit: https://developer.nvidia.com/cuda-downloads
-
- # Clone this Space
- git clone https://huggingface.co/spaces/{repo_id}
- cd {repo_id.split('/')[-1]}
-
- # Install PyTorch with CUDA
- pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118
-
- # Uncomment CUDA dependencies in requirements.txt
- # Then install all requirements
- pip install -r requirements.txt
- ```
-
- ### Enabling GPU on HuggingFace Spaces
-
- To use GPU on this Space:
- 1. Go to Settings → Hardware
- 2. Select GPU (T4 or A10G)
- 3. Costs apply for GPU usage
-
- ---
-
- """
-                                     new_readme = f"---{yaml_header}---\n{local_guide}{content}"
-                                     readme_path.write_text(new_readme, encoding="utf-8")
-                                     yield "📝 Added GPU setup guide to README.md"
-                         except Exception as e:
-                             yield f"⚠️ Could not update README with GPU guide: {str(e)}"
-             except Exception as e:
-                 yield f"⚠️ Error processing requirements.txt: {str(e)}"
-
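-         # Net effect on requirements.txt: torch packages first, then regular packages,
-         # then git dependencies, with CUDA-only entries left commented out so the
-         # Space still builds on CPU hardware.
-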
-         # Remove .git directory to save space and avoid issues
-         git_dir = os.path.join(folder, '.git')
-         if os.path.exists(git_dir):
-             shutil.rmtree(git_dir)
-             yield "🧹 Removed .git directory"
-
-         # Also clean up .gitattributes to remove LFS tracking
-         gitattributes_path = os.path.join(folder, '.gitattributes')
-         if os.path.exists(gitattributes_path):
-             yield "🧹 Cleaning .gitattributes file..."
-             with open(gitattributes_path, 'r') as f:
-                 lines = f.readlines()
-
-             new_lines = []
-             for line in lines:
-                 if 'filter=lfs' not in line:
-                     new_lines.append(line)
-
-             if new_lines:
-                 with open(gitattributes_path, 'w') as f:
-                     f.writelines(new_lines)
-             else:
-                 # Remove empty .gitattributes
-                 os.remove(gitattributes_path)
-
-         # Check for an existing README and add the Space header if it is missing
-         readme_path = Path(folder) / "README.md"
-         if readme_path.exists():
-             try:
-                 existing_content = readme_path.read_text(encoding="utf-8")
-                 # Add a YAML header if there is none
-                 if not existing_content.strip().startswith("---"):
-                     yaml_header = f"""---
- title: {repo_hf.replace("-", " ").title()}
- emoji: 🚀
- colorFrom: blue
- colorTo: green
- sdk: {sdk_type}
- sdk_version: "5.35.0"
- app_file: app.py
- pinned: false
- ---
-
- """
-                     new_content = yaml_header + existing_content
-                     readme_path.write_text(new_content, encoding="utf-8")
-                     yield "✅ Updated README.md with Space configuration"
-             except Exception as e:
-                 yield f"⚠️ Could not update README.md: {str(e)}"
-
-         # Create the HuggingFace repo with retries
-         yield "🏗️ Creating Hugging Face Space..."
-
-         repo_id = f"{username}/{slugify(repo_hf)}"
-         space_created = False
-
-         # Try to create the Space
-         for attempt in range(3):
-             try:
-                 yield f" Creating Space: {repo_id} (attempt {attempt + 1}/3)"
-
-                 # First check whether the Space already exists
-                 try:
-                     existing_space = api.space_info(repo_id=repo_id, token=hf_token)
-                     yield f" ℹ️ Space already exists: {existing_space.id}"
-                     space_created = True
-                     break
-                 except Exception:
-                     # The Space does not exist yet; create it below
-                     pass
-
-                 # Create the Space
-                 create_result = api.create_repo(
-                     repo_id=repo_id,
-                     repo_type="space",
-                     space_sdk=sdk_type,
-                     exist_ok=True,
-                     private=False,
-                     token=hf_token
-                 )
-
-                 # Wait briefly after creation
-                 time.sleep(3)
-
-                 # Confirm creation
-                 space_info = api.space_info(repo_id=repo_id, token=hf_token)
-                 yield f" ✅ Space created successfully: {space_info.id}"
-                 space_created = True
-                 break
-
-             except Exception as e:
-                 error_msg = str(e)
-
-                 # Handle rate-limit errors
-                 if "429" in error_msg or "Too Many Requests" in error_msg:
-                     yield f"""
- ❌ **Rate Limit Error**
-
- You have reached the HuggingFace API rate limit for creating Spaces.
-
- **What this means:**
- - New users have limited Space creation quotas
- - You need to wait before creating more Spaces (usually 17-24 hours)
- - Your limits will increase over time as you use HuggingFace
-
- **Solutions:**
- 1. **Wait**: Try again in 17-24 hours
- 2. **Use existing Space**: Update an existing Space instead of creating a new one
- 3. **Contact HuggingFace**: Email [email protected] if you need immediate access
- 4. **Alternative**: Create the Space manually on HuggingFace and upload the files
-
- **Manual Space Creation Steps:**
- 1. Go to https://huggingface.co/new-space
- 2. Create a Space named: `{repo_hf}`
- 3. Select SDK: {sdk_type}
- 4. After creation, use the "Files" tab to upload your repository contents
-
- Repository has been cloned to local folder and is ready for manual upload.
- """
-                     # Do not retry on rate limits
-                     raise Exception("Rate limit reached. Please try again later or create the Space manually.")
-
-                 yield f" ⚠️ Attempt {attempt + 1} failed: {error_msg[:100]}..."
-                 if attempt < 2:
-                     yield " Retrying in 5 seconds..."
-                     time.sleep(5)
-                 else:
-                     yield f" ❌ Failed to create space after 3 attempts"
-                     raise Exception(f"Could not create space: {error_msg}")
-
-         if not space_created:
-             raise Exception("Failed to create space")
-
-         # Check folder size
-         folder_size = sum(os.path.getsize(os.path.join(dirpath, filename))
-                           for dirpath, dirnames, filenames in os.walk(folder)
-                           for filename in filenames) / (1024 * 1024)  # Size in MB
-
-         yield f"📊 Folder size: {folder_size:.2f} MB"
-
-         # Count remaining files
-         file_count = sum(len(files) for _, _, files in os.walk(folder))
-         yield f"📁 Total files to upload: {file_count}"
-
-         # Upload to HuggingFace with retry logic
-         upload_success = False
-         max_retries = 3
-
-         for attempt in range(max_retries):
-             try:
-                 if attempt > 0:
-                     yield f"📤 Upload attempt {attempt + 1}/{max_retries}..."
-                     time.sleep(5)  # wait before retrying
-
-                 if folder_size > 500:  # If larger than 500MB, use upload_large_folder
-                     yield "📤 Uploading large folder to Hugging Face (this may take several minutes)..."
-                     api.upload_large_folder(
-                         folder_path=folder,
-                         repo_id=repo_id,
-                         repo_type="space",
-                         token=hf_token,
-                         commit_message="Deploy from GitHub repository",
-                         ignore_patterns=["*.pyc", "__pycache__", ".git*", ".DS_Store", "*.egg-info"]
-                     )
-                 else:
-                     yield "📤 Uploading to Hugging Face..."
-                     api.upload_folder(
-                         folder_path=folder,
-                         repo_id=repo_id,
-                         repo_type="space",
-                         token=hf_token,
-                         commit_message="Deploy from GitHub repository",
-                         ignore_patterns=["*.pyc", "__pycache__", ".git*", ".DS_Store", "*.egg-info"]
-                     )
-
-                 upload_success = True
-                 yield "✅ Upload completed successfully"
-                 break
-
-             except Exception as upload_error:
-                 error_msg = str(upload_error)
-
-                 if "404" in error_msg and attempt < max_retries - 1:
-                     yield f" ⚠️ Upload failed (404). Space might not be ready yet."
-                     yield " Waiting 10 seconds before retry..."
-                     time.sleep(10)
-
-                     # Re-check the Space
-                     try:
-                         space_info = api.space_info(repo_id=repo_id, token=hf_token)
-                         yield f" ✅ Space confirmed to exist"
-                     except Exception:
-                         # Try to recreate the Space
-                         yield " 🔄 Attempting to recreate space..."
-                         try:
-                             api.create_repo(
-                                 repo_id=repo_id,
-                                 repo_type="space",
-                                 space_sdk=sdk_type,
-                                 exist_ok=True,
-                                 private=False,
-                                 token=hf_token
-                             )
-                             yield " ✅ Space recreated"
-                         except Exception as recreate_error:
-                             yield f" ❌ Could not recreate space: {str(recreate_error)}"
-
-                 elif "LFS pointer" in error_msg:
-                     yield "❌ Upload failed due to remaining LFS pointer files"
-                     yield "🔍 Searching for remaining LFS pointers..."
-
-                     # Do another scan for LFS files
-                     lfs_count = 0
-                     for root, dirs, files in os.walk(folder):
-                         for file in files:
-                             filepath = os.path.join(root, file)
-                             if is_lfs_pointer_file(filepath):
-                                 lfs_count += 1
-                                 if lfs_count <= 5:  # show only the first 5
-                                     yield f" Found LFS pointer: {filepath.replace(folder + os.sep, '')}"
-                     if lfs_count > 5:
-                         yield f" ... and {lfs_count - 5} more LFS pointer files"
-                     raise upload_error
-
-                 elif attempt == max_retries - 1:
-                     yield f"❌ Upload failed after {max_retries} attempts: {error_msg[:200]}..."
-                     raise upload_error
-                 else:
-                     yield f" ⚠️ Upload failed: {error_msg[:100]}..."
-
-         if not upload_success:
-             raise Exception("Upload failed after all retries")
-
-         # Clean up the temporary folder
-         shutil.rmtree(folder)
-
-         space_url = f"https://huggingface.co/spaces/{repo_id}"
-
-         # Emit the success message and details
-         yield f"""
- ✅ **Successfully created Space!**
-
- 🔗 **Your Space URL**: {space_url}
-
- 📋 **Deployment Summary:**
- - **Space ID**: `{repo_id}`
- - **Source Repository**: {repo_git}
- - **SDK Type**: {sdk_type}
- - **Smart Generation**: {'Enabled' if enable_smart_generation else 'Disabled'}
- - **LFS Files**: {'Skipped' if skip_lfs else 'Included'}
-
- 🚀 **Next Steps:**
- 1. Click the link above to visit your Space
- 2. Wait 2-3 minutes for the initial build to complete
- 3. Check the "Logs" tab if you encounter any issues
- 4. The Space will automatically rebuild when you make changes
-
- 💡 **Tips:**
- - If the build fails, check the requirements.txt file
- - For GPU-required projects, enable GPU in Space Settings
- - You can edit files directly in the Space's Files tab
- """
-
-         if skip_lfs:
-             yield "\n⚠️ **Note**: LFS files were removed. The Space may be missing some large files (videos, models, etc.)"
-
-         if enable_smart_generation:
-             yield "\n🤖 **Smart Generation**: An AI-generated Gradio interface was created based on repository analysis"
-
-         # Additional notice about git dependencies
-         if enable_smart_generation and any(dep.startswith("git+") for dep in analysis.get("dependencies", [])):
-             yield "\n⚠️ **Build Notice**: This repository contains git dependencies that may take longer to build"
-
-     except subprocess.CalledProcessError as e:
-         if os.path.exists(folder):
-             shutil.rmtree(folder)
-         yield f"❌ Git error: {str(e)}"
-     except Exception as e:
-         if os.path.exists(folder):
-             shutil.rmtree(folder)
-         yield f"❌ Error: {str(e)}"
-
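- # clone() pipeline, as implemented above: authenticate -> git clone with LFS smudge
- # skipped -> optional LFS pull -> strip pointer files -> optional AI app.py generation
- # -> README/requirements fixes -> create the Space (3 attempts) -> upload_folder or
- # upload_large_folder (3 attempts) -> clean up and report the Space URL.
-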
- # Custom CSS for better styling
- css = """
- .container {
-     max-width: 900px;
-     margin: auto;
-     padding: 20px;
- }
- .output-box {
-     min-height: 100px;
-     max-height: 400px;
-     overflow-y: auto;
-     font-family: monospace;
-     font-size: 14px;
-     line-height: 1.5;
- }
- .warning-box {
-     background-color: #fff3cd;
-     border: 1px solid #ffeaa7;
-     border-radius: 4px;
-     padding: 12px;
-     margin: 10px 0;
- }
- .error-box {
-     background-color: #f8d7da;
-     border: 1px solid #f5c6cb;
-     border-radius: 4px;
-     padding: 12px;
-     margin: 10px 0;
- }
- .info-box {
-     background-color: #d1ecf1;
-     border: 1px solid #bee5eb;
-     border-radius: 4px;
-     padding: 12px;
-     margin: 10px 0;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     gr.Markdown("# 🚀 Smart GitHub to Hugging Face Space Cloner")
-     gr.Markdown("""
-     Clone any public GitHub repository and convert it to a Hugging Face Space!
-
-     **Features:**
-     - ✅ Automatic handling of Git LFS issues
-     - ✅ Removes problematic LFS pointer files
-     - ✅ Progress updates during cloning
-     - ✅ Support for large repositories
-     - 🤖 **NEW: Smart app.py generation with AI analysis**
-     """)
-
-     # Check for HF_TOKEN
-     if not os.getenv("HF_TOKEN"):
-         gr.Markdown("""
-         <div class="error-box">
-         <strong>❌ HF_TOKEN Required</strong><br>
-         Please set the HF_TOKEN environment variable in your Space settings:
-         <ol>
-         <li>Go to your Space Settings</li>
-         <li>Navigate to "Variables and secrets"</li>
-         <li>Add a new secret: Name = <code>HF_TOKEN</code>, Value = your Hugging Face write token</li>
-         <li>Get a token from: <a href="https://huggingface.co/settings/tokens" target="_blank">https://huggingface.co/settings/tokens</a></li>
-         </ol>
-         </div>
-         """)
-     else:
-         gr.Markdown("""
-         <div class="info-box">
-         <strong>✅ HF_TOKEN Found</strong><br>
-         Ready to clone repositories to your Hugging Face account.
-         </div>
-         """)
-
-     # Rate limit warning
-     gr.Markdown("""
-     <div class="warning-box">
-     <strong>⚠️ Rate Limits for New Users</strong><br>
-     New HuggingFace users have limited Space creation quotas:
-     <ul>
-     <li>You can create only a few Spaces per day initially</li>
-     <li>Limits increase over time with account activity</li>
-     <li>If you hit the limit, wait 17-24 hours or update existing Spaces</li>
-     <li>Contact [email protected] for immediate access needs</li>
-     </ul>
-     </div>
-     """)
-
-     with gr.Row():
-         with gr.Column():
-             repo_git = gr.Textbox(
-                 label="GitHub Repository URL",
-                 placeholder="https://github.com/username/repository",
-                 info="Enter the full URL of the GitHub repository"
-             )
-             repo_hf = gr.Textbox(
-                 label="Hugging Face Space Name",
-                 placeholder="my-awesome-space",
-                 info="Choose a name for your new Space (will be slugified)"
-             )
-             sdk_choices = gr.Radio(
-                 ["gradio", "streamlit", "docker", "static"],
-                 label="Space SDK",
-                 value="gradio",
-                 info="Select the SDK type for your Space"
-             )
-             skip_lfs = gr.Checkbox(
-                 label="Skip Git LFS files",
-                 value=True,  # Default to True due to common LFS issues
-                 info="Recommended if the repo has large files (videos, models, datasets)"
-             )
-             enable_smart_generation = gr.Checkbox(
-                 label="🤖 Enable Smart app.py Generation (Beta)",
-                 value=False,
-                 info="Analyze repository and generate working Gradio interface with AI"
-             )
-
-             gr.Markdown("""
-             <div class="warning-box">
-             <strong>⚠️ About Git LFS</strong><br>
-             Many repos use Git LFS for large files. If these files are missing or causing errors,
-             keeping "Skip Git LFS files" checked will remove them and allow successful cloning.
-             </div>
-             """)
-
-             # Smart Generation info
-             gr.Markdown("""
-             <div class="info-box">
-             <strong>🤖 About Smart Generation</strong><br>
-             When enabled, the system will:
-             <ul>
-             <li>Analyze repository structure and dependencies</li>
-             <li>Search for usage examples and documentation</li>
-             <li>Generate a working Gradio interface using AI</li>
-             <li>Create appropriate requirements.txt</li>
-             </ul>
-             <br>
-             <strong>Required Environment Variables:</strong><br>
-             - <code>OPENAI_API_KEY</code> or <code>FRIENDLI_TOKEN</code> for AI generation<br>
-             - <code>BAPI_TOKEN</code> for web search (optional)
-             </div>
-             """)
-
-             btn = gr.Button("🎯 Clone Repository", variant="primary")
-
-         with gr.Column():
-             output = gr.Textbox(
-                 label="Progress",
-                 lines=15,
-                 elem_classes=["output-box"],
-                 interactive=False,
-                 show_copy_button=True
-             )
-
-             gr.Markdown("""
-             ### 📝 Instructions:
-             1. **Setup**: Make sure HF_TOKEN is set in your Space settings
-             2. **Repository URL**: Enter the full GitHub repository URL
-             3. **Space Name**: Choose a name for your new Space
-             4. **SDK**: Select the appropriate SDK for your Space
-             5. **LFS Files**: Keep "Skip Git LFS files" checked if unsure
-             6. **Smart Generation**: Enable to automatically create a working app.py
-             7. **Clone**: Click "Clone Repository" and monitor progress
-
-             ### 🚨 Troubleshooting:
-
-             <div class="error-box">
-             <strong>LFS pointer file errors?</strong><br>
-             Make sure "Skip Git LFS files" is checked. This removes large file pointers that can cause upload failures.
-             </div>
-
-             - **Missing files after cloning**: The repository used Git LFS for large files that are no longer available
-             - **Slow uploads**: Large repositories take time. Consider using a smaller repository or removing unnecessary files
-             - **Space doesn't work**: Check if removed LFS files were essential (models, data, etc.) and add them manually
-             - **Smart Generation issues**: Make sure you have the required API keys set in environment variables
-             """)
-
-     btn.click(
-         fn=clone,
-         inputs=[repo_git, repo_hf, sdk_choices, skip_lfs, enable_smart_generation],
-         outputs=output
-     )
-
-     # Success tips
-     gr.Markdown("""
-     ### 🌟 Success Tips:
-
-     1. **For ML/AI Projects**: Enable GPU in Space Settings after deployment
-     2. **For Large Files**: Use Git LFS or host models on HuggingFace Hub
-     3. **For Complex Dependencies**: Check build logs and adjust requirements.txt
-     4. **For Private APIs**: Add secrets in Space Settings (Settings → Variables and secrets)
-
-     ### 📊 Supported Project Types:
-     - 🤖 Machine Learning models (PyTorch, TensorFlow, Transformers)
-     - 🖼️ Computer Vision applications
-     - 📝 NLP and text processing
-     - 🎵 Audio processing and generation
-     - 📈 Data visualization and analysis
-     - 🎮 Interactive demos and games
-     """)
-
- if __name__ == "__main__":
-     demo.launch()