Paulwalker4884 committed on
Commit
e7a08cc
·
1 Parent(s): 7ee8d4f

Initial commit

Browse files
Files changed (2) hide show
  1. app.py +32 -31
  2. requirements.txt +1 -0
app.py CHANGED
@@ -2,40 +2,32 @@ import os
2
  import logging
3
  import sqlite3
4
  from datetime import datetime
5
- import torch
6
  import gradio as gr
7
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
8
- from huggingface_hub import login
9
 
10
  # تنظیم logging
11
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
12
  logger = logging.getLogger(__name__)
13
 
14
- # تنظیم توکن
15
  logger.info("Starting application setup")
16
- hf_token = os.getenv("HF_TOKEN")
17
- if not hf_token:
18
- logger.error("HF_TOKEN not set")
19
- raise ValueError("HF_TOKEN is required. Set it in Spaces Secrets (Settings > Repository secrets > Add secret > Name: HF_TOKEN).")
20
- login(token=hf_token)
21
- logger.info("Hugging Face login successful")
22
 
23
- # لود مدل GPT-Neo-1.3B
24
- logger.info("Loading EleutherAI/gpt-neo-1.3B model")
25
  try:
26
- tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B", cache_dir="/tmp/hf_cache")
27
- model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B", cache_dir="/tmp/hf_cache", torch_dtype=torch.float32, device_map="auto")
28
- code_gen = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, temperature=0.6, top_p=0.95)
29
- logger.info("EleutherAI/gpt-neo-1.3B model loaded")
 
30
  except Exception as e:
31
- logger.error(f"Failed to load GPT-Neo model: {e}")
32
  raise
33
 
34
- # آزادسازی حافظه
35
- import gc
36
- gc.collect()
37
- torch.cuda.empty_cache()
38
-
39
  # مسیر فایل دیتابیس
40
  DB_PATH = "chris.db"
41
 
@@ -103,10 +95,20 @@ def generate_code(text, language):
103
  try:
104
  prompt = f"Write a complete, correct, and well-explained code in {language} to: {text}"
105
  logger.info(f"Generated prompt: {prompt}")
106
- result = code_gen(prompt, max_new_tokens=512, temperature=0.6, top_p=0.95, do_sample=True)[0]['generated_text']
 
 
 
 
 
 
 
 
 
 
107
 
108
- # استخراج کد از خروجی
109
- lines = result.split('\n')
110
  code = []
111
  is_code = False
112
  for line in lines:
@@ -116,14 +118,13 @@ def generate_code(text, language):
116
  elif is_code:
117
  code.append(line)
118
 
119
- code_output = "\n".join(code).strip()
120
- if not code_output:
121
  logger.warning("Generated code is empty")
122
- code_output = "خطا: کد تولیدشده خالی است"
123
 
124
- logger.info(f"Generated code: {code_output}")
125
- torch.cuda.empty_cache()
126
- return prompt, code_output
127
  except Exception as e:
128
  logger.error(f"Code generation error: {e}")
129
  return f"Error prompt: {text}", f"Code generation error: {e}"
 
2
  import logging
3
  import sqlite3
4
  from datetime import datetime
 
5
  import gradio as gr
6
+ from openai import OpenAI
 
7
 
8
  # تنظیم logging
9
  logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
10
  logger = logging.getLogger(__name__)
11
 
12
+ # تنظیم API Key و مدل
13
  logger.info("Starting application setup")
14
+ qwen_api_key = os.getenv("QWEN_API_KEY")
15
+ qwen_model = os.getenv("QWEN_MODEL", "qwen3-coder-plus") # پیش‌فرض: qwen3-coder-plus
16
+ if not qwen_api_key:
17
+ logger.error("QWEN_API_KEY not set")
18
+ raise ValueError("QWEN_API_KEY is required. Set it in Spaces Secrets (Settings > Repository secrets > Add secret > Name: QWEN_API_KEY).")
 
19
 
20
+ # تنظیم کلاینت OpenAI برای DashScope
 
21
  try:
22
+ client = OpenAI(
23
+ api_key=qwen_api_key,
24
+ base_url="https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
25
+ )
26
+ logger.info(f"Initialized OpenAI client for model {qwen_model}")
27
  except Exception as e:
28
+ logger.error(f"Failed to initialize OpenAI client: {e}")
29
  raise
30
 
 
 
 
 
 
31
  # مسیر فایل دیتابیس
32
  DB_PATH = "chris.db"
33
 
 
95
  try:
96
  prompt = f"Write a complete, correct, and well-explained code in {language} to: {text}"
97
  logger.info(f"Generated prompt: {prompt}")
98
+ response = client.chat.completions.create(
99
+ model=qwen_model,
100
+ messages=[
101
+ {"role": "system", "content": "You are a coding assistant that generates complete and well-explained code."},
102
+ {"role": "user", "content": prompt}
103
+ ],
104
+ max_tokens=1024,
105
+ temperature=0.7,
106
+ top_p=0.8
107
+ )
108
+ code_output = response.choices[0].message.content.strip()
109
 
110
+ # استخراج کد از خروجی (در صورت وجود بلاک کد)
111
+ lines = code_output.split('\n')
112
  code = []
113
  is_code = False
114
  for line in lines:
 
118
  elif is_code:
119
  code.append(line)
120
 
121
+ final_code = "\n".join(code).strip() if code else code_output
122
+ if not final_code:
123
  logger.warning("Generated code is empty")
124
+ final_code = "خطا: کد تولیدشده خالی است"
125
 
126
+ logger.info(f"Generated code: {final_code}")
127
+ return prompt, final_code
 
128
  except Exception as e:
129
  logger.error(f"Code generation error: {e}")
130
  return f"Error prompt: {text}", f"Code generation error: {e}"
requirements.txt CHANGED
@@ -10,3 +10,4 @@ numpy
10
  transformers
11
  accelerate
12
  gradio
 
 
10
  transformers
11
  accelerate
12
  gradio
13
+ openai