Add Gemma 3 270M model
app.py
CHANGED
@@ -14,10 +14,11 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
 # --- Available Models ---
 # We will use 'instruct' or 'chat' models because they are good at following instructions
 AVAILABLE_MODELS = {
+    "Gemma 3 270M Instruct": "google/gemma-3-270m-it",
+    "Gemma 2 9B Instruct": "google/gemma-2-9b-it",
     "Llama 3 8B Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
     "Llama 3.1 8B Instruct": "meta-llama/Llama-3.1-8B-Instruct",
     "Llama 3.2 3B Instruct": "meta-llama/Llama-3.2-3B-Instruct",
-    "Gemma 2 9B Instruct": "google/gemma-2-9b-it",
     "Mistral 7B Instruct": "mistralai/Mistral-7B-Instruct-v0.3",
 }
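For context, here is a minimal sketch of how an entry in AVAILABLE_MODELS could be consumed. It assumes the Space loads models locally with the transformers text-generation pipeline; the actual app.py may instead call the Hugging Face Inference API, and the prompt and generation parameters below are illustrative only.

# Minimal sketch (assumption, not the Space's actual inference code):
# load the newly added model id from AVAILABLE_MODELS with transformers.
from transformers import pipeline

AVAILABLE_MODELS = {
    "Gemma 3 270M Instruct": "google/gemma-3-270m-it",
}

# Gemma 3 support requires a recent transformers release, and gated models
# may need `huggingface-cli login` first.
generator = pipeline(
    "text-generation",
    model=AVAILABLE_MODELS["Gemma 3 270M Instruct"],
)

# Instruct/chat models expect chat-formatted messages rather than raw text.
messages = [{"role": "user", "content": "Explain in one sentence what an instruct model is."}]
output = generator(messages, max_new_tokens=64)
print(output[0]["generated_text"])

If the Space calls the serverless Inference API instead, only the repo id string changes hands; the dictionary keys are presumably just the display labels shown in the model picker.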