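"""Gradio playground Space for tiiuae/Falcon3-Mamba-7B-Instruct.

Responses stream token by token: generation runs on a background thread
and a TextIteratorStreamer feeds partial text back to the UI.
"""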
import subprocess
import shlex
import os
from threading import Thread

import spaces  # Hugging Face ZeroGPU helper; imported early so its CUDA patching applies before torch initializes
import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# Install prebuilt wheels for Mamba's fused CUDA kernels at startup.
# The wheels are pinned to CUDA 12.2 / torch 2.3 / Python 3.10 to match
# the Space's runtime image.
def install_mamba():
    subprocess.run(shlex.split("pip install https://github.com/Dao-AILab/causal-conv1d/releases/download/v1.4.0/causal_conv1d-1.4.0+cu122torch2.3cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"))
    subprocess.run(shlex.split("pip install https://github.com/state-spaces/mamba/releases/download/v2.2.2/mamba_ssm-2.2.2+cu122torch2.3cxx11abiFALSE-cp310-cp310-linux_x86_64.whl"))

install_mamba()
MODEL = "tiiuae/Falcon3-Mamba-7B-Instruct"
TITLE = "<h1><center>Falcon3-Mamba-7B-Instruct playground</center></h1>"
SUB_TITLE = """<center>Playground of Falcon3-Mamba-7B-Instruct</center>"""
# The system prompt is read from a Space secret; the fallback string is an
# assumption so chat templating still works when the secret is not set.
SYSTEM_PROMPT = os.getenv('SYSTEM_PROMPT', 'You are a helpful assistant.')
CSS = """
.duplicate-button {
    margin: auto !important;
    color: white !important;
    background: black !important;
    border-radius: 100vh !important;
}
h3 {
    text-align: center;
}
/* Fix for chat container */
.chat-container {
    height: 600px !important;
    overflow-y: auto !important;
    flex-direction: column !important;
}
.messages-container {
    flex-grow: 1 !important;
    overflow-y: auto !important;
    padding-right: 10px !important;
}
/* Ensure consistent height */
.contain {
    height: 100% !important;
}
"""
END_MESSAGE = """
\n
**The conversation has reached its end. Please press "Clear" to start a new conversation.**
"""
device = "cuda"  # for GPU usage, or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    torch_dtype=torch.bfloat16,
).to(device)
@spaces.GPU  # request a ZeroGPU slot per call; a no-op on non-ZeroGPU hardware
def stream_chat(
    message: str,
    history: list,
    temperature: float = 0.3,
    max_new_tokens: int = 1024,
    top_p: float = 1.0,
    top_k: int = 20,
    penalty: float = 1.2,
):
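    """Build the chat prompt from history and stream the model's reply.

    model.generate() runs on a background thread; the TextIteratorStreamer
    yields decoded fragments as they arrive, so the UI updates incrementally.
    """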
    print(f'message: {message}')
    print(f'history: {history}')

    # The system prompt belongs exactly once, at the head of the
    # conversation, not repeated before every history turn.
    conversation = [{"role": "system", "content": SYSTEM_PROMPT}]
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer},
        ])
    conversation.append({"role": "user", "content": message})
    input_text = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    streamer = TextIteratorStreamer(tokenizer, timeout=40.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=inputs,
        max_new_tokens=max_new_tokens,
        do_sample=temperature > 0,  # greedy decoding when temperature is 0
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=penalty,  # discourage verbatim repetition
        streamer=streamer,
        pad_token_id=11,
    )
    # generate() disables gradients internally; run it on a worker thread so
    # this generator can consume the streamer concurrently.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        # scrub role markers that occasionally leak into the generated text
        buffer = buffer.replace("\nUser", "")
        buffer = buffer.replace("\nSystem", "")
        yield buffer

    print(f'response: {buffer}')
with gr.Blocks(css=CSS, theme="soft") as demo:
    gr.HTML(TITLE)
    gr.HTML(SUB_TITLE)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
    chat_interface = gr.ChatInterface(
        fn=stream_chat,
        chatbot=gr.Chatbot(
            height=600,
            container=True,
            elem_classes=["chat-container"],
        ),
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        # slider order must match stream_chat's extra parameters
        additional_inputs=[
            gr.Slider(minimum=0, maximum=1, step=0.1, value=0.3, label="Temperature", render=False),
            gr.Slider(minimum=128, maximum=32768, step=1, value=1024, label="Max new tokens", render=False),
            gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="top_p", render=False),
            gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_k", render=False),
            gr.Slider(minimum=0.0, maximum=2.0, step=0.1, value=1.2, label="Repetition penalty", render=False),
        ],
        examples=[
            ["Hello there, can you suggest a few places to visit in the UAE?"],
            ["What is the UAE known for?"],
        ],
        cache_examples=False,
    )
if __name__ == "__main__":
    demo.launch()
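# Local usage sketch (an assumption; the Space normally provides SYSTEM_PROMPT
# as a secret and the CUDA 12.2 / torch 2.3 runtime the Mamba wheels expect):
#   SYSTEM_PROMPT="You are a helpful assistant." python app.py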

