ayyuce commited on
Commit
115a34b
·
verified ·
1 Parent(s): 75c9fa7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -1,11 +1,12 @@
1
  import streamlit as st
2
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
 
4
  @st.cache_resource(show_spinner=False)
5
  def load_generator():
6
  model_name = "ayyuce/NeoProtein-GPT"
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
- model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cpu")
9
  gen_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
  return gen_pipeline
11
 
@@ -19,4 +20,4 @@ if st.button("Generate Protein Sequence"):
19
  outputs = load_generator()(user_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
20
  generated_text = outputs[0]["generated_text"]
21
  st.subheader("Generated Sequence:")
22
- st.code(generated_text, language="python")
 
1
  import streamlit as st
2
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, pipeline
3
 
4
@st.cache_resource(show_spinner=False)
def load_generator():
    """Build and cache a text-generation pipeline for NeoProtein-GPT.

    Returns:
        A Hugging Face ``text-generation`` pipeline wrapping the model and
        tokenizer. Cached with ``st.cache_resource`` so the (expensive)
        download/load happens at most once per Streamlit server process.
    """
    repo_id = "ayyuce/NeoProtein-GPT"
    # Resolve the config with an explicit architecture override.
    # NOTE(review): presumably the hub repo's config.json lacks/mismatches
    # `model_type`, hence the "gpt2" override — confirm against the repo.
    cfg = AutoConfig.from_pretrained(repo_id, model_type="gpt2")
    tok = AutoTokenizer.from_pretrained(repo_id)
    # Pin the model to CPU (device_map requires `accelerate` to be installed).
    lm = AutoModelForCausalLM.from_pretrained(repo_id, config=cfg, device_map="cpu")
    return pipeline("text-generation", model=lm, tokenizer=tok)
12
 
 
20
  outputs = load_generator()(user_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
21
  generated_text = outputs[0]["generated_text"]
22
  st.subheader("Generated Sequence:")
23
+ st.code(generated_text, language="python")