Matteo Peluso
committed on
Commit
·
49c5855
1
Parent(s):
c8e874d
Utils and such
Browse files
app.py
CHANGED
|
@@ -1,4 +1,20 @@
|
|
| 1 |
import streamlit as st
|
|
|
|
|
|
|
| 2 |
|
| 3 |
-
|
| 4 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
+
from utils import validate_sequence, predict
|
| 3 |
+
from model import model
|
| 4 |
|
| 5 |
+
def main():
    """Render the Streamlit UI: read an amino-acid sequence, validate it,
    and display the model's solubility prediction.
    """
    st.title("AminoAnalytica - ML Experience")

    # User input. Strip surrounding whitespace so a sequence pasted with a
    # trailing newline or spaces is not rejected by the validator.
    sequence = st.text_input("Enter your amino acid sequence:").strip()

    if st.button("Analyze Sequence"):
        if validate_sequence(sequence):
            prediction = predict(model, sequence)
            st.write("### Results")
            st.table({"Property": ["Solubility"], "Model Output": [prediction]})
        else:
            st.error("Invalid sequence. Please enter a valid amino acid sequence of up to 200 characters.")

if __name__ == "__main__":
    main()
|
model.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This file could be used for more complex model handling if needed.
# For simplicity, it's not used extensively in this basic example.
from utils import load_model

# Loaded once at import time so every importer shares a single model
# instance; see utils.load_model for the actual deserialization.
model = load_model()
|
requirements.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit
|
| 2 |
+
torch
|
utils.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
def validate_sequence(sequence):
    """Return True if *sequence* is a non-empty string of standard uppercase
    one-letter amino-acid codes, at most 200 characters long.
    """
    valid_amino_acids = set("ACDEFGHIKLMNPQRSTVWY")  # 20 standard amino acids
    # Reject empty input explicitly: all(...) is vacuously True for "",
    # which previously let an empty sequence through to the model.
    return 0 < len(sequence) <= 200 and all(aa in valid_amino_acids for aa in sequence)
|
| 6 |
+
|
| 7 |
+
def load_model():
    """Load the serialized PyTorch model from ``model.pth`` and put it in
    eval mode.

    ``map_location="cpu"`` lets a checkpoint saved on a GPU machine load on
    CPU-only deployment hosts. ``weights_only=False`` preserves the original
    full-object unpickling behavior on torch >= 2.6, where the default
    flipped to True.

    NOTE(security): full unpickling can execute arbitrary code — model.pth
    must come from a trusted source.
    """
    # Assuming the model is a simple PyTorch model, adjust the path as needed
    model = torch.load('model.pth', map_location="cpu", weights_only=False)
    model.eval()  # disable dropout/batch-norm training behavior for inference
    return model
|
| 12 |
+
|
| 13 |
+
def predict(model, sequence):
    """Run *model* on *sequence* and return the prediction as a Python scalar.

    The sequence is encoded as a 1-D float32 tensor of character ordinals —
    a dummy featurization; replace with the model's actual input handling.
    Assumes the model output has exactly one element (``.item()`` raises
    otherwise).
    """
    tensor = torch.tensor([ord(char) for char in sequence], dtype=torch.float32)
    # Inference only: skip autograd graph construction.
    with torch.no_grad():
        output = model(tensor)
    return output.item()
|