petrsovadina committed
Commit 5a6f959 · Parent(s): 73c4b5f
Update app.py

app.py CHANGED
@@ -1,16 +1,38 @@
 import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+# Show title and description.
+st.title("💬 Healthcare Chatbot")
+st.write(
+    "This is a simple chatbot that uses the Llama3-Med42-8B model to generate responses. "
+    "To use this app, simply type your question in the input field below."
+)
+
+# Create a session state variable to store the chat messages. This ensures that the
+# messages persist across reruns.
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+# Display the existing chat messages via `st.chat_message`.
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
 # Load the model and tokenizer
 model_name = "m42-health/Llama3-Med42-8B"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-
+# Create a chat input field to allow the user to enter a message. This will display
+# automatically at the bottom of the page.
+if user_input := st.chat_input("What is up?"):
+
+    # Store and display the current prompt.
+    st.session_state.messages.append({"role": "user", "content": user_input})
+    with st.chat_message("user"):
+        st.markdown(user_input)
 
-#
-user_input = st.text_input("You:", "")
-if user_input:
+    # Prepare input for the model
     messages = [
         {"role": "system", "content": (
             "You are a helpful, respectful and honest medical assistant. "
@@ -29,4 +51,7 @@ if user_input:
     output_ids = model.generate(input_ids, max_length=512, do_sample=True, temperature=0.4, top_k=150, top_p=0.75)
     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
-
+    # Display the model's response
+    with st.chat_message("assistant"):
+        st.markdown(response[len(input_text):])
+    st.session_state.messages.append({"role": "assistant", "content": response[len(input_text):]})
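
Two review notes on the code this commit ends up with. First, the model is loaded at module level, so AutoModelForCausalLM.from_pretrained runs again on every Streamlit rerun, and st.chat_input triggers a rerun for each message. A minimal sketch of the usual fix, assuming the rest of app.py stays as in this commit (the load_model helper name is hypothetical, not from the diff):

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Cache the heavyweight objects: st.cache_resource runs the function once per
# process and hands back the same tokenizer/model on every rerun, instead of
# reloading the 8B checkpoint for each chat message.
@st.cache_resource
def load_model(model_name="m42-health/Llama3-Med42-8B"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model()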
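
Second, the diff view collapses old lines 17–28 / new lines 39–50, which is presumably where input_text and input_ids are built from messages; those lines stay elided here. The visible tail strips the echoed prompt with response[len(input_text):], which only works if tokenizer.decode reproduces input_text character for character. A more robust sketch, assuming input_ids is the (1, seq_len) tensor passed to generate in the collapsed lines:

    # output_ids[0] holds the prompt tokens followed by the newly generated
    # ones; slicing off the prompt length keeps only the generated tokens, so
    # no string arithmetic against input_text is needed.
    generated_ids = output_ids[0][input_ids.shape[-1]:]
    response = tokenizer.decode(generated_ids, skip_special_tokens=True)

    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})

On the generation call itself, note that max_length=512 caps prompt plus completion together, so replies get silently truncated as the chat history grows; max_new_tokens bounds only the completion and avoids that.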