petrsovadina committed on
Commit d1a5ff0
1 Parent(s): f6ca63b

Create app.py

Files changed (1)
  1. app.py +32 -0
app.py ADDED
@@ -0,0 +1,32 @@
+ import streamlit as st
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Load the model and tokenizer
+ model_name = "m42-health/Llama3-Med42-8B"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ st.title('Healthcare Chatbot')
+
+ # User input
+ user_input = st.text_input("You:", "")
+ if user_input:
+     messages = [
+         {"role": "system", "content": (
+             "You are a helpful, respectful and honest medical assistant. "
+             "Always answer as helpfully as possible, while being safe. "
+             "Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. "
+             "Please ensure that your responses are socially unbiased and positive in nature. "
+             "If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. "
+             "If you don’t know the answer to a question, please don’t share false information."
+         )},
+         {"role": "user", "content": user_input}
+     ]
+     input_text = " ".join([f"{message['role']}: {message['content']}" for message in messages])
+     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+
+     # Generate the response
+     output_ids = model.generate(input_ids, max_length=512, do_sample=True, temperature=0.4, top_k=150, top_p=0.75)
+     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+     st.text_area("Bot:", response[len(input_text):])
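Note on the prompt construction: the committed code joins "role: content" strings into one plain string, which is not the chat format Llama 3 style instruct models are trained on, and it strips the reply by character offset. A minimal sketch (not part of this commit, assuming the tokenizer for m42-health/Llama3-Med42-8B ships a chat template, as instruct checkpoints normally do; the example question is illustrative only) of the same generation step using transformers' apply_chat_template:

from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "m42-health/Llama3-Med42-8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

messages = [
    {"role": "system", "content": "You are a helpful, respectful and honest medical assistant."},
    {"role": "user", "content": "What are common symptoms of dehydration?"},
]

# Render the conversation with the model's own chat template and append the assistant header.
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# Bound the reply with max_new_tokens instead of max_length (which counts prompt + reply).
output_ids = model.generate(input_ids, max_new_tokens=512, do_sample=True,
                            temperature=0.4, top_k=150, top_p=0.75)

# Decode only the newly generated tokens so the prompt is not echoed back.
response = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(response)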