import streamlit as st
from llama_cpp import Llama

# Download the quantized GGUF weights from the Hugging Face Hub and load them;
# the glob pattern selects the Q4_K_M quantization of the model.
llm = Llama.from_pretrained(
    repo_id="Mykes/med_gemma7b_gguf",
    filename="*Q4_K_M.gguf",
    verbose=False,
)

basic_prompt = "Below is the context which is your conversation history and the last user question. Write a response according the context and question. ### Context: user: Ответь мне на вопрос о моем здоровье. assistant: Конечно! Какой у Вас вопрос? ### Question: {question} ### Response:"
input_text = st.text_input("Enter your question")
if input_text:
    model_input = basic_prompt.format(question=input_text)
    output = llm(
        model_input,             # formatted prompt
        max_tokens=32,           # generate up to 32 tokens; set to None to run to the end of the context window
        stop=["<end_of_turn>"],  # stop at Gemma's end-of-turn marker
        echo=False,              # return only the completion so the prompt template is not shown to the user
    )  # generate a completion; llm.create_completion(...) is the equivalent explicit call
    st.write(output["choices"][0]["text"])