Ahil1991 committed on
Commit
67eb0b2
·
verified ·
1 Parent(s): 10f17c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py CHANGED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from llama_cpp import Llama
3
+
4
+ # Load your LLaMA model
5
+ llm = Llama.from_pretrained(
6
+ repo_id="Ahil1991/Bee-V.01-7B",
7
+ filename="Bee-V.01.gguf",
8
+ )
9
+
10
+ # Function to handle user input and generate a chat completion
11
+ def chat_with_model(user_input):
12
+ messages = [
13
+ {
14
+ "role": "user",
15
+ "content": user_input
16
+ }
17
+ ]
18
+
19
+ # Get response from the model
20
+ response = llm.create_chat_completion(
21
+ messages=messages
22
+ )
23
+
24
+ # Extract the content from the response
25
+ return response['choices'][0]['message']['content']
26
+
27
+ # Create a Gradio interface
28
+ iface = gr.Interface(
29
+ fn=chat_with_model, # The function to handle input
30
+ inputs="text", # Input: text from user
31
+ outputs="text", # Output: response as text
32
+ title="Chat with LLaMA Model", # Title for the Gradio app
33
+ description="Ask anything and get responses from LLaMA!"
34
+ )
35
+
36
+ # Launch the Gradio interface
37
+ iface.launch()