Canstralian committed on
Commit
4f8607d
·
verified ·
1 Parent(s): 0b0c037

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -67
app.py CHANGED
@@ -1,67 +1,60 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
# Inference API client for the chat model used by `respond`.
MODEL_ID = "HuggingFaceH4/zephyr-7b-beta"
client = InferenceClient(MODEL_ID)
6
-
7
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message* given the prior *history*.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; falsy entries on either side are skipped.
    system_message : str
        System prompt; when falsy, a default Python-assistant prompt is used.
    max_tokens, temperature, top_p :
        Sampling controls forwarded to the Inference API.

    Yields
    ------
    str
        The accumulated response so far, once per streamed chunk (suitable
        for a streaming chat UI).
    """
    system_message = system_message or "You are a friendly Python code writing Chatbot. Assist with Python programming tasks, debugging, and code optimization. Provide solutions for Python-related queries, help with libraries, algorithms, and best practices, and generate clean, efficient code for various applications."

    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""
    # Fixes vs. original: the loop variable no longer shadows the `message`
    # parameter, and chunks whose delta carries no content are skipped —
    # `delta.content` can be None mid-stream, which crashed `response += token`.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
            yield response
37
-
38
-
39
def clear_chat():
    """Reset the chat UI: an empty history and a blank response box."""
    empty_history: list = []
    empty_response = ""
    return empty_history, empty_response
41
-
42
# Chatbot interface.
#
# NOTE(review): the original gr.Interface wiring was broken: `respond`
# expects (message, history, system_message, max_tokens, temperature, top_p),
# but the inputs list supplied (system_message, message, sliders) with no
# history component at all, so arguments arrived in the wrong parameters.
# The two declared outputs (Chatbot + Textbox) also did not match the single
# streamed string the generator yields, and the clear button passed component
# ids as strings, which `.click(outputs=...)` cannot resolve.
# gr.ChatInterface passes (message, history, *additional_inputs) — exactly
# matching `respond`'s signature — streams generator output natively, and
# provides a built-in clear control, so the broken manual wiring is removed.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly Python code writing Chatbot. Assist with Python programming tasks, debugging, and code optimization. Provide solutions for Python-related queries, help with libraries, algorithms, and best practices, and generate clean, efficient code for various applications.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()
 
1
+ import os
2
+ import streamlit as st
3
+ from huggingface_hub import HfApi, SpaceHardware
4
+
5
# Hugging Face credentials and target Space.
# HF_TOKEN must be supplied via the environment (e.g. a Space secret).
HF_TOKEN = os.getenv("HF_TOKEN")
# Space whose hardware is managed below.  NOTE(review): the original
# hard-coded a placeholder string; reading it from the environment (with the
# same placeholder as fallback) lets deployments configure it without editing
# code, while preserving the original default.
TRAINING_SPACE_ID = os.getenv("TRAINING_SPACE_ID", "your_space_id_here")

# Hub REST API client, authenticated with the token above.
api = HfApi(token=HF_TOKEN)
11
+
12
def get_task():
    """Return the currently scheduled task, or None when nothing is pending.

    Placeholder: real task-queue lookup logic belongs here.
    """
    pending = None  # no task source is wired up yet
    return pending
16
+
17
def add_task(task):
    """Record *task* as scheduled and confirm it in the UI (placeholder)."""
    confirmation = f"Task '{task}' added!"
    st.write(confirmation)
21
+
22
def mark_as_done(task):
    """Flag *task* as finished and confirm it in the UI (placeholder)."""
    completion_note = f"Task '{task}' completed!"
    st.write(completion_note)
26
+
27
def train_and_upload(task):
    """Run the (placeholder) fine-tuning job for *task* and report it in the UI."""
    status = f"Training model with task: {task}"
    st.write(status)
31
+
32
# --- Main control flow --------------------------------------------------
# Look for a scheduled task.  With no task pending, show a small form that
# queues one and requests GPU hardware; with a task pending, train only once
# the Space is actually on a T4, then scale back down to CPU.
task = get_task()

if task is None:
    def request_task_and_gpu(new_task):
        """Queue *new_task* and ask the Hub for T4 hardware for this Space."""
        add_task(new_task)
        api.request_space_hardware(
            repo_id=TRAINING_SPACE_ID, hardware=SpaceHardware.T4_MEDIUM
        )

    requested_name = st.text_input("Enter task name", "")
    if st.button("Request Task"):
        request_task_and_gpu(requested_name)
else:
    runtime = api.get_space_runtime(repo_id=TRAINING_SPACE_ID)
    if runtime.hardware == SpaceHardware.T4_MEDIUM:
        # GPU is available: run the job and mark it finished.
        train_and_upload(task)
        mark_as_done(task)
        # Scale back down so the GPU is not held while idle.
        api.request_space_hardware(
            repo_id=TRAINING_SPACE_ID, hardware=SpaceHardware.CPU_BASIC
        )
    else:
        # Not on GPU yet: request the upgrade.
        api.request_space_hardware(
            repo_id=TRAINING_SPACE_ID, hardware=SpaceHardware.T4_MEDIUM
        )