Commit 12f6f51
Parent(s): 9b1b1f5
first commit

Files changed:
- README.md +23 -0
- app.py +96 -0
- requirements.txt +5 -0
README.md CHANGED
@@ -12,3 +12,26 @@ short_description: 'This model is fine tuned on PHI-2 model with OASST1 dataset
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+# Phi-2 Assistant
+
+This is a fine-tuned version of Microsoft's Phi-2 model on the OpenAssistant dataset. The model has been trained to be helpful and provide informative responses while maintaining a conversational tone.
+
+## Features
+- Pre-loaded with example prompts
+- Adjustable generation parameters:
+  - Maximum Length: controls the length of the generated response
+  - Temperature: controls the randomness of the response
+  - Top P: controls the diversity of the response
+- User-friendly interface
+
+## Usage
+1. Type your prompt in the text box or select one of the example prompts
+2. Adjust the generation parameters if desired
+3. Click "Submit" to generate a response
+
+## Model Details
+- Base Model: microsoft/phi-2
+- Fine-tuned on: OpenAssistant/oasst1
+- Training Repository: [PHI2-SFT-OASST1](https://huggingface.co/satyanayak/PHI2-SFT-OASST1)
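A note on the Model Details above: the training repository holds PEFT adapter weights, which app.py below applies on top of the base microsoft/phi-2 checkpoint at startup. If a standalone checkpoint is preferred, a LoRA-style adapter can be merged into the base weights. A minimal sketch, assuming the repo ids listed above and a LoRA adapter (peft's merge_and_unload performs the merge):

# Sketch: merge the OASST1 adapter into Phi-2 for standalone inference.
# Assumes a LoRA-style adapter; repo ids are taken from "Model Details" above.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-2",
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
merged = PeftModel.from_pretrained(base, "satyanayak/PHI2-SFT-OASST1").merge_and_unload()
merged.save_pretrained("phi2-oasst1-merged")  # plain transformers weights; peft not needed at load time
AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True).save_pretrained("phi2-oasst1-merged")

The merged folder can then be loaded with AutoModelForCausalLM.from_pretrained alone, avoiding the small per-token overhead of keeping the adapter separate.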
app.py ADDED
@@ -0,0 +1,96 @@
+import gradio as gr
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+import torch
+
+# Load the base model and tokenizer
+def load_model():
+    base_model = AutoModelForCausalLM.from_pretrained(
+        "microsoft/phi-2",
+        torch_dtype=torch.float16,
+        device_map="auto",
+        trust_remote_code=True
+    )
+
+    # Load the fine-tuned adapter
+    model = PeftModel.from_pretrained(
+        base_model,
+        "satyanayak/PHI2-SFT-OASST1",
+        torch_dtype=torch.float16,
+        device_map="auto"
+    )
+
+    tokenizer = AutoTokenizer.from_pretrained(
+        "microsoft/phi-2",
+        trust_remote_code=True
+    )
+    return model, tokenizer
+
+# Generate response
+def generate_response(prompt, max_length=512, temperature=0.7, top_p=0.9):
+    inputs = tokenizer(f"Human: {prompt}\nAssistant:", return_tensors="pt").to(model.device)
+
+    # Note: max_length counts the prompt tokens plus the newly generated tokens
+    outputs = model.generate(
+        **inputs,
+        max_length=max_length,
+        temperature=temperature,
+        top_p=top_p,
+        do_sample=True,
+        pad_token_id=tokenizer.eos_token_id
+    )
+
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    # Extract only the Assistant's response
+    response = response.split("Assistant:")[-1].strip()
+    return response
+
+# Example prompts
+EXAMPLE_PROMPTS = [
+    "What is the capital of France?",
+    "Write a short poem about autumn.",
+    "Explain quantum computing in simple terms.",
+    "Give me a recipe for chocolate chip cookies.",
+    "What are the benefits of regular exercise?"
+]
+
+# Load model and tokenizer
+print("Loading model...")
+model, tokenizer = load_model()
+print("Model loaded!")
+
+# Create Gradio interface
+demo = gr.Interface(
+    fn=generate_response,
+    inputs=[
+        gr.Textbox(
+            label="Enter your prompt",
+            placeholder="Type your message here...",
+            lines=4
+        ),
+        gr.Slider(
+            minimum=64,
+            maximum=1024,
+            value=512,
+            step=64,
+            label="Maximum Length"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.7,
+            step=0.1,
+            label="Temperature"
+        ),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.9,
+            step=0.1,
+            label="Top P"
+        )
+    ],
+    outputs=gr.Textbox(label="Response", lines=10),
+    # With multiple input components, Gradio expects each example to be a
+    # list with one value per input, not a bare string
+    examples=[[p, 512, 0.7, 0.9] for p in EXAMPLE_PROMPTS],
+    title="Phi-2 Assistant",
+    description="This is a fine-tuned version of Phi-2 on the OpenAssistant dataset. Enter your prompt and adjust generation parameters as needed.",
+)
+
+if __name__ == "__main__":
+    demo.launch()
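Once the Space is up, the same endpoint can also be called programmatically. A minimal sketch using gradio_client; the Space id below is a placeholder assumption (this commit names only the model repo), and /predict is the endpoint name a default gr.Interface exposes:

# Sketch: query the running Space from another machine.
# "satyanayak/phi2-assistant" is a hypothetical Space id; substitute the real one.
from gradio_client import Client

client = Client("satyanayak/phi2-assistant")
answer = client.predict(
    "What is the capital of France?",  # prompt textbox
    512,                               # Maximum Length slider
    0.7,                               # Temperature slider
    0.9,                               # Top P slider
    api_name="/predict",
)
print(answer)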
requirements.txt ADDED
@@ -0,0 +1,5 @@
+gradio>=4.0.0
+torch>=2.0.1
+transformers>=4.31.0
+peft>=0.4.0
+accelerate>=0.21.0
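To reproduce the Space locally, place the three files from this commit in one directory, install the dependencies with pip install -r requirements.txt, and start the app with python app.py; Gradio serves on port 7860 by default. A CUDA GPU is recommended, since the model is loaded in float16 with device_map="auto".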