alaamostafa committed on
Commit
f1c9a13
·
verified ·
1 Parent(s): 438cd50

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -53
app.py CHANGED
@@ -1,60 +1,22 @@
1
  import gradio as gr
2
- from unsloth import FastLanguageModel
3
  import torch
4
 
5
- # Load your model
6
- model, tokenizer = FastLanguageModel.from_pretrained(
7
- model_name="alaamostafa/Mistral-7B-Unsloth",
8
- max_seq_length=2048,
9
- load_in_4bit=True,
10
- )
11
 
12
- # Enable faster inference
13
- FastLanguageModel.for_inference(model)
 
 
14
 
15
- # Set up chat template
16
- from unsloth.chat_templates import get_chat_template
17
- tokenizer = get_chat_template(
18
- tokenizer,
19
- chat_template="chatml",
20
- mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"},
21
- map_eos_token=True,
22
- )
23
-
24
- # Neuroscience example prompts
25
- example_prompts = [
26
- "Recent advances in neuroimaging suggest that",
27
- "The role of dopamine in learning and memory involves",
28
- "Explain the concept of neuroplasticity in simple terms",
29
- "What are the key differences between neurons and glial cells?"
30
- ]
31
-
32
- # Text generation function with parameters
33
- def generate_text(prompt, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
34
- # Prepare input
35
- inputs = tokenizer(prompt, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
36
-
37
- # Generate response
38
- outputs = model.generate(
39
- input_ids=inputs["input_ids"],
40
- max_new_tokens=int(max_new_tokens),
41
- temperature=float(temperature),
42
- top_p=float(top_p),
43
- top_k=int(top_k),
44
- repetition_penalty=float(repetition_penalty),
45
- use_cache=True
46
- )
47
-
48
- response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
49
- return response
50
-
51
- # Create Gradio interface with layout matching your second image
52
  with gr.Blocks() as demo:
53
  with gr.Row():
 
54
  with gr.Column():
55
  prompt = gr.Textbox(
56
  label="Enter your prompt",
57
- value="",
58
  lines=5
59
  )
60
 
@@ -88,12 +50,26 @@ with gr.Blocks() as demo:
88
  minimum=1.0, maximum=2.0, value=1.1, step=0.1
89
  )
90
 
 
91
  with gr.Column():
92
  output = gr.Textbox(
93
  label="Generated Text",
94
  lines=20
95
  )
96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  # Set up event handlers
98
  generate_btn.click(
99
  fn=generate_text,
@@ -106,12 +82,6 @@ with gr.Blocks() as demo:
106
  inputs=None,
107
  outputs=[prompt, output]
108
  )
109
-
110
- # Example prompts
111
- gr.Examples(
112
- examples=example_prompts,
113
- inputs=prompt
114
- )
115
 
116
  # Launch the app
117
  demo.launch()
 
1
  import gradio as gr
 
2
  import torch
3
 
4
+ # This is a simplified version showing just the UI structure
5
+ # You'll need to integrate your actual model loading and generation code
 
 
 
 
6
 
7
+ def generate_text(prompt, max_length, temperature, top_p, top_k, repetition_penalty):
8
+ # Your text generation function would go here
9
+ # This is just a placeholder that returns the prompt as a demonstration
10
+ return f"Generated text based on: {prompt}"
11
 
12
+ # Create the interface with a layout matching image 2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  with gr.Blocks() as demo:
14
  with gr.Row():
15
+ # Left column - Input
16
  with gr.Column():
17
  prompt = gr.Textbox(
18
  label="Enter your prompt",
19
+ placeholder="Type your prompt here...",
20
  lines=5
21
  )
22
 
 
50
  minimum=1.0, maximum=2.0, value=1.1, step=0.1
51
  )
52
 
53
+ # Right column - Output
54
  with gr.Column():
55
  output = gr.Textbox(
56
  label="Generated Text",
57
  lines=20
58
  )
59
 
60
+ # Example prompts
61
+ example_prompts = [
62
+ "Recent advances in neuroimaging suggest that",
63
+ "The role of dopamine in learning and memory involves",
64
+ "Explain the concept of neuroplasticity in simple terms",
65
+ "What are the key differences between neurons and glial cells?"
66
+ ]
67
+
68
+ gr.Examples(
69
+ examples=example_prompts,
70
+ inputs=prompt
71
+ )
72
+
73
  # Set up event handlers
74
  generate_btn.click(
75
  fn=generate_text,
 
82
  inputs=None,
83
  outputs=[prompt, output]
84
  )
 
 
 
 
 
 
85
 
86
  # Launch the app
87
  demo.launch()