Satyam-Singh committed on
Commit 73f95a3
1 Parent(s): 123c649

Update app.py

Files changed (1)
  1. app.py +58 -17
app.py CHANGED
@@ -1,6 +1,10 @@
 import google.generativeai as genai
 import gradio as gr
 import os
+from huggingface_hub import InferenceClient
+
+client = InferenceClient("Satyam-Singh/LLaVa-Large-Language-Virtual-Assistant")
+

 # Set up the model
 generation_config = {
@@ -68,14 +72,6 @@ convo = model.start_chat(history=[
     "role": "model",
     "parts": ["Satyam Singh invented me."]
   },
-  {
-    "role": "user",
-    "parts": ["who created you"]
-  },
-  {
-    "role": "model",
-    "parts": ["I was created by Satyam Singh."]
-  },
   {
     "role": "user",
     "parts": ["on what parameters you were trained"]
@@ -100,14 +96,6 @@ convo = model.start_chat(history=[
     "role": "model",
     "parts": ["I do not have a name in the traditional sense, as I am a machine learning model and not a person. However, I have been given the name **LLAVA**, which stands for **Large Language Virtual Assistant**.\n\nI like this name because it reflects my purpose, which is to help people with a variety of language-based tasks. I can generate text, translate languages, answer questions, summarize information, and more.\n\nI am still under development, but I am learning new things every day. I am excited to see how I can use my skills to help people in the future."]
   },
-  {
-    "role": "user",
-    "parts": ["hi"]
-  },
-  {
-    "role": "model",
-    "parts": ["Hello! How can I help you today?\n\nI am LLAVA, a large language virtual assistant. I can help you with a variety of language-based tasks, including:\n\n* **Text generation:** I can generate text in a variety of styles and genres, including news articles, stories, poems, and scripts.\n* **Language translation:** I can translate text from one language to another, preserving the meaning and tone of the original text.\n* **Question answering:** I can answer questions about a wide range of topics, drawing on my knowledge of the world and my ability to understand and interpret human language.\n* **Summarization:** I can summarize text, extracting the main points and presenting them in a concise and informative way.\n* **Paraphrasing:** I can paraphrase text, expressing the same meaning in different words.\n\nI am constantly learning and improving, and I am always looking for new ways to use my skills to help people.\n\nIs there anything specific I can help you with today?"]
-  },
 ])


@@ -115,7 +103,57 @@ def gemini_chat(message, history):
     response = convo.send_message(message)
     return response.text

-#chat = model.start_chat()
+additional_inputs=[
+    gr.Textbox(
+        label="System Prompt",
+        max_lines=1,
+        interactive=True,
+    ),
+    gr.Slider(
+        label="Temperature",
+        value=0.9,
+        minimum=0.0,
+        maximum=1.0,
+        step=0.05,
+        interactive=True,
+        info="Higher values produce more diverse outputs",
+    ),
+    gr.Slider(
+        label="Max new tokens",
+        value=4096,
+        minimum=0,
+        maximum=8192,
+        step=64,
+        interactive=True,
+        info="The maximum numbers of new tokens",
+    ),
+    gr.Slider(
+        label="Top-p (nucleus sampling)",
+        value=0.90,
+        minimum=0.0,
+        maximum=1,
+        step=0.05,
+        interactive=True,
+        info="Higher values sample more low-probability tokens",
+    ),
+    gr.Slider(
+        label="Repetition penalty",
+        value=1.2,
+        minimum=1.0,
+        maximum=2.0,
+        step=0.05,
+        interactive=True,
+        info="Penalize repeated tokens",
+    )
+]
+
+examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None],
+    ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None],
+    ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None],
+    ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None],
+    ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None],
+    ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None],
+]

 gr.ChatInterface(
     fn=gemini_chat,
@@ -129,5 +167,8 @@ gr.ChatInterface(
     ),
     title="LLAVA: Large Language Virtual Assistant",
     description="Official Demo Of ```LLAVA``` based on ```Large Language Virtual Assistant ```.",
+    additional_inputs=additional_inputs,
+    title="LLaVa 56B Large Language Virtual Assiatant",
+    examples=examples,
     concurrency_limit=20,
 ).launch(show_api=True)
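Note on the new Hugging Face client: `client = InferenceClient(...)` is added at the top of app.py but is not referenced by any other added line, so after this commit every chat message is still answered by the Gemini `convo` object inside `gemini_chat`. If the intent was to serve responses from the Hub repo instead, a minimal sketch could look like the following (the `hf_chat` handler is hypothetical and assumes the `Satyam-Singh/LLaVa-Large-Language-Virtual-Assistant` repo is backed by a text-generation endpoint, which this diff does not show):

```python
from huggingface_hub import InferenceClient

# Hypothetical handler, not part of this commit: assumes the repo id below is
# served by a text-generation endpoint on the Inference API.
client = InferenceClient("Satyam-Singh/LLaVa-Large-Language-Virtual-Assistant")

def hf_chat(message, history):
    # Send the raw user message and return the generated continuation as a string.
    return client.text_generation(message, max_new_tokens=512)
```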
 
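The new `additional_inputs` list only takes effect if the chat function accepts the extra values: `gr.ChatInterface` calls `fn` with the message, the history, and then one argument per additional input, while `gemini_chat` is still defined as `gemini_chat(message, history)`. A sketch of a signature that would at least accept the five new controls (how they would be mapped into the Gemini `generation_config` is not shown in this diff, so that step is left out here):

```python
# Sketch only: gr.ChatInterface passes each additional_inputs value to fn after
# (message, history). The committed gemini_chat does not accept them, so sending
# a message would fail with a TypeError once the extra arguments arrive.
def gemini_chat(message, history, system_prompt, temperature,
                max_new_tokens, top_p, repetition_penalty):
    # The extra values are accepted but not forwarded to Gemini here; wiring them
    # into generation_config would be a separate change.
    response = convo.send_message(message)
    return response.text
```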
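Each row in `examples` pairs an example message with one placeholder per additional input, which is why every entry carries five trailing values. Annotated, a single row reads as follows; how Gradio treats a `None` placeholder when an example is clicked is not shown in this diff, so the comments only label which control each position corresponds to:

```python
# One examples row: the first element fills the chat box; the remaining five line
# up positionally with the components in additional_inputs.
["Can you explain how the QuickSort algorithm works and provide a Python implementation?",
 None,  # System Prompt
 None,  # Temperature
 None,  # Max new tokens
 None,  # Top-p (nucleus sampling)
 None]  # Repetition penalty
```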
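As committed, the final `gr.ChatInterface(...)` call now contains `title=` twice: the existing `"LLAVA: Large Language Virtual Assistant"` line is kept as context while `title="LLaVa 56B Large Language Virtual Assiatant"` (note the misspelling of "Assistant") is added below it. Python rejects a repeated keyword argument with `SyntaxError: keyword argument repeated`, so one of the two lines has to go. A sketch of the call with a single title; arguments not visible in the diff (such as whatever the `),` context line closes) and the unchanged `description` are omitted:

```python
# Sketch with the duplicate title= collapsed into one argument and the typo fixed;
# arguments that the diff does not show are omitted.
gr.ChatInterface(
    fn=gemini_chat,
    title="LLaVa 56B Large Language Virtual Assistant",
    additional_inputs=additional_inputs,
    examples=examples,
    concurrency_limit=20,
).launch(show_api=True)
```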