HarshanaLF committed on
Commit 33305c7
1 Parent(s): 520872a
Files changed (2):
  1. README.md +63 -1
  2. app.py +35 -27
README.md CHANGED
@@ -10,4 +10,66 @@ pinned: false
 short_description: Chat with AI with ⚡Lightning Speed
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# H GO
+
+Inspired by Google Go, H GO is a concise, efficient chat interface built on language models hosted on Hugging Face. It uses Gradio for the user interface and supports several models to suit diverse needs.
+
+## Features
+
+- **Multiple Model Support**: Choose from models such as Nous Hermes, StarChat2, Mistral, and Phi.
+- **Real-time Interaction**: Get quick, concise responses from the selected model.
+- **Customizable**: Switch models easily to suit your specific requirements.
+
+## Setup
+
+### Prerequisites
+
+- Python 3.7+
+- Gradio
+- Hugging Face Hub
+- Git LFS (Large File Storage)
+
+### Installation
+
+1. Clone the repository:
+
+```bash
+# Make sure you have git-lfs installed (https://git-lfs.com)
+git lfs install
+
+# When prompted for a password, use an access token with write permissions.
+# Generate one from your settings: https://huggingface.co/settings/tokens
+git clone https://huggingface.co/spaces/HarshanaLF/Real-Time-Chat-with-AI
+
+# If you want to clone without large files - just their pointers
+GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/spaces/HarshanaLF/Real-Time-Chat-with-AI
+```
+
+2. Navigate to the project directory:
+
+```bash
+cd Real-Time-Chat-with-AI
+```
+
+3. Install the required dependencies:
+
+```bash
+pip install gradio huggingface_hub
+```
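+
+Optionally, verify the installation with a quick sanity check (any recent versions of both packages should work):
+
+```python
+# Run this after installation to confirm the imports resolve.
+import gradio
+import huggingface_hub
+
+print(gradio.__version__, huggingface_hub.__version__)
+```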
+
+## Usage
+
+Run the application with:
+
+```bash
+python app.py
+```
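+
+Once the app is running, you can also query it from Python. The snippet below is a minimal sketch using `gradio_client` (not part of this repository); it assumes the app is on Gradio's default local port and exposes the default `/predict` endpoint:
+
+```python
+# Hypothetical client-side check; install the client first (pip install gradio_client).
+from gradio_client import Client
+
+# Point at the local app, or pass the Space name "HarshanaLF/Real-Time-Chat-with-AI".
+client = Client("http://127.0.0.1:7860")
+
+# The two positional inputs mirror the Interface: question text and model name.
+result = client.predict("What is the capital of France?", "Mistral 7B v0.3",
+                        api_name="/predict")
+print(result)
+```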
+
+## Model Descriptions
+
+- **Nous Hermes Mixtral 8x7B DPO**: A robust model designed for detailed and nuanced conversation handling.
+- **StarChat2 15b**: A large-scale model optimized for general chat across a wide range of topics.
+- **Mistral 7B v0.3**: A smaller, efficient model suited to fast, responsive chat applications.
+- **Phi 3 mini**: A compact model focused on instructive and concise responses.
+- **Mixtral 8x7B**: A versatile model that handles varied conversational contexts effectively.
app.py CHANGED
@@ -1,44 +1,52 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-
+# Function to return the appropriate client based on the model selected
 def client_fn(model):
-    if "Nous" in model:
-        return InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
-    elif "Star" in model:
-        return InferenceClient("HuggingFaceH4/starchat2-15b-v0.1")
-    elif "Mistral" in model:
-        return InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
-    elif "Phi" in model:
-        return InferenceClient("microsoft/Phi-3-mini-4k-instruct")
-    else:
-        return InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+    model_map = {
+        "Nous Hermes Mixtral 8x7B DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "StarChat2 15b": "HuggingFaceH4/starchat2-15b-v0.1",
+        "Mistral 7B v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
+        "Phi 3 mini": "microsoft/Phi-3-mini-4k-instruct",
+        "Mixtral 8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    }
+    return InferenceClient(model_map.get(model, "mistralai/Mixtral-8x7B-Instruct-v0.1"))
 
-system_instructions1 = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"
+# System instructions for the models to follow
+system_instructions = "[SYSTEM] Your task is to Answer the question. Keep conversation very short, clear and concise. The expectation is that you will avoid introductions and start answering the query directly, Only answer the question asked by user, Do not say unnecessary things.[QUESTION]"
 
-def models(text, model="Mixtral 8x7B"):
-
+# Function to generate model responses
+def models(text, model="Mixtral 8x7B"):
     client = client_fn(model)
-
-    generate_kwargs = dict(
-        max_new_tokens=100,
-        do_sample=True,
-    )
-
-    formatted_prompt = system_instructions1 + text + "[ANSWER]"
+    generate_kwargs = {
+        "max_new_tokens": 100,
+        "do_sample": True,
+    }
+
+    formatted_prompt = f"{system_instructions} {text} [ANSWER]"
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+
     output = ""
-    for response in stream:
-        output+=response.token.text
-        if output.endswith("<|assistant|>"):
-            output = output[:-13]
-        elif output.endswith("</s>"):
+    for response in stream:
+        output += response.token.text
+        if output.endswith("</s>"):
             output = output[:-4]
     return output
 
-description="""# Chat GO
+# Gradio interface description and configuration
+description = """# H GO
 ### Inspired from Google Go"""
 
-demo = gr.Interface(description=description,fn=models, inputs=["text", gr.Dropdown([ 'Mixtral 8x7B','Nous Hermes Mixtral 8x7B DPO','StarChat2 15b','Mistral 7B v0.3','Phi 3 mini', ], value="Mistral 7B v0.3", label="Select Model") ], outputs="text", live=True, batch=True, max_batch_size=10000)
+demo = gr.Interface(
+    description=description,
+    fn=models,
+    inputs=["text", gr.Dropdown(['Mixtral 8x7B', 'Nous Hermes Mixtral 8x7B DPO', 'StarChat2 15b', 'Mistral 7B v0.3', 'Phi 3 mini'], value="Mistral 7B v0.3", label="Select Model")],
+    outputs="text",
+    live=True,
+    batch=True,
+    max_batch_size=10000
+)
+
+# Queue and launch configuration for Gradio
 demo.queue(max_size=300000)
 demo.launch()
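
For a quick local check of the refactored generation path, something like the sketch below should work. It replicates what `models()` does rather than importing app.py (importing it would launch the Gradio app as a side effect). It needs network access to the Hugging Face Inference API and may require an HF token in your environment; the shortened prompt is illustrative only:

```python
# Standalone sketch of the models() flow from app.py (assumptions noted above).
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
prompt = "[SYSTEM] Answer briefly. [QUESTION] What is the capital of France? [ANSWER]"

output = ""
for response in client.text_generation(prompt, max_new_tokens=100, do_sample=True,
                                       stream=True, details=True, return_full_text=False):
    output += response.token.text  # accumulate streamed tokens
if output.endswith("</s>"):
    output = output[:-4]  # strip the end-of-sequence marker, as app.py does
print(output)
```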