johann22 committed
Commit 001008d · 1 Parent(s): 545e126

Update app.py

Files changed (1)
  1. app.py +11 -4
app.py CHANGED

@@ -2,6 +2,8 @@ from huggingface_hub import InferenceClient
 import gradio as gr
 import random
 import prompts
+import json
+
 client = InferenceClient(
     "mistralai/Mixtral-8x7B-Instruct-v0.1"
 )
@@ -95,8 +97,8 @@ def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0
     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
-    if history:
-        yield history
+    #if history:
+    #    yield history
 
     for response in stream:
         output += response.token.text
@@ -108,8 +110,13 @@ def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0
     print ( f'Prompt:: {prompt}')
     print ( f'output:: {output}')
     print ( f'history:: {history}')
-
-
+
+    with open('tmp.json', 'w') as f:
+        json.dump(history, f)
+        f.close()
+    with open('tmp.json', 'r') as f:
+        load_data=json.load(f)
+        print (load_data)
 
 
     return prompt, history
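
For reference, a minimal standalone sketch of the persistence pattern this commit introduces: the chat history is dumped to tmp.json and immediately read back to confirm the round trip. The file name and json calls come from the diff; the sample history value below is hypothetical.

import json

# Hypothetical history in the (user, bot) pair shape a Gradio chatbot passes around
history = [("Hi", "Hello!"), ("What's new?", "Not much.")]

# Write the history to disk; the with block closes the file automatically,
# so the explicit f.close() in the committed code is redundant
with open('tmp.json', 'w') as f:
    json.dump(history, f)

# Read it back and verify the round trip
with open('tmp.json', 'r') as f:
    load_data = json.load(f)
print(load_data)  # JSON has no tuple type, so the pairs come back as lists

One caveat of this round trip: json.dump serializes tuples as arrays, so what was written as ("Hi", "Hello!") is loaded as ['Hi', 'Hello!'], which matters if downstream code checks types.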