curry tang committed
Commit d9cc5c0 · 1 Parent(s): bffe050

add logger

Files changed (4):
  1. .gitignore +1 -0
  2. app.py +7 -5
  3. banner.py +0 -3
  4. log.py +10 -0
.gitignore CHANGED
@@ -10,3 +10,4 @@ wheels/
 .venv
 .idea
 .env.local
+app.log
app.py CHANGED
@@ -6,8 +6,10 @@ import base64
 from PIL import Image
 import io
 from prompts import web_prompt, explain_code_template, optimize_code_template, debug_code_template, function_gen_template, translate_doc_template, backend_developer_prompt, analyst_prompt
-from banner import banner_md
 from langchain_core.prompts import PromptTemplate
+from log import logging
+
+logger = logging.getLogger(__name__)
 
 
 deep_seek_llm = DeepSeekLLM(api_key=settings.deepseek_api_key)
@@ -27,8 +29,8 @@ def get_default_chat():
     return _llm.get_chat_engine()
 
 
-def predict(message, history, _chat, _current_assistant):
-    print('!!!!!', message, history, _chat, _current_assistant)
+def predict(message, history, _chat, _current_assistant: str):
+    logger.info(f"chat predict: {message}, {history}, {_chat}, {_current_assistant}")
     files_len = len(message.files)
     if _chat is None:
         _chat = get_default_chat()
@@ -60,6 +62,7 @@ def predict(message, history, _chat, _current_assistant):
             {"type": "text", "text": message.text},
             {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
         ]))
+    logger.info(f"chat history: {_lc_history}")
 
     response_message = ''
     for chunk in _chat.stream(_lc_history):
@@ -68,7 +71,6 @@ def predict(message, history, _chat, _current_assistant):
 
 
 def update_chat(_provider: str, _model: str, _temperature: float, _max_tokens: int):
-    print('?????', _provider, _model, _temperature, _max_tokens)
     _config_llm = provider_model_map[_provider]
     return _config_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
 
@@ -154,7 +156,7 @@ with gr.Blocks() as app:
     chat_engine = gr.State(value=None)
     current_assistant = gr.State(value='前端开发助手')
     with gr.Row(variant='panel'):
-        gr.Markdown(banner_md)
+        gr.Markdown("## 智能编出助手")
     with gr.Accordion('模型参数设置', open=False):
         with gr.Row():
             provider = gr.Dropdown(
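One detail worth noting in the import hunk above: from log import logging re-imports the standard-library logging module by way of log.py, so the logging.basicConfig(...) call in log.py (see the new file below) runs as an import side effect before app.py ever calls getLogger. A minimal sketch of that pattern in isolation, assuming the hypothetical file names mylog.py and main.py:

    # mylog.py: configure handlers once, at first import
    import logging
    logging.basicConfig(level=logging.INFO)

    # main.py: this import executes mylog.py, installing the handlers,
    # and binds the already-configured stdlib module to the name "logging"
    from mylog import logging

    logger = logging.getLogger(__name__)
    logger.info("logging is configured before this line runs")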
banner.py DELETED
@@ -1,3 +0,0 @@
-banner_md = '''
-## 前端智能代码助手
-'''
log.py ADDED
@@ -0,0 +1,10 @@
+import logging
+
+logging.basicConfig(
+    level=logging.INFO,  # set the log level
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.FileHandler('app.log'),
+        logging.StreamHandler(),
+    ]
+)
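For reference, a minimal usage sketch of the new module, mirroring the two lines added to app.py; with this basicConfig, each record is written both to app.log (via the FileHandler) and to stderr (the default stream of StreamHandler):

    from log import logging

    logger = logging.getLogger(__name__)
    logger.info("written to app.log and echoed to stderr")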