curry tang commited on
Commit
bba179e
·
1 Parent(s): e42a6a3
Files changed (11) hide show
  1. .env +4 -0
  2. .gitignore +12 -0
  3. .python-version +1 -0
  4. README.md +4 -4
  5. app.py +182 -0
  6. config.py +14 -0
  7. llm.py +77 -0
  8. pyproject.toml +33 -0
  9. requirements-dev.lock +276 -0
  10. requirements.lock +276 -0
  11. requirements.txt +504 -0
.env ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ DEEP_SEEK_API_KEY=
2
+ OPEN_ROUTER_API_KEY=
3
+ TONGYI_API_KEY=
4
+ DEBUG=False
.gitignore ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # python generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # venv
10
+ .venv
11
+ .idea
12
+ .env.local
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.11.9
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Starship
3
- emoji: 📊
4
- colorFrom: purple
5
- colorTo: gray
6
  sdk: gradio
7
  sdk_version: 4.38.1
8
  app_file: app.py
 
1
  ---
2
+ title: startship
3
+ emoji: 👁
4
+ colorFrom: indigo
5
+ colorTo: indigo
6
  sdk: gradio
7
  sdk_version: 4.38.1
8
  app_file: app.py
app.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from langchain_core.messages import HumanMessage, AIMessage
3
+ from llm import DeepSeekLLM, OpenRouterLLM, TongYiLLM
4
+ from config import settings
5
+
6
+
7
+ deep_seek_llm = DeepSeekLLM(api_key=settings.deep_seek_api_key)
8
+ open_router_llm = OpenRouterLLM(api_key=settings.open_router_api_key)
9
+ tongyi_llm = TongYiLLM(api_key=settings.tongyi_api_key)
10
+
11
+
12
def init_chat():
    """Build the default chat engine (DeepSeek with its default settings)."""
    engine = deep_seek_llm.get_chat_engine()
    return engine
14
+
15
+
16
def predict(message, history, chat):
    """Stream a chat completion for *message*, replaying *history* first.

    Args:
        message: Payload from the multimodal textbox. Depending on the Gradio
            version this arrives either as a dict with a 'text' key or as an
            object with a ``.text`` attribute — both are handled here.
        history: List of ``(user, assistant)`` turn pairs from the chatbot.
        chat: Active chat engine, or ``None`` to lazily create the default one.

    Yields:
        The accumulated response text after each streamed chunk.
    """
    if chat is None:
        chat = init_chat()

    # Extract the text part of the multimodal payload robustly; `message.text`
    # alone raises AttributeError when Gradio passes a plain dict.
    text = message['text'] if isinstance(message, dict) else message.text

    history_messages = []
    for human, assistant in history:
        # File-upload turns leave the assistant slot as None; skip those so we
        # never build an AIMessage with content=None.
        if human is not None:
            history_messages.append(HumanMessage(content=human))
        if assistant is not None:
            history_messages.append(AIMessage(content=assistant))
    history_messages.append(HumanMessage(content=text))

    response_message = ''
    for chunk in chat.stream(history_messages):
        response_message += chunk.content
        yield response_message
29
+
30
+
31
def update_chat(_provider: str, _chat, _model: str, _temperature: float, _max_tokens: int):
    """Rebuild the chat engine after a settings change in the UI.

    Args:
        _provider: Provider name selected in the dropdown
            ('DeepSeek' / 'OpenRouter' / 'Tongyi').
        _chat: Current chat engine; returned unchanged for unknown providers.
        _model: Model identifier for the chosen provider.
        _temperature: Sampling temperature.
        _max_tokens: Completion-length limit.

    Returns:
        A freshly configured chat engine, or the existing one if the provider
        is not recognised.
    """
    # NOTE: the original debug `print('?????', ...)` was removed.
    if _provider == 'DeepSeek':
        return deep_seek_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
    if _provider == 'OpenRouter':
        return open_router_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
    if _provider == 'Tongyi':
        return tongyi_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
    return _chat
40
+
41
+
42
with gr.Blocks() as app:
    with gr.Tab('聊天'):
        # Holds the currently configured chat engine; None until first use.
        chat_engine = gr.State(value=None)
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                chatbot = gr.ChatInterface(
                    predict,
                    multimodal=True,
                    chatbot=gr.Chatbot(elem_id="chatbot", height=600, show_share_button=False),
                    textbox=gr.MultimodalTextbox(lines=1),
                    additional_inputs=[chat_engine]
                )
            with gr.Column(scale=1, min_width=300):
                with gr.Accordion('参数设置', open=True):
                    with gr.Column():
                        provider = gr.Dropdown(
                            label='模型厂商',
                            choices=['DeepSeek', 'OpenRouter', 'Tongyi'],
                            value='DeepSeek',
                            info='不同模型厂商参数,效果和价格略有不同,请先设置好对应模型厂商的 API Key。',
                        )

                        @gr.render(inputs=provider)
                        def show_model_config_panel(_provider):
                            # The three provider panels were identical except for
                            # the LLM wrapper and the Max-Tokens slider bounds
                            # (minimum, maximum, step) — Tongyi's API accepts a
                            # much smaller completion window — so the panel is
                            # built once from this table instead of being
                            # copy-pasted per provider.
                            panels = {
                                'DeepSeek': (deep_seek_llm, 1024, 1024 * 20, 128),
                                'OpenRouter': (open_router_llm, 1024, 1024 * 20, 128),
                                'Tongyi': (tongyi_llm, 1000, 2000, 100),
                            }
                            if _provider not in panels:
                                return
                            llm, tok_min, tok_max, tok_step = panels[_provider]
                            with gr.Column():
                                model = gr.Dropdown(
                                    label='模型',
                                    choices=llm.support_models,
                                    value=llm.default_model
                                )
                                temperature = gr.Slider(
                                    minimum=0.0,
                                    maximum=1.0,
                                    step=0.1,
                                    value=llm.default_temperature,
                                    label="Temperature",
                                    key="temperature",
                                )
                                max_tokens = gr.Slider(
                                    minimum=tok_min,
                                    maximum=tok_max,
                                    step=tok_step,
                                    value=llm.default_max_tokens,
                                    label="Max Tokens",
                                    key="max_tokens",
                                )
                                # Any settings change rebuilds the chat engine.
                                for control in (model, temperature, max_tokens):
                                    control.change(
                                        fn=update_chat,
                                        inputs=[provider, chat_engine, model, temperature, max_tokens],
                                        outputs=[chat_engine],
                                    )


app.launch(debug=settings.debug, show_api=False)
config.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic_settings import BaseSettings, SettingsConfigDict
2
+
3
+
4
class Settings(BaseSettings):
    """Application settings loaded from environment variables / .env files."""

    # Provider API keys; variable names match the keys declared in `.env`.
    deep_seek_api_key: str
    open_router_api_key: str
    tongyi_api_key: str
    # Enables Gradio debug mode when True (consumed by app.launch).
    debug: bool

    # `.env.local` is listed after `.env` so local values take precedence.
    model_config = SettingsConfigDict(env_file=('.env', '.env.local'), env_file_encoding='utf-8')


# Module-level singleton imported by the rest of the application.
settings = Settings()
14
+
llm.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+ from abc import ABC
3
+ from langchain_openai import ChatOpenAI
4
+
5
+
6
class BaseLLM(ABC):
    """Common base for OpenAI-compatible chat providers.

    Subclasses supply `_support_models`, `_default_model` and `_base_url`;
    the base class exposes them via read-only properties and builds a
    configured ``ChatOpenAI`` engine on demand.
    """

    _api_key: str
    _support_models: List[str]
    _default_model: str
    _base_url: str
    _default_temperature: float = 0.5
    _default_max_tokens: int = 4096

    def __init__(self, *, api_key: str):
        self._api_key = api_key

    @property
    def support_models(self) -> List[str]:
        """Model identifiers this provider accepts."""
        return self._support_models

    @property
    def default_model(self) -> str:
        return self._default_model

    @property
    def base_url(self) -> str:
        return self._base_url

    @property
    def api_key(self) -> str:
        return self._api_key

    @property
    def default_temperature(self) -> float:
        return self._default_temperature

    @property
    def default_max_tokens(self) -> int:
        return self._default_max_tokens

    def get_chat_engine(self, *, model: str = None, temperature: float = None, max_tokens: int = None):
        """Build a ChatOpenAI engine, filling unset options from the defaults.

        Compare against None explicitly: the previous ``x or default`` form
        silently discarded falsy-but-valid values such as ``temperature=0.0``
        (which the UI slider allows) or ``max_tokens=0``.
        """
        if model is None:
            model = self.default_model
        if temperature is None:
            temperature = self.default_temperature
        if max_tokens is None:
            max_tokens = self.default_max_tokens
        return ChatOpenAI(
            model=model,
            api_key=self.api_key,
            base_url=self.base_url,
            temperature=temperature,
            max_tokens=max_tokens,
        )
+ )
52
+
53
+
54
class DeepSeekLLM(BaseLLM):
    """DeepSeek provider: OpenAI-compatible endpoint at api.deepseek.com."""

    _support_models = ['deepseek-chat', 'deepseek-coder']
    _base_url = 'https://api.deepseek.com/v1'
    _default_model = 'deepseek-chat'
58
+
59
+
60
class OpenRouterLLM(BaseLLM):
    """OpenRouter provider: aggregates third-party models behind one
    OpenAI-compatible API."""

    _support_models = [
        'anthropic/claude-3.5-sonnet', 'openai/gpt-4o',
        'nvidia/nemotron-4-340b-instruct', 'deepseek/deepseek-coder',
        'google/gemini-flash-1.5', 'deepseek/deepseek-chat',
        'liuhaotian/llava-yi-34b', 'qwen/qwen-110b-chat',
        'qwen/qwen-72b-chat', 'google/gemini-pro-1.5',
        'cohere/command-r-plus', 'anthropic/claude-3-haiku',
    ]
    _base_url = 'https://openrouter.ai/api/v1'
    _default_model = 'anthropic/claude-3.5-sonnet'
71
+
72
+
73
class TongYiLLM(BaseLLM):
    """Tongyi (Qwen) provider via DashScope's OpenAI-compatible endpoint."""

    _support_models = ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-long']
    _default_model = 'qwen-turbo'
    _base_url = 'https://dashscope.aliyuncs.com/compatible-mode/v1'
    # Smaller completion limit than the base class default — presumably
    # matching DashScope's per-request cap; confirm against current API docs.
    _default_max_tokens: int = 2000
pyproject.toml ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "startship"
3
+ version = "0.1.0"
4
+ description = "Gradio web UI for chatting with DeepSeek, OpenRouter and Tongyi (Qwen) models"
5
+ authors = [
6
+ { name = "twn39", email = "[email protected]" }
7
+ ]
8
+ dependencies = [
9
+ "gradio>=4.38.1",
10
+ "langchain>=0.2.7",
11
+ "pydantic>=2.8.2",
12
+ "pydantic-settings>=2.3.4",
13
+ "langchain-openai>=0.1.16",
14
+ "dashscope>=1.20.1",
15
+ "setuptools==69.5.1",
16
+ ]
17
+ readme = "README.md"
18
+ requires-python = ">= 3.8"
19
+
20
+ [build-system]
21
+ requires = ["hatchling"]
22
+ build-backend = "hatchling.build"
23
+
24
+ [tool.rye]
25
+ managed = true
26
+ dev-dependencies = []
27
+
28
+ [tool.hatch.metadata]
29
+ allow-direct-references = true
30
+
31
+ [tool.hatch.build.targets.wheel]
32
+ packages = ["app.py"]
33
+
requirements-dev.lock ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # generated by rye
2
+ # use `rye lock` or `rye sync` to update this lockfile
3
+ #
4
+ # last locked with the following flags:
5
+ # pre: false
6
+ # features: []
7
+ # all-features: false
8
+ # with-sources: false
9
+ # generate-hashes: false
10
+
11
+ -e file:.
12
+ aiofiles==23.2.1
13
+ # via gradio
14
+ aiohttp==3.9.5
15
+ # via dashscope
16
+ # via langchain
17
+ aiosignal==1.3.1
18
+ # via aiohttp
19
+ altair==5.3.0
20
+ # via gradio
21
+ annotated-types==0.7.0
22
+ # via pydantic
23
+ anyio==4.4.0
24
+ # via httpx
25
+ # via openai
26
+ # via starlette
27
+ # via watchfiles
28
+ attrs==23.2.0
29
+ # via aiohttp
30
+ # via jsonschema
31
+ # via referencing
32
+ certifi==2024.7.4
33
+ # via httpcore
34
+ # via httpx
35
+ # via requests
36
+ charset-normalizer==3.3.2
37
+ # via requests
38
+ click==8.1.7
39
+ # via typer
40
+ # via uvicorn
41
+ contourpy==1.2.1
42
+ # via matplotlib
43
+ cycler==0.12.1
44
+ # via matplotlib
45
+ dashscope==1.20.1
46
+ # via startship
47
+ distro==1.9.0
48
+ # via openai
49
+ dnspython==2.6.1
50
+ # via email-validator
51
+ email-validator==2.2.0
52
+ # via fastapi
53
+ fastapi==0.111.0
54
+ # via gradio
55
+ fastapi-cli==0.0.4
56
+ # via fastapi
57
+ ffmpy==0.3.2
58
+ # via gradio
59
+ filelock==3.15.4
60
+ # via huggingface-hub
61
+ fonttools==4.53.1
62
+ # via matplotlib
63
+ frozenlist==1.4.1
64
+ # via aiohttp
65
+ # via aiosignal
66
+ fsspec==2024.2.0
67
+ # via gradio-client
68
+ # via huggingface-hub
69
+ gradio==4.38.1
70
+ # via startship
71
+ gradio-client==1.1.0
72
+ # via gradio
73
+ h11==0.14.0
74
+ # via httpcore
75
+ # via uvicorn
76
+ httpcore==1.0.5
77
+ # via httpx
78
+ httptools==0.6.1
79
+ # via uvicorn
80
+ httpx==0.27.0
81
+ # via fastapi
82
+ # via gradio
83
+ # via gradio-client
84
+ # via openai
85
+ huggingface-hub==0.23.4
86
+ # via gradio
87
+ # via gradio-client
88
+ idna==3.7
89
+ # via anyio
90
+ # via email-validator
91
+ # via httpx
92
+ # via requests
93
+ # via yarl
94
+ importlib-resources==6.4.0
95
+ # via gradio
96
+ jinja2==3.1.4
97
+ # via altair
98
+ # via fastapi
99
+ # via gradio
100
+ jsonpatch==1.33
101
+ # via langchain-core
102
+ jsonpointer==3.0.0
103
+ # via jsonpatch
104
+ jsonschema==4.23.0
105
+ # via altair
106
+ jsonschema-specifications==2023.12.1
107
+ # via jsonschema
108
+ kiwisolver==1.4.5
109
+ # via matplotlib
110
+ langchain==0.2.7
111
+ # via startship
112
+ langchain-core==0.2.18
113
+ # via langchain
114
+ # via langchain-openai
115
+ # via langchain-text-splitters
116
+ langchain-openai==0.1.16
117
+ # via startship
118
+ langchain-text-splitters==0.2.2
119
+ # via langchain
120
+ langsmith==0.1.85
121
+ # via langchain
122
+ # via langchain-core
123
+ markdown-it-py==3.0.0
124
+ # via rich
125
+ markupsafe==2.1.5
126
+ # via gradio
127
+ # via jinja2
128
+ matplotlib==3.9.1
129
+ # via gradio
130
+ mdurl==0.1.2
131
+ # via markdown-it-py
132
+ multidict==6.0.5
133
+ # via aiohttp
134
+ # via yarl
135
+ numpy==1.26.4
136
+ # via altair
137
+ # via contourpy
138
+ # via gradio
139
+ # via langchain
140
+ # via matplotlib
141
+ # via pandas
142
+ openai==1.35.13
143
+ # via langchain-openai
144
+ orjson==3.10.6
145
+ # via fastapi
146
+ # via gradio
147
+ # via langsmith
148
+ packaging==24.1
149
+ # via altair
150
+ # via gradio
151
+ # via gradio-client
152
+ # via huggingface-hub
153
+ # via langchain-core
154
+ # via matplotlib
155
+ pandas==2.2.2
156
+ # via altair
157
+ # via gradio
158
+ pillow==10.4.0
159
+ # via gradio
160
+ # via matplotlib
161
+ pydantic==2.8.2
162
+ # via fastapi
163
+ # via gradio
164
+ # via langchain
165
+ # via langchain-core
166
+ # via langsmith
167
+ # via openai
168
+ # via pydantic-settings
169
+ # via startship
170
+ pydantic-core==2.20.1
171
+ # via pydantic
172
+ pydantic-settings==2.3.4
173
+ # via startship
174
+ pydub==0.25.1
175
+ # via gradio
176
+ pygments==2.18.0
177
+ # via rich
178
+ pyparsing==3.1.2
179
+ # via matplotlib
180
+ python-dateutil==2.9.0.post0
181
+ # via matplotlib
182
+ # via pandas
183
+ python-dotenv==1.0.1
184
+ # via pydantic-settings
185
+ # via uvicorn
186
+ python-multipart==0.0.9
187
+ # via fastapi
188
+ # via gradio
189
+ pytz==2024.1
190
+ # via pandas
191
+ pyyaml==6.0.1
192
+ # via gradio
193
+ # via huggingface-hub
194
+ # via langchain
195
+ # via langchain-core
196
+ # via uvicorn
197
+ referencing==0.35.1
198
+ # via jsonschema
199
+ # via jsonschema-specifications
200
+ regex==2024.5.15
201
+ # via tiktoken
202
+ requests==2.32.3
203
+ # via dashscope
204
+ # via huggingface-hub
205
+ # via langchain
206
+ # via langsmith
207
+ # via tiktoken
208
+ rich==13.7.1
209
+ # via typer
210
+ rpds-py==0.19.0
211
+ # via jsonschema
212
+ # via referencing
213
+ ruff==0.5.1
214
+ # via gradio
215
+ semantic-version==2.10.0
216
+ # via gradio
217
+ setuptools==69.5.1
218
+ # via startship
219
+ shellingham==1.5.4
220
+ # via typer
221
+ six==1.16.0
222
+ # via python-dateutil
223
+ sniffio==1.3.1
224
+ # via anyio
225
+ # via httpx
226
+ # via openai
227
+ sqlalchemy==2.0.31
228
+ # via langchain
229
+ starlette==0.37.2
230
+ # via fastapi
231
+ tenacity==8.5.0
232
+ # via langchain
233
+ # via langchain-core
234
+ tiktoken==0.7.0
235
+ # via langchain-openai
236
+ tomlkit==0.12.0
237
+ # via gradio
238
+ toolz==0.12.1
239
+ # via altair
240
+ tqdm==4.66.4
241
+ # via huggingface-hub
242
+ # via openai
243
+ typer==0.12.3
244
+ # via fastapi-cli
245
+ # via gradio
246
+ typing-extensions==4.12.2
247
+ # via fastapi
248
+ # via gradio
249
+ # via gradio-client
250
+ # via huggingface-hub
251
+ # via openai
252
+ # via pydantic
253
+ # via pydantic-core
254
+ # via sqlalchemy
255
+ # via typer
256
+ tzdata==2024.1
257
+ # via pandas
258
+ ujson==5.10.0
259
+ # via fastapi
260
+ urllib3==2.2.2
261
+ # via gradio
262
+ # via requests
263
+ uvicorn==0.30.1
264
+ # via fastapi
265
+ # via gradio
266
+ uvloop==0.19.0
267
+ # via uvicorn
268
+ watchfiles==0.22.0
269
+ # via uvicorn
270
+ websocket-client==1.8.0
271
+ # via dashscope
272
+ websockets==11.0.3
273
+ # via gradio-client
274
+ # via uvicorn
275
+ yarl==1.9.4
276
+ # via aiohttp
requirements.lock ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # generated by rye
2
+ # use `rye lock` or `rye sync` to update this lockfile
3
+ #
4
+ # last locked with the following flags:
5
+ # pre: false
6
+ # features: []
7
+ # all-features: false
8
+ # with-sources: false
9
+ # generate-hashes: false
10
+
11
+ -e file:.
12
+ aiofiles==23.2.1
13
+ # via gradio
14
+ aiohttp==3.9.5
15
+ # via dashscope
16
+ # via langchain
17
+ aiosignal==1.3.1
18
+ # via aiohttp
19
+ altair==5.3.0
20
+ # via gradio
21
+ annotated-types==0.7.0
22
+ # via pydantic
23
+ anyio==4.4.0
24
+ # via httpx
25
+ # via openai
26
+ # via starlette
27
+ # via watchfiles
28
+ attrs==23.2.0
29
+ # via aiohttp
30
+ # via jsonschema
31
+ # via referencing
32
+ certifi==2024.7.4
33
+ # via httpcore
34
+ # via httpx
35
+ # via requests
36
+ charset-normalizer==3.3.2
37
+ # via requests
38
+ click==8.1.7
39
+ # via typer
40
+ # via uvicorn
41
+ contourpy==1.2.1
42
+ # via matplotlib
43
+ cycler==0.12.1
44
+ # via matplotlib
45
+ dashscope==1.20.1
46
+ # via startship
47
+ distro==1.9.0
48
+ # via openai
49
+ dnspython==2.6.1
50
+ # via email-validator
51
+ email-validator==2.2.0
52
+ # via fastapi
53
+ fastapi==0.111.0
54
+ # via gradio
55
+ fastapi-cli==0.0.4
56
+ # via fastapi
57
+ ffmpy==0.3.2
58
+ # via gradio
59
+ filelock==3.15.4
60
+ # via huggingface-hub
61
+ fonttools==4.53.1
62
+ # via matplotlib
63
+ frozenlist==1.4.1
64
+ # via aiohttp
65
+ # via aiosignal
66
+ fsspec==2024.2.0
67
+ # via gradio-client
68
+ # via huggingface-hub
69
+ gradio==4.38.1
70
+ # via startship
71
+ gradio-client==1.1.0
72
+ # via gradio
73
+ h11==0.14.0
74
+ # via httpcore
75
+ # via uvicorn
76
+ httpcore==1.0.5
77
+ # via httpx
78
+ httptools==0.6.1
79
+ # via uvicorn
80
+ httpx==0.27.0
81
+ # via fastapi
82
+ # via gradio
83
+ # via gradio-client
84
+ # via openai
85
+ huggingface-hub==0.23.4
86
+ # via gradio
87
+ # via gradio-client
88
+ idna==3.7
89
+ # via anyio
90
+ # via email-validator
91
+ # via httpx
92
+ # via requests
93
+ # via yarl
94
+ importlib-resources==6.4.0
95
+ # via gradio
96
+ jinja2==3.1.4
97
+ # via altair
98
+ # via fastapi
99
+ # via gradio
100
+ jsonpatch==1.33
101
+ # via langchain-core
102
+ jsonpointer==3.0.0
103
+ # via jsonpatch
104
+ jsonschema==4.23.0
105
+ # via altair
106
+ jsonschema-specifications==2023.12.1
107
+ # via jsonschema
108
+ kiwisolver==1.4.5
109
+ # via matplotlib
110
+ langchain==0.2.7
111
+ # via startship
112
+ langchain-core==0.2.18
113
+ # via langchain
114
+ # via langchain-openai
115
+ # via langchain-text-splitters
116
+ langchain-openai==0.1.16
117
+ # via startship
118
+ langchain-text-splitters==0.2.2
119
+ # via langchain
120
+ langsmith==0.1.85
121
+ # via langchain
122
+ # via langchain-core
123
+ markdown-it-py==3.0.0
124
+ # via rich
125
+ markupsafe==2.1.5
126
+ # via gradio
127
+ # via jinja2
128
+ matplotlib==3.9.1
129
+ # via gradio
130
+ mdurl==0.1.2
131
+ # via markdown-it-py
132
+ multidict==6.0.5
133
+ # via aiohttp
134
+ # via yarl
135
+ numpy==1.26.4
136
+ # via altair
137
+ # via contourpy
138
+ # via gradio
139
+ # via langchain
140
+ # via matplotlib
141
+ # via pandas
142
+ openai==1.35.13
143
+ # via langchain-openai
144
+ orjson==3.10.6
145
+ # via fastapi
146
+ # via gradio
147
+ # via langsmith
148
+ packaging==24.1
149
+ # via altair
150
+ # via gradio
151
+ # via gradio-client
152
+ # via huggingface-hub
153
+ # via langchain-core
154
+ # via matplotlib
155
+ pandas==2.2.2
156
+ # via altair
157
+ # via gradio
158
+ pillow==10.4.0
159
+ # via gradio
160
+ # via matplotlib
161
+ pydantic==2.8.2
162
+ # via fastapi
163
+ # via gradio
164
+ # via langchain
165
+ # via langchain-core
166
+ # via langsmith
167
+ # via openai
168
+ # via pydantic-settings
169
+ # via startship
170
+ pydantic-core==2.20.1
171
+ # via pydantic
172
+ pydantic-settings==2.3.4
173
+ # via startship
174
+ pydub==0.25.1
175
+ # via gradio
176
+ pygments==2.18.0
177
+ # via rich
178
+ pyparsing==3.1.2
179
+ # via matplotlib
180
+ python-dateutil==2.9.0.post0
181
+ # via matplotlib
182
+ # via pandas
183
+ python-dotenv==1.0.1
184
+ # via pydantic-settings
185
+ # via uvicorn
186
+ python-multipart==0.0.9
187
+ # via fastapi
188
+ # via gradio
189
+ pytz==2024.1
190
+ # via pandas
191
+ pyyaml==6.0.1
192
+ # via gradio
193
+ # via huggingface-hub
194
+ # via langchain
195
+ # via langchain-core
196
+ # via uvicorn
197
+ referencing==0.35.1
198
+ # via jsonschema
199
+ # via jsonschema-specifications
200
+ regex==2024.5.15
201
+ # via tiktoken
202
+ requests==2.32.3
203
+ # via dashscope
204
+ # via huggingface-hub
205
+ # via langchain
206
+ # via langsmith
207
+ # via tiktoken
208
+ rich==13.7.1
209
+ # via typer
210
+ rpds-py==0.19.0
211
+ # via jsonschema
212
+ # via referencing
213
+ ruff==0.5.1
214
+ # via gradio
215
+ semantic-version==2.10.0
216
+ # via gradio
217
+ setuptools==69.5.1
218
+ # via startship
219
+ shellingham==1.5.4
220
+ # via typer
221
+ six==1.16.0
222
+ # via python-dateutil
223
+ sniffio==1.3.1
224
+ # via anyio
225
+ # via httpx
226
+ # via openai
227
+ sqlalchemy==2.0.31
228
+ # via langchain
229
+ starlette==0.37.2
230
+ # via fastapi
231
+ tenacity==8.5.0
232
+ # via langchain
233
+ # via langchain-core
234
+ tiktoken==0.7.0
235
+ # via langchain-openai
236
+ tomlkit==0.12.0
237
+ # via gradio
238
+ toolz==0.12.1
239
+ # via altair
240
+ tqdm==4.66.4
241
+ # via huggingface-hub
242
+ # via openai
243
+ typer==0.12.3
244
+ # via fastapi-cli
245
+ # via gradio
246
+ typing-extensions==4.12.2
247
+ # via fastapi
248
+ # via gradio
249
+ # via gradio-client
250
+ # via huggingface-hub
251
+ # via openai
252
+ # via pydantic
253
+ # via pydantic-core
254
+ # via sqlalchemy
255
+ # via typer
256
+ tzdata==2024.1
257
+ # via pandas
258
+ ujson==5.10.0
259
+ # via fastapi
260
+ urllib3==2.2.2
261
+ # via gradio
262
+ # via requests
263
+ uvicorn==0.30.1
264
+ # via fastapi
265
+ # via gradio
266
+ uvloop==0.19.0
267
+ # via uvicorn
268
+ watchfiles==0.22.0
269
+ # via uvicorn
270
+ websocket-client==1.8.0
271
+ # via dashscope
272
+ websockets==11.0.3
273
+ # via gradio-client
274
+ # via uvicorn
275
+ yarl==1.9.4
276
+ # via aiohttp
requirements.txt ADDED
@@ -0,0 +1,504 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # generated by rye
2
+ # use `rye lock` or `rye sync` to update this lockfile
3
+ #
4
+ # last locked with the following flags:
5
+ # pre: false
6
+ # features: []
7
+ # all-features: false
8
+ # with-sources: false
9
+ # generate-hashes: false
10
+ # universal: false
11
+ --extra-index-url https://download.pytorch.org/whl/cu121
12
+
13
+ absl-py==2.1.0
14
+ # via keras
15
+ # via tensorboard
16
+ # via tensorflow-intel
17
+ accelerate==0.32.1
18
+ # via aitoolkits-webui
19
+ addict==2.4.0
20
+ # via modelscope
21
+ aiofiles==23.2.1
22
+ # via gradio
23
+ aiohttp==3.9.5
24
+ # via dashscope
25
+ # via datasets
26
+ # via fsspec
27
+ # via langchain
28
+ aiosignal==1.3.1
29
+ # via aiohttp
30
+ aliyun-python-sdk-core==2.15.1
31
+ # via aliyun-python-sdk-kms
32
+ # via oss2
33
+ aliyun-python-sdk-kms==2.16.3
34
+ # via oss2
35
+ altair==5.3.0
36
+ # via gradio
37
+ annotated-types==0.7.0
38
+ # via pydantic
39
+ anyio==4.4.0
40
+ # via httpx
41
+ # via openai
42
+ # via starlette
43
+ # via watchfiles
44
+ astunparse==1.6.3
45
+ # via tensorflow-intel
46
+ async-timeout==4.0.3
47
+ # via aiohttp
48
+ # via langchain
49
+ attrs==23.2.0
50
+ # via aiohttp
51
+ # via jsonschema
52
+ # via modelscope
53
+ # via referencing
54
+ certifi==2024.7.4
55
+ # via httpcore
56
+ # via httpx
57
+ # via requests
58
+ cffi==1.16.0
59
+ # via cryptography
60
+ charset-normalizer==3.3.2
61
+ # via requests
62
+ click==8.1.7
63
+ # via typer
64
+ # via uvicorn
65
+ colorama==0.4.6
66
+ # via click
67
+ # via tqdm
68
+ # via uvicorn
69
+ contourpy==1.2.1
70
+ # via matplotlib
71
+ crcmod==1.7
72
+ # via oss2
73
+ cryptography==42.0.8
74
+ # via aliyun-python-sdk-core
75
+ cycler==0.12.1
76
+ # via matplotlib
77
+ dashscope==1.20.1
78
+ # via aitoolkits-webui
79
+ datasets==2.18.0
80
+ # via modelscope
81
+ diffusers==0.29.2
82
+ # via aitoolkits-webui
83
+ dill==0.3.8
84
+ # via datasets
85
+ # via multiprocess
86
+ distro==1.9.0
87
+ # via openai
88
+ dnspython==2.6.1
89
+ # via email-validator
90
+ einops==0.8.0
91
+ # via modelscope
92
+ email-validator==2.2.0
93
+ # via fastapi
94
+ exceptiongroup==1.2.2
95
+ # via anyio
96
+ fastapi==0.111.0
97
+ # via gradio
98
+ fastapi-cli==0.0.4
99
+ # via fastapi
100
+ ffmpy==0.3.2
101
+ # via gradio
102
+ filelock==3.15.4
103
+ # via datasets
104
+ # via diffusers
105
+ # via huggingface-hub
106
+ # via torch
107
+ # via transformers
108
+ flatbuffers==24.3.25
109
+ # via tensorflow-intel
110
+ fonttools==4.53.1
111
+ # via matplotlib
112
+ frozenlist==1.4.1
113
+ # via aiohttp
114
+ # via aiosignal
115
+ fsspec==2024.2.0
116
+ # via datasets
117
+ # via gradio-client
118
+ # via huggingface-hub
119
+ # via torch
120
+ gast==0.6.0
121
+ # via tensorflow-intel
122
+ google-pasta==0.2.0
123
+ # via tensorflow-intel
124
+ gradio==4.38.1
125
+ # via aitoolkits-webui
126
+ gradio-client==1.1.0
127
+ # via gradio
128
+ greenlet==3.0.3
129
+ # via sqlalchemy
130
+ grpcio==1.65.1
131
+ # via tensorboard
132
+ # via tensorflow-intel
133
+ h11==0.14.0
134
+ # via httpcore
135
+ # via uvicorn
136
+ h5py==3.11.0
137
+ # via keras
138
+ # via tensorflow-intel
139
+ httpcore==1.0.5
140
+ # via httpx
141
+ httptools==0.6.1
142
+ # via uvicorn
143
+ httpx==0.27.0
144
+ # via fastapi
145
+ # via gradio
146
+ # via gradio-client
147
+ # via openai
148
+ huggingface-hub==0.23.4
149
+ # via accelerate
150
+ # via datasets
151
+ # via diffusers
152
+ # via gradio
153
+ # via gradio-client
154
+ # via tokenizers
155
+ # via transformers
156
+ idna==3.7
157
+ # via anyio
158
+ # via email-validator
159
+ # via httpx
160
+ # via requests
161
+ # via yarl
162
+ importlib-metadata==8.0.0
163
+ # via diffusers
164
+ importlib-resources==6.4.0
165
+ # via gradio
166
+ intel-openmp==2021.4.0
167
+ # via mkl
168
+ jinja2==3.1.4
169
+ # via altair
170
+ # via fastapi
171
+ # via gradio
172
+ # via torch
173
+ jmespath==0.10.0
174
+ # via aliyun-python-sdk-core
175
+ jsonpatch==1.33
176
+ # via langchain-core
177
+ jsonpointer==3.0.0
178
+ # via jsonpatch
179
+ jsonschema==4.23.0
180
+ # via altair
181
+ jsonschema-specifications==2023.12.1
182
+ # via jsonschema
183
+ keras==3.4.1
184
+ # via tensorflow-intel
185
+ kiwisolver==1.4.5
186
+ # via matplotlib
187
+ kornia==0.7.3
188
+ # via aitoolkits-webui
189
+ kornia-rs==0.1.5
190
+ # via kornia
191
+ langchain==0.2.7
192
+ # via aitoolkits-webui
193
+ langchain-core==0.2.18
194
+ # via langchain
195
+ # via langchain-openai
196
+ # via langchain-text-splitters
197
+ langchain-openai==0.1.16
198
+ # via aitoolkits-webui
199
+ langchain-text-splitters==0.2.2
200
+ # via langchain
201
+ langsmith==0.1.85
202
+ # via langchain
203
+ # via langchain-core
204
+ libclang==18.1.1
205
+ # via tensorflow-intel
206
+ markdown==3.6
207
+ # via tensorboard
208
+ markdown-it-py==3.0.0
209
+ # via rich
210
+ markupsafe==2.1.5
211
+ # via gradio
212
+ # via jinja2
213
+ # via werkzeug
214
+ matplotlib==3.9.1
215
+ # via gradio
216
+ mdurl==0.1.2
217
+ # via markdown-it-py
218
+ mkl==2021.4.0
219
+ # via torch
220
+ ml-dtypes==0.4.0
221
+ # via keras
222
+ # via tensorflow-intel
223
+ modelscope==1.16.1
224
+ # via aitoolkits-webui
225
+ # via modelscope
226
+ mpmath==1.3.0
227
+ # via sympy
228
+ multidict==6.0.5
229
+ # via aiohttp
230
+ # via yarl
231
+ multiprocess==0.70.16
232
+ # via datasets
233
+ namex==0.0.8
234
+ # via keras
235
+ networkx==3.3
236
+ # via torch
237
+ numpy==1.26.4
238
+ # via accelerate
239
+ # via altair
240
+ # via contourpy
241
+ # via datasets
242
+ # via diffusers
243
+ # via gradio
244
+ # via h5py
245
+ # via keras
246
+ # via langchain
247
+ # via matplotlib
248
+ # via ml-dtypes
249
+ # via opencv-python
250
+ # via opt-einsum
251
+ # via pandas
252
+ # via pyarrow
253
+ # via scipy
254
+ # via tensorboard
255
+ # via tensorflow-intel
256
+ # via torchvision
257
+ # via transformers
258
+ # via xformers
259
+ openai==1.35.13
260
+ # via langchain-openai
261
+ opencv-python==4.10.0.84
262
+ # via aitoolkits-webui
263
+ opt-einsum==3.3.0
264
+ # via tensorflow-intel
265
+ optree==0.12.1
266
+ # via keras
267
+ orjson==3.10.6
268
+ # via fastapi
269
+ # via gradio
270
+ # via langsmith
271
+ oss2==2.18.6
272
+ # via aitoolkits-webui
273
+ # via modelscope
274
+ packaging==24.1
275
+ # via accelerate
276
+ # via altair
277
+ # via datasets
278
+ # via gradio
279
+ # via gradio-client
280
+ # via huggingface-hub
281
+ # via keras
282
+ # via kornia
283
+ # via langchain-core
284
+ # via matplotlib
285
+ # via tensorflow-intel
286
+ # via transformers
287
+ pandas==2.2.2
288
+ # via altair
289
+ # via datasets
290
+ # via gradio
291
+ pillow==10.4.0
292
+ # via diffusers
293
+ # via gradio
294
+ # via matplotlib
295
+ # via torchvision
296
+ protobuf==4.25.3
297
+ # via tensorboard
298
+ # via tensorflow-intel
299
+ psutil==6.0.0
300
+ # via accelerate
301
+ pyarrow==17.0.0
302
+ # via datasets
303
+ pyarrow-hotfix==0.6
304
+ # via datasets
305
+ pycparser==2.22
306
+ # via cffi
307
+ pycryptodome==3.20.0
308
+ # via oss2
309
+ pydantic==2.8.2
310
+ # via aitoolkits-webui
311
+ # via fastapi
312
+ # via gradio
313
+ # via langchain
314
+ # via langchain-core
315
+ # via langsmith
316
+ # via openai
317
+ # via pydantic-settings
318
+ pydantic-core==2.20.1
319
+ # via pydantic
320
+ pydantic-settings==2.3.4
321
+ # via aitoolkits-webui
322
+ pydub==0.25.1
323
+ # via gradio
324
+ pygments==2.18.0
325
+ # via rich
326
+ pyparsing==3.1.2
327
+ # via matplotlib
328
+ python-dateutil==2.9.0.post0
329
+ # via matplotlib
330
+ # via modelscope
331
+ # via pandas
332
+ python-dotenv==1.0.1
333
+ # via pydantic-settings
334
+ # via uvicorn
335
+ python-multipart==0.0.9
336
+ # via fastapi
337
+ # via gradio
338
+ pytz==2024.1
339
+ # via pandas
340
+ pyyaml==6.0.1
341
+ # via accelerate
342
+ # via datasets
343
+ # via gradio
344
+ # via huggingface-hub
345
+ # via langchain
346
+ # via langchain-core
347
+ # via transformers
348
+ # via uvicorn
349
+ referencing==0.35.1
350
+ # via jsonschema
351
+ # via jsonschema-specifications
352
+ regex==2024.5.15
353
+ # via diffusers
354
+ # via tiktoken
355
+ # via transformers
356
+ requests==2.32.3
357
+ # via dashscope
358
+ # via datasets
359
+ # via diffusers
360
+ # via huggingface-hub
361
+ # via langchain
362
+ # via langsmith
363
+ # via modelscope
364
+ # via oss2
365
+ # via tensorflow-intel
366
+ # via tiktoken
367
+ # via transformers
368
+ rich==13.7.1
369
+ # via keras
370
+ # via typer
371
+ rpds-py==0.19.0
372
+ # via jsonschema
373
+ # via referencing
374
+ ruff==0.5.1
375
+ # via gradio
376
+ safetensors==0.4.3
377
+ # via accelerate
378
+ # via diffusers
379
+ # via transformers
380
+ scipy==1.14.0
381
+ # via modelscope
382
+ semantic-version==2.10.0
383
+ # via gradio
384
+ shellingham==1.5.4
385
+ # via typer
386
+ simplejson==3.19.2
387
+ # via modelscope
388
+ six==1.16.0
389
+ # via astunparse
390
+ # via google-pasta
391
+ # via oss2
392
+ # via python-dateutil
393
+ # via tensorboard
394
+ # via tensorflow-intel
395
+ sniffio==1.3.1
396
+ # via anyio
397
+ # via httpx
398
+ # via openai
399
+ sortedcontainers==2.4.0
400
+ # via modelscope
401
+ sqlalchemy==2.0.31
402
+ # via langchain
403
+ starlette==0.37.2
404
+ # via fastapi
405
+ sympy==1.13.0
406
+ # via torch
407
+ tbb==2021.13.0
408
+ # via mkl
409
+ tenacity==8.5.0
410
+ # via langchain
411
+ # via langchain-core
412
+ tensorboard==2.17.0
413
+ # via tensorflow-intel
414
+ tensorboard-data-server==0.7.2
415
+ # via tensorboard
416
+ tensorflow==2.17.0
417
+ # via aitoolkits-webui
418
+ tensorflow-io-gcs-filesystem==0.31.0
419
+ # via tensorflow-intel
420
+ termcolor==2.4.0
421
+ # via tensorflow-intel
422
+ tiktoken==0.7.0
423
+ # via langchain-openai
424
+ tokenizers==0.19.1
425
+ # via transformers
426
+ tomlkit==0.12.0
427
+ # via gradio
428
+ toolz==0.12.1
429
+ # via altair
430
+ torch==2.3.1+cu121
431
+ # via accelerate
432
+ # via aitoolkits-webui
433
+ # via kornia
434
+ # via torchaudio
435
+ # via torchvision
436
+ # via xformers
437
+ torchaudio==2.3.1+cu121
438
+ # via aitoolkits-webui
439
+ torchvision==0.18.1+cu121
440
+ # via aitoolkits-webui
441
+ tqdm==4.66.4
442
+ # via datasets
443
+ # via huggingface-hub
444
+ # via modelscope
445
+ # via openai
446
+ # via transformers
447
+ transformers==4.42.4
448
+ # via aitoolkits-webui
449
+ typer==0.12.3
450
+ # via fastapi-cli
451
+ # via gradio
452
+ typing-extensions==4.12.2
453
+ # via altair
454
+ # via anyio
455
+ # via fastapi
456
+ # via gradio
457
+ # via gradio-client
458
+ # via huggingface-hub
459
+ # via openai
460
+ # via optree
461
+ # via pydantic
462
+ # via pydantic-core
463
+ # via sqlalchemy
464
+ # via tensorflow-intel
465
+ # via torch
466
+ # via typer
467
+ # via uvicorn
468
+ tzdata==2024.1
469
+ # via pandas
470
+ ujson==5.10.0
471
+ # via fastapi
472
+ urllib3==2.2.2
473
+ # via gradio
474
+ # via modelscope
475
+ # via requests
476
+ uvicorn==0.30.1
477
+ # via fastapi
478
+ # via gradio
479
+ watchfiles==0.22.0
480
+ # via uvicorn
481
+ websocket-client==1.8.0
482
+ # via dashscope
483
+ websockets==11.0.3
484
+ # via gradio-client
485
+ # via uvicorn
486
+ werkzeug==3.0.3
487
+ # via tensorboard
488
+ wheel==0.43.0
489
+ # via astunparse
490
+ wrapt==1.16.0
491
+ # via tensorflow-intel
492
+ xformers==0.0.27
493
+ # via aitoolkits-webui
494
+ xxhash==3.4.1
495
+ # via datasets
496
+ yarl==1.9.4
497
+ # via aiohttp
498
+ zipp==3.19.2
499
+ # via importlib-metadata
500
+ setuptools==69.5.1
501
+ # via aitoolkits-webui
502
+ # via modelscope
503
+ # via tensorboard
504
+ # via tensorflow-intel