renamed ChatAtomicFlow + readme + demo

Browse files:
- .gitignore: +443 −1
- OpenAIChatAtomicFlow.py → ChatAtomicFlow.py: +154 −10
- OpenAIChatAtomicFlow.yaml → ChatAtomicFlow.yaml: +2 −3
- README.md: +142 −12
- __init__.py: +1 −1
- simpleQA.yaml → demo.yaml: +18 −13
- pip_requirements.py: +0 −1
- pip_requirements.txt: +0 −0
- run.py: +2 −3

.gitignore
CHANGED
@@ -1 +1,443 @@
-
+# Created by https://www.toptal.com/developers/gitignore/api/python,java,c++,pycharm,visualstudiocode,macos,linux,windows
+# Edit at https://www.toptal.com/developers/gitignore?templates=python,java,c++,pycharm,visualstudiocode,macos,linux,windows

[... the unmodified gitignore.io template generated from the URL above follows: sections for C++ (compiled objects, precompiled headers, static/dynamic libraries, executables), Java (class files, logs, package files, JVM crash logs), Linux, macOS and Windows OS metadata (e.g. .DS_Store, Thumbs.db), PyCharm/JetBrains .idea files, Python (bytecode, packaging, test/coverage, environments, type-checker caches) and Visual Studio Code local files ...]

+# End of https://www.toptal.com/developers/gitignore/api/python,java,c++,pycharm,visualstudiocode,macos,linux,windows
+
+.*
+flow_modules/

OpenAIChatAtomicFlow.py → ChatAtomicFlow.py
RENAMED
@@ -17,7 +17,78 @@ from flows.backends.llm_lite import LiteLLMBackend
 log = logging.get_logger(__name__)


-class OpenAIChatAtomicFlow(AtomicFlow):
+class ChatAtomicFlow(AtomicFlow):
+    """This class implements a ChatAtomicFlow: a flow that uses an LLM via an API to generate textual responses to textual inputs.
+    It employs litellm as a backend to query the LLM via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
+
+    *Configuration Parameters*:
+
+    - `name` (str): The name of the flow, used to identify it in the logs. Default: "ChatAtomicFlow"
+    - `description` (str): A description of the flow, used to generate its help message.
+      Default: "Flow which uses as tool an LLM through an API"
+    - `enable_cache` (bool): Whether to enable caching for this flow. Default: True
+    - `n_api_retries` (int): The number of times to retry the API call in case of failure. Default: 6
+    - `wait_time_between_retries` (int): The number of seconds to wait between retries when a call fails. Default: 20
+    - `system_name` (str): The name of the system role of the LLM. Default: "system"
+    - `user_name` (str): The name of the user role of the LLM. Default: "user"
+    - `assistant_name` (str): The name of the assistant role of the LLM. Default: "assistant"
+    - `backend` (Dict[str,Any]): The backend of the flow, used to call models via an API.
+      See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers.
+      The default parameters of the backend are all defined at flows.backends.llm_lite.LiteLLMBackend
+      (also see the default parameters of litellm's completion call: https://docs.litellm.ai/docs/completion/input#input-params-1),
+      except for the following parameters, which ChatAtomicFlow overrides in ChatAtomicFlow.yaml:
+        - `model_name` (Union[Dict[str,str],str]): The name of the model to use.
+          When using multiple API providers, model_name can be a dictionary of the form
+          {"provider_name": "model_name"}, e.g. {"openai": "gpt-3.5-turbo", "azure": "azure/gpt-3.5-turbo"}.
+          Default: "gpt-3.5-turbo" (the name must follow litellm's model naming: https://docs.litellm.ai/docs/providers)
+        - `n` (int): The number of answers to generate. Default: 1
+        - `max_tokens` (int): The maximum number of tokens to generate. Default: 2000
+        - `temperature` (float): The temperature of the generation. Default: 0.3
+        - `top_p` (float): An alternative to sampling with temperature; the model considers only the tokens
+          comprising the top_p probability mass. Default: 0.2
+        - `frequency_penalty` (float): Penalizes new tokens based on their frequency in the text so far. Default: 0.0
+        - `presence_penalty` (float): Penalizes new tokens based on their presence in the text so far. Default: 0.0
+        - `stream` (bool): Whether to stream the response or not. Default: True
+    - `system_message_prompt_template` (Dict[str,Any]): The template used to generate the system message.
+      By default it is of type flows.prompt_template.JinjaPrompt.
+      None of the prompt's parameters are defined by default, so they must be defined in order to use the system prompt.
+      Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+    - `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize
+      the conversation (the first time the flow is run). It is passed as the user message to the LLM.
+      By default it is of type flows.prompt_template.JinjaPrompt. None of its parameters are defined by default, so they must be
+      defined in order to use it. Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+    - `human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message (used on every run except the first).
+      It is passed as the user message to the LLM. By default it is of type flows.prompt_template.JinjaPrompt and has the following parameters:
+        - `template` (str): The template of the human message. Default: see ChatAtomicFlow.yaml for the default value.
+        - `input_variables` (List[str]): The input variables of the human message prompt template. Default: ["query"]
+    - `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if
+      `first_k` and `last_k` are both None, all the messages of the flow's history are added to the input of the LLM. Default:
+        - `first_k` (int): If defined, adds the first_k earliest messages of the flow's chat history to the input of the LLM. Default: None
+        - `last_k` (int): If defined, adds the last_k latest messages of the flow's chat history to the input of the LLM. Default: None
+
+    *Input Interface Initialized (expected input the first time the flow is run)*:
+
+    - `query` (str): The query given to the flow. (e.g. {"query": "What's the capital of Switzerland?"})
+
+    *Input Interface (expected input after the first run)*:
+
+    - `query` (str): The query given to the flow. (e.g. {"query": "Are you sure of your answer?"})
+
+    *Output Interface*:
+
+    - `api_output` (str): The output of the API call, i.e. the response of the LLM to the input. (e.g. {"api_output": "The capital of Switzerland is Bern"})
+
+    :param system_message_prompt_template: The template used to generate the system message.
+    :type system_message_prompt_template: JinjaPrompt
+    :param human_message_prompt_template: The template used to generate the human message.
+    :type human_message_prompt_template: JinjaPrompt
+    :param init_human_message_prompt_template: The template of the human message used to initialize the conversation (first run).
+    :type init_human_message_prompt_template: Optional[JinjaPrompt]
+    :param backend: The backend of the flow, an LLM queried via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
+    :type backend: LiteLLMBackend
+    :param \**kwargs: Additional arguments to pass to the flow. See :class:`flows.base_flows.AtomicFlow` for more details.
+    """
     REQUIRED_KEYS_CONFIG = ["backend"]

     SUPPORTS_CACHING: bool = True
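The conversational contract the docstring describes can be pictured with a short, self-contained sketch. It is illustrative only and is not the module's implementation: `call_llm` stands in for the LiteLLM backend, and the real flow additionally handles demonstrations, retries, role validation and prompt templating.

```python
from typing import Any, Callable, Dict, List

Message = Dict[str, str]

def step(state: List[Message], input_data: Dict[str, Any],
         system_msg: str, call_llm: Callable[[List[Message]], str]) -> str:
    """One flow run: initialize the conversation on the first call, then
    append the user query, query the backend, and record its answer."""
    if not state:  # first time in: add the system message
        state.append({"role": "system", "content": system_msg})
    state.append({"role": "user", "content": input_data["query"]})
    response = call_llm(state)  # stand-in for the LiteLLM backend call
    state.append({"role": "assistant", "content": response})
    return response  # surfaced to the caller as {"api_output": response}
```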
@@ -46,11 +117,19 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         ], f"Flow name '{self.flow_config['name']}' cannot be 'system', 'user' or 'assistant'"

     def set_up_flow_state(self):
+        """This method sets up the state of the flow and clears the previous messages."""
         super().set_up_flow_state()
         self.flow_state["previous_messages"] = []

     @classmethod
     def _set_up_prompts(cls, config):
+        """This method sets up the prompts of the flow by instantiating the prompt templates.
+
+        :param config: The configuration of the flow.
+        :type config: Dict[str, Any]
+        :return: The instantiated prompts of the flow.
+        :rtype: Dict[str, Any]
+        """
         kwargs = {}

         kwargs["system_message_prompt_template"] = \

@@ -64,6 +143,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):

     @classmethod
     def _set_up_backend(cls, config):
+        """This method sets up the backend of the flow by instantiating it.
+
+        :param config: The configuration of the flow.
+        :type config: Dict[str, Any]
+        :return: The instantiated backend of the flow.
+        :rtype: Dict[str, Any]
+        """
         kwargs = {}

         kwargs["backend"] = \

@@ -73,6 +159,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):

     @classmethod
     def instantiate_from_config(cls, config):
+        """This method instantiates the flow from a configuration file.
+
+        :param config: The configuration of the flow.
+        :type config: Dict[str, Any]
+        :return: The instantiated flow.
+        :rtype: ChatAtomicFlow
+        """
         flow_config = deepcopy(config)

         kwargs = {"flow_config": flow_config}

@@ -85,12 +178,22 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         return cls(**kwargs)

     def _is_conversation_initialized(self):
+        """This method checks whether the conversation is initialized.
+
+        :return: True if the conversation is initialized, False otherwise.
+        :rtype: bool
+        """
         if len(self.flow_state["previous_messages"]) > 0:
             return True

         return False

     def get_interface_description(self):
+        """This method returns the description of the flow's input and output interface.
+
+        :return: The description of the flow's interface.
+        :rtype: Dict[str, Any]
+        """
         if self._is_conversation_initialized():

             return {"input": self.flow_config["input_interface_initialized"],

@@ -101,19 +204,26 @@ class OpenAIChatAtomicFlow(AtomicFlow):

     @staticmethod
     def _get_message(prompt_template, input_data: Dict[str, Any]):
+        """This method generates a message from a prompt template and input data that contains the template's input variables.
+
+        :param prompt_template: The prompt template.
+        :type prompt_template: JinjaPrompt
+        :param input_data: The input data of the prompt template.
+        :type input_data: Dict[str, Any]
+        :return: The generated message.
+        """
         template_kwargs = {}
         for input_variable in prompt_template.input_variables:
             template_kwargs[input_variable] = input_data[input_variable]
         msg_content = prompt_template.format(**template_kwargs)
         return msg_content

-    def _get_demonstration_query_message_content(self, sample_data: Dict):
-        input_variables = self.init_human_message_prompt_template.input_variables
-        return self.init_human_message_prompt_template.format(**{k: sample_data[k] for k in input_variables})
-
-
     def _add_demonstrations(self, input_data: Dict[str, Any]):
+        """This method adds demonstrations to the flow (if any). The demonstrations should be passed from a DemonstrationFlow.
+
+        :param input_data: The input data of the flow.
+        :type input_data: Dict[str, Any]
+        """
         for demonstration in input_data.get("demonstrations",[]):
             query = demonstration["query"]
             response = demonstration["response"]

@@ -127,7 +237,13 @@ class OpenAIChatAtomicFlow(AtomicFlow):
     def _state_update_add_chat_message(self,
                                        role: str,
                                        content: str) -> None:
-
+        """This method adds a message to the flow's state.
+
+        :param role: The role of the message (e.g. "user", "assistant", "system").
+        :type role: str
+        :param content: The content of the message.
+        :type content: str
+        """

         acceptable_roles = [self.flow_config["system_name"],self.flow_config["user_name"],self.flow_config["assistant_name"]]
         if role in acceptable_roles:

@@ -149,6 +265,12 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         self._log_message(chat_message)

     def _get_previous_messages(self):
+        """This method returns the previous messages of the flow. If first_k is set, it returns the first k messages; if last_k is set, the last k messages.
+        If both are set, it returns the first k messages and the last k messages. If neither is set, it returns all the messages.
+
+        :return: The previous messages of the flow.
+        :rtype: List[Dict[str, Any]]
+        """
         all_messages = self.flow_state["previous_messages"]
         first_k = self.flow_config["previous_messages"]["first_k"]
         last_k = self.flow_config["previous_messages"]["last_k"]

@@ -162,7 +284,11 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         return all_messages[-last_k:]

     def _call(self):
-
+        """This method calls the flow's backend, i.e. it queries the LLM with the flow's previous messages.
+
+        :return: The output of the backend.
+        :rtype: Any
+        """
         messages = self._get_previous_messages()
         _success = False
         attempts = 1

@@ -193,6 +319,11 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         return response

     def _initialize_conversation(self, input_data: Dict[str, Any]):
+        """This method initializes the conversation (on the first run). It adds the system message and, if provided, the demonstrations.
+
+        :param input_data: The input data of the flow.
+        :type input_data: Dict[str, Any]
+        """
         # ~~~ Add the system message ~~~
         system_message_content = self._get_message(self.system_message_prompt_template, input_data)

@@ -203,6 +334,12 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         self._add_demonstrations(input_data)

     def _process_input(self, input_data: Dict[str, Any]):
+        """This method processes the flow's input. It adds the human message to the flow's state and, if the conversation is not yet initialized,
+        initializes it first (adding the system message and, if provided, the demonstrations).
+
+        :param input_data: The input data of the flow.
+        :type input_data: Dict[str, Any]
+        """
         if self._is_conversation_initialized():
             # Construct the message using the human message prompt template
             user_message_content = self._get_message(self.human_message_prompt_template, input_data)

@@ -219,8 +356,15 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         self._state_update_add_chat_message(role=self.flow_config["user_name"],
                                             content=user_message_content)

-    def run(self,
-            input_data: Dict[str, Any]):
+    def run(self, input_data: Dict[str, Any]):
+        """This method runs the flow. It processes the input, calls the backend, and updates the state of the flow.
+
+        :param input_data: The input data of the flow.
+        :type input_data: Dict[str, Any]
+        :return: The LLM's API output.
+        :rtype: Dict[str, Any]
+        """
+
         # ~~~ Process input ~~~
         self._process_input(input_data)
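The `first_k`/`last_k` history windowing documented on `_get_previous_messages` reduces to a few list slices. A minimal sketch of that behavior (illustrative only, not the module's code):

```python
from typing import Any, Dict, List, Optional

def window(messages: List[Dict[str, Any]],
           first_k: Optional[int] = None,
           last_k: Optional[int] = None) -> List[Dict[str, Any]]:
    if first_k is None and last_k is None:
        return messages                                 # no limits: full history
    if first_k is not None and last_k is not None:
        return messages[:first_k] + messages[-last_k:]  # head of history + tail
    if first_k is not None:
        return messages[:first_k]                       # earliest k messages
    return messages[-last_k:]                           # latest k messages
```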
OpenAIChatAtomicFlow.yaml → ChatAtomicFlow.yaml
RENAMED
@@ -1,4 +1,6 @@
 # This is an abstract flow, therefore some required fields are not defined (and must be defined by the concrete flow)
+name: ChatAtomicFlow
+description: "Flow which uses as tool an LLM through an API"
 enable_cache: True

 n_api_retries: 6

@@ -46,8 +48,5 @@ previous_messages:
   first_k: null # Note that the first message is the system prompt
   last_k: null

-demonstrations: null
-demonstrations_response_template: null
-
 output_interface:
   - "api_output"
README.md
CHANGED
@@ -1,26 +1,156 @@
 ---
 license: mit
 ---
-(TODO)
-
-##
-
-(Note that the interface might depend on the state of the Flow.)
+
+# Table of Contents
+
+* [ChatAtomicFlow](#ChatAtomicFlow)
+  * [ChatAtomicFlow](#ChatAtomicFlow.ChatAtomicFlow)
+    * [set\_up\_flow\_state](#ChatAtomicFlow.ChatAtomicFlow.set_up_flow_state)
+    * [instantiate\_from\_config](#ChatAtomicFlow.ChatAtomicFlow.instantiate_from_config)
+    * [get\_interface\_description](#ChatAtomicFlow.ChatAtomicFlow.get_interface_description)
+    * [run](#ChatAtomicFlow.ChatAtomicFlow.run)
+
+<a id="ChatAtomicFlow"></a>
+
+# ChatAtomicFlow
+
+<a id="ChatAtomicFlow.ChatAtomicFlow"></a>
+
+## ChatAtomicFlow Objects
+
+```python
+class ChatAtomicFlow(AtomicFlow)
+```
+
+This class implements a ChatAtomicFlow: a flow that uses an LLM via an API to generate textual responses to textual inputs.
+
+It employs litellm as a backend to query the LLM via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
+
+*Configuration Parameters*:
+
+- `name` (str): The name of the flow, used to identify it in the logs. Default: "ChatAtomicFlow"
+- `description` (str): A description of the flow, used to generate its help message.
+  Default: "Flow which uses as tool an LLM through an API"
+- `enable_cache` (bool): Whether to enable caching for this flow. Default: True
+- `n_api_retries` (int): The number of times to retry the API call in case of failure. Default: 6
+- `wait_time_between_retries` (int): The number of seconds to wait between retries when a call fails. Default: 20
+- `system_name` (str): The name of the system role of the LLM. Default: "system"
+- `user_name` (str): The name of the user role of the LLM. Default: "user"
+- `assistant_name` (str): The name of the assistant role of the LLM. Default: "assistant"
+- `backend` (Dict[str,Any]): The backend of the flow, used to call models via an API.
+  See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers.
+  The default parameters of the backend are all defined at flows.backends.llm_lite.LiteLLMBackend
+  (also see the default parameters of litellm's completion call: https://docs.litellm.ai/docs/completion/input#input-params-1),
+  except for the following parameters, which ChatAtomicFlow overrides in ChatAtomicFlow.yaml:
+  - `model_name` (Union[Dict[str,str],str]): The name of the model to use.
+    When using multiple API providers, model_name can be a dictionary of the form
+    {"provider_name": "model_name"}, e.g. {"openai": "gpt-3.5-turbo", "azure": "azure/gpt-3.5-turbo"}.
+    Default: "gpt-3.5-turbo" (the name must follow litellm's model naming: https://docs.litellm.ai/docs/providers)
+  - `n` (int): The number of answers to generate. Default: 1
+  - `max_tokens` (int): The maximum number of tokens to generate. Default: 2000
+  - `temperature` (float): The temperature of the generation. Default: 0.3
+  - `top_p` (float): An alternative to sampling with temperature; the model considers only the tokens
+    comprising the top_p probability mass. Default: 0.2
+  - `frequency_penalty` (float): Penalizes new tokens based on their frequency in the text so far. Default: 0.0
+  - `presence_penalty` (float): Penalizes new tokens based on their presence in the text so far. Default: 0.0
+  - `stream` (bool): Whether to stream the response or not. Default: True
+- `system_message_prompt_template` (Dict[str,Any]): The template used to generate the system message.
+  By default it is of type flows.prompt_template.JinjaPrompt.
+  None of the prompt's parameters are defined by default, so they must be defined in order to use the system prompt.
+  Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+- `init_human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message used to initialize
+  the conversation (the first time the flow is run). It is passed as the user message to the LLM.
+  By default it is of type flows.prompt_template.JinjaPrompt. None of its parameters are defined by default, so they must be
+  defined in order to use it. Default parameters are defined in flows.prompt_template.jinja2_prompts.JinjaPrompt.
+- `human_message_prompt_template` (Dict[str,Any]): The prompt template of the human/user message (used on every run except the first).
+  It is passed as the user message to the LLM. By default it is of type flows.prompt_template.JinjaPrompt and has the following parameters:
+  - `template` (str): The template of the human message. Default: see ChatAtomicFlow.yaml for the default value.
+  - `input_variables` (List[str]): The input variables of the human message prompt template. Default: ["query"]
+- `previous_messages` (Dict[str,Any]): Defines which previous messages to include in the input of the LLM. Note that if
+  `first_k` and `last_k` are both None, all the messages of the flow's history are added to the input of the LLM. Default:
+  - `first_k` (int): If defined, adds the first_k earliest messages of the flow's chat history to the input of the LLM. Default: None
+  - `last_k` (int): If defined, adds the last_k latest messages of the flow's chat history to the input of the LLM. Default: None
+
+*Input Interface Initialized (expected input the first time the flow is run)*:
+
+- `query` (str): The query given to the flow. (e.g. {"query": "What's the capital of Switzerland?"})
+
+*Input Interface (expected input after the first run)*:
+
+- `query` (str): The query given to the flow. (e.g. {"query": "Are you sure of your answer?"})
+
+*Output Interface*:
+
+- `api_output` (str): The output of the API call, i.e. the response of the LLM to the input. (e.g. {"api_output": "The capital of Switzerland is Bern"})
+
+**Arguments**:
+
+- `system_message_prompt_template` (`JinjaPrompt`): The template used to generate the system message.
+- `human_message_prompt_template` (`JinjaPrompt`): The template used to generate the human message.
+- `init_human_message_prompt_template` (`Optional[JinjaPrompt]`): The template of the human message used to initialize the conversation (first run).
+- `backend` (`LiteLLMBackend`): The backend of the flow, an LLM queried via an API. See litellm's supported models and APIs here: https://docs.litellm.ai/docs/providers
+- `\**kwargs`: Additional arguments to pass to the flow. See :class:`flows.base_flows.AtomicFlow` for more details.
+
+<a id="ChatAtomicFlow.ChatAtomicFlow.set_up_flow_state"></a>
+
+#### set\_up\_flow\_state
+
+```python
+def set_up_flow_state()
+```
+
+This method sets up the state of the flow and clears the previous messages.
+
+<a id="ChatAtomicFlow.ChatAtomicFlow.instantiate_from_config"></a>
+
+#### instantiate\_from\_config
+
+```python
+@classmethod
+def instantiate_from_config(cls, config)
+```
+
+This method instantiates the flow from a configuration file.
+
+**Arguments**:
+
+- `config` (`Dict[str, Any]`): The configuration of the flow.
+
+**Returns**:
+
+`ChatAtomicFlow`: The instantiated flow.
+
+<a id="ChatAtomicFlow.ChatAtomicFlow.get_interface_description"></a>
+
+#### get\_interface\_description
+
+```python
+def get_interface_description()
+```
+
+This method returns the description of the flow's input and output interface.
+
+**Returns**:
+
+`Dict[str, Any]`: The description of the flow's interface.
+
+<a id="ChatAtomicFlow.ChatAtomicFlow.run"></a>
+
+#### run
+
+```python
+def run(input_data: Dict[str, Any])
+```
+
+This method runs the flow. It processes the input, calls the backend, and updates the state of the flow.
+
+**Arguments**:
+
+- `input_data` (`Dict[str, Any]`): The input data of the flow.
+
+**Returns**:
+
+`Dict[str, Any]`: The LLM's API output.
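Putting the documented interfaces together, a hypothetical call pattern would look like the sketch below. The `flow` argument is assumed to be an instantiated ChatAtomicFlow (run.py further down shows how the demo builds one with hydra.utils.instantiate).

```python
def demo_dialogue(flow) -> None:
    # First run: matches the "Input Interface Initialized" contract.
    first = flow.run({"query": "What's the capital of Switzerland?"})
    print(first)  # expected shape: {"api_output": "The capital of Switzerland is Bern"}

    # Later runs reuse the conversation state kept in flow.flow_state.
    followup = flow.run({"query": "Are you sure of your answer?"})
    print(followup)  # again of the form {"api_output": "..."}
```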
__init__.py
CHANGED
@@ -1 +1 @@
-from .
+from .ChatAtomicFlow import ChatAtomicFlow
simpleQA.yaml → demo.yaml
RENAMED
@@ -8,8 +8,8 @@ output_interface: # Connector between the Flow's output and the caller
   keys_to_rename:
     api_output: answer # Rename the api_output to answer

-flow: # Overrides the OpenAIChatAtomicFlow config
-  _target_: aiflows.
+flow: # Overrides the ChatAtomicFlow config
+  _target_: aiflows.ChatFlowModule.ChatAtomicFlow.instantiate_from_default_config

   name: "SimpleQA_Flow"
   description: "A flow that answers questions."

@@ -18,34 +18,39 @@ flow: # Overrides the OpenAIChatAtomicFlow config
   input_interface_non_initialized:
     - "question"

-  # ~~~
-
-
+  # ~~~ backend model parameters ~~
+  backend:
+    _target_: flows.backends.llm_lite.LiteLLMBackend
+    api_infos: ???
+    model_name:
+      openai: "gpt-3.5-turbo"
+      azure: "azure/gpt-4"
+
+    # ~~~ generation_parameters ~~
     n: 1
     max_tokens: 3000
     temperature: 0.3

-
-
-
-  presence_penalty: 0
+    top_p: 0.2
+    frequency_penalty: 0
+    presence_penalty: 0

   n_api_retries: 6
   wait_time_between_retries: 20

   # ~~~ Prompt specification ~~~
   system_message_prompt_template:
-    _target_:
+    _target_: flows.prompt_template.JinjaPrompt
     template: |2-
       You are a helpful chatbot that truthfully answers questions.
     input_variables: []
     partial_variables: {}
-

   init_human_message_prompt_template:
-    _target_:
+    _target_: flows.prompt_template.JinjaPrompt
     template: |2-
       Answer the following question: {{question}}
     input_variables: ["question"]
     partial_variables: {}
+
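The prompt templates above follow the JinjaPrompt contract: a Jinja2 template string plus the `input_variables` it expects, rendered with matching keyword arguments. A toy equivalent using jinja2 directly, to illustrate the formatting behavior (this is not the flows.prompt_template.JinjaPrompt class itself):

```python
from jinja2 import Template

# Same template string as init_human_message_prompt_template above.
template = Template("Answer the following question: {{question}}")
print(template.render(question="What's the capital of Switzerland?"))
# -> Answer the following question: What's the capital of Switzerland?
```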
pip_requirements.py
DELETED
@@ -1 +0,0 @@
-# ToDo
pip_requirements.txt
ADDED
(empty file)
run.py
CHANGED
@@ -16,7 +16,7 @@ CACHING_PARAMETERS.do_caching = False # Set to False in order to disable caching
 logging.set_verbosity_debug()

 dependencies = [
-    {"url": "aiflows/
+    {"url": "aiflows/ChatFlowModule", "revision": os.getcwd()},
 ]
 from flows import flow_verse
 flow_verse.sync_dependencies(dependencies)

@@ -35,12 +35,11 @@
 # api_version = os.getenv("AZURE_API_VERSION") )

 root_dir = "."
-cfg_path = os.path.join(root_dir, "
+cfg_path = os.path.join(root_dir, "demo.yaml")
 cfg = read_yaml_file(cfg_path)

 cfg["flow"]["backend"]["api_infos"] = api_information
 # ~~~ Instantiate the Flow ~~~
-# ~~~ Instantiate the Flow ~~~
 flow_with_interfaces = {
     "flow": hydra.utils.instantiate(cfg['flow'], _recursive_=False, _convert_="partial"),
     "input_interface": (