Spaces: No application file

martinakaduc committed
Commit f3305db
1 Parent(s): 8aa06fd

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +1 -0
- .gitignore +37 -0
- .ipynb_checkpoints/format-checkpoint.sh +71 -0
- .pylintrc +449 -0
- Dockerfile +13 -0
- LICENSE +201 -0
- README.md +354 -7
- assets/demo_narrow.gif +3 -0
- assets/qa_browser.png +0 -0
- assets/screenshot_cli.png +0 -0
- assets/screenshot_gui.png +0 -0
- assets/server_arch.png +0 -0
- assets/vicuna_logo.jpeg +0 -0
- docker/Dockerfile +7 -0
- docker/docker-compose.yml +36 -0
- docs/arena.md +48 -0
- docs/awq.md +71 -0
- docs/commands/conv_release.md +38 -0
- docs/commands/data_cleaning.md +19 -0
- docs/commands/leaderboard.md +37 -0
- docs/commands/local_cluster.md +38 -0
- docs/commands/pypi.md +11 -0
- docs/commands/webserver.md +94 -0
- docs/dashinfer_integration.md +23 -0
- docs/dataset_release.md +6 -0
- docs/exllama_v2.md +63 -0
- docs/gptq.md +59 -0
- docs/langchain_integration.md +90 -0
- docs/lightllm_integration.md +18 -0
- docs/mlx_integration.md +23 -0
- docs/model_support.md +140 -0
- docs/openai_api.md +152 -0
- docs/server_arch.md +2 -0
- docs/third_party_ui.md +24 -0
- docs/training.md +118 -0
- docs/vicuna_weights_version.md +97 -0
- docs/vllm_integration.md +25 -0
- docs/xFasterTransformer.md +90 -0
- fastchat/.ipynb_checkpoints/constants-checkpoint.py +294 -0
- fastchat/.ipynb_checkpoints/conversation-checkpoint.py +2117 -0
- fastchat/.ipynb_checkpoints/utils-checkpoint.py +484 -0
- fastchat/__init__.py +1 -0
- fastchat/constants.py +294 -0
- fastchat/conversation.py +2117 -0
- fastchat/data/__init__.py +0 -0
- fastchat/data/clean_sharegpt.py +232 -0
- fastchat/data/convert_alpaca.py +38 -0
- fastchat/data/extract_gpt4_only.py +32 -0
- fastchat/data/extract_single_round.py +29 -0
- fastchat/data/filter_wrong_format.py +44 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/demo_narrow.gif filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,37 @@
# Python
__pycache__
*.pyc
*.egg-info
dist
.venv

# Log
*.log
*.log.*
*.json
!playground/deepspeed_config_s2.json
!playground/deepspeed_config_s3.json

# Editor
.idea
*.swp

# Other
.DS_Store
wandb
output
checkpoints_flant5_3b

# Data
*.pkl
*.csv
tests/state_of_the_union.txt

# Build
build

# Image data
serve_images
val2014
vqa_examples
api_endpoints.json
.ipynb_checkpoints/format-checkpoint.sh
ADDED
@@ -0,0 +1,71 @@
#!/usr/bin/env bash

# Adapted from https://github.com/skypilot-org/skypilot/blob/master/format.sh

# Cause the script to exit if a single command fails
set -eo pipefail

# this stops git rev-parse from failing if we run this from the .git directory
builtin cd "$(dirname "${BASH_SOURCE:-$0}")"
ROOT="$(git rev-parse --show-toplevel)"
builtin cd "$ROOT" || exit 1

BLACK_VERSION=$(black --version | head -n 1 | awk '{print $2}')
PYLINT_VERSION=$(pylint --version | head -n 1 | awk '{print $2}')

# params: tool name, tool version, required version
tool_version_check() {
    if [[ $2 != $3 ]]; then
        echo "Wrong $1 version installed: $3 is required, not $2."
        exit 1
    fi
}

tool_version_check "black" $BLACK_VERSION "23.3.0"
tool_version_check "pylint" $PYLINT_VERSION "2.8.2"

# Format files that differ from main branch. Ignores dirs that are not slated
# for autoformat yet.
format_changed() {
    # The `if` guard ensures that the list of filenames is not empty, which
    # could cause yapf to receive 0 positional arguments, making it hang
    # waiting for STDIN.
    #
    # `diff-filter=ACM` and $MERGEBASE is to ensure we only format files that
    # exist on both branches.
    MERGEBASE="$(git merge-base origin/main HEAD)"

    if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
        git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs -P 5 black
    fi
}

## This flag formats individual files. --files *must* be the first command line
## arg to use this option.
if [[ "$1" == '--files' ]]; then
    black "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire python directory is formatted.
elif [[ "$1" == '--all' ]]; then
    # Format all files
    black fastchat
else
    # Format only the files that changed in last commit.
    format_changed
fi
echo 'FastChat Black: Done'

# Run Pylint
echo 'FastChat Pylint:'
pylint fastchat
# TODO(suquark): disable 'pylint_quotes' for now due to too many inconsistent quotes
# pylint --load-plugins pylint_quotes fastchat

if ! git diff --quiet &>/dev/null; then
    echo 'Reformatted files. Please review and stage the changes.'
    echo 'Changes not staged for commit:'
    echo
    git --no-pager diff --name-only

    exit 1
fi
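For reference, a sketch of how this script is typically invoked (assuming it is saved as `format.sh` at the repository root, as in upstream FastChat, with black 23.3.0 and pylint 2.8.2 installed; the file paths below are illustrative):

```bash
# Format only Python files that differ from origin/main, then run pylint (default mode).
bash format.sh

# Format specific files; --files must be the first argument.
bash format.sh --files fastchat/serve/cli.py

# Format the entire fastchat package, ignoring any further arguments.
bash format.sh --all
```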
.pylintrc
ADDED
@@ -0,0 +1,449 @@
# This Pylint rcfile contains a best-effort configuration to uphold the
# best-practices and style described in the Google Python style guide:
#   https://google.github.io/styleguide/pyguide.html
#
# Its canonical open-source location is:
#   https://google.github.io/styleguide/pylintrc

[MASTER]
ignore=third_party,ray_patches,providers
ignore-patterns=
persistent=no
load-plugins=
jobs=4
unsafe-load-any-extension=no

[MESSAGES CONTROL]
confidence=
disable=abstract-method,
        apply-builtin,
        arguments-differ,
        attribute-defined-outside-init,
        backtick,
        bad-option-value,
        basestring-builtin,
        buffer-builtin,
        c-extension-no-member,
        consider-using-enumerate,
        cmp-builtin,
        cmp-method,
        coerce-builtin,
        coerce-method,
        delslice-method,
        div-method,
        duplicate-code,
        eq-without-hash,
        execfile-builtin,
        file-builtin,
        filter-builtin-not-iterating,
        fixme,
        getslice-method,
        global-statement,
        hex-method,
        idiv-method,
        implicit-str-concat-in-sequence,
        import-error,
        import-self,
        import-star-module-level,
        inconsistent-return-statements,
        input-builtin,
        intern-builtin,
        invalid-str-codec,
        locally-disabled,
        logging-format-interpolation,  # FIXME(sky): make pass.
        logging-fstring-interpolation,  # FIXME(sky): make pass.
        long-builtin,
        long-suffix,
        map-builtin-not-iterating,
        misplaced-comparison-constant,
        missing-function-docstring,
        metaclass-assignment,
        next-method-called,
        next-method-defined,
        no-absolute-import,
        no-else-break,
        no-else-continue,
        no-else-raise,
        no-else-return,
        no-init,  # added
        no-member,
        no-name-in-module,
        no-self-use,
        nonzero-method,
        oct-method,
        old-division,
        old-ne-operator,
        old-octal-literal,
        old-raise-syntax,
        parameter-unpacking,
        print-statement,
        raising-string,
        range-builtin-not-iterating,
        raw_input-builtin,
        rdiv-method,
        reduce-builtin,
        relative-import,
        reload-builtin,
        round-builtin,
        setslice-method,
        signature-differs,
        standarderror-builtin,
        suppressed-message,
        sys-max-int,
        too-few-public-methods,
        too-many-ancestors,
        too-many-arguments,
        too-many-boolean-expressions,
        too-many-branches,
        too-many-instance-attributes,
        too-many-locals,
        too-many-nested-blocks,
        too-many-public-methods,
        too-many-return-statements,
        too-many-statements,
        trailing-newlines,
        unichr-builtin,
        unicode-builtin,
        unnecessary-pass,
        unpacking-in-except,
        useless-else-on-loop,
        useless-object-inheritance,
        useless-suppression,
        using-cmp-argument,
        wrong-import-order,
        xrange-builtin,
        zip-builtin-not-iterating,

[REPORTS]
output-format=text
files-output=no
reports=no
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
#msg-template=

[BASIC]
good-names=main,_
bad-names=
name-group=
include-naming-hint=no
property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl
function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
variable-rgx=^[a-z][a-z0-9_]*$
const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
argument-rgx=^[a-z][a-z0-9_]*$
class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
inlinevar-rgx=^[a-z][a-z0-9_]*$
class-rgx=^_?[A-Z][a-zA-Z0-9]*$
module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$
method-rgx=(?x)^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$
docstring-min-length=10

[TYPECHECK]
contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
ignore-mixin-members=yes
ignored-modules=
ignored-classes=optparse.Values,thread._local,_thread._local
generated-members=

[FORMAT]
max-line-length=100
ignore-long-lines=(?x)(
  ^\s*(\#\ )?<?https?://\S+>?$|
  ^\s*(from\s+\S+\s+)?import\s+.+$)
single-line-if-stmt=yes
no-space-check=
max-module-lines=99999
indent-string='    '
indent-after-paren=4
expected-line-ending-format=

[MISCELLANEOUS]
notes=TODO

[STRING]
check-quote-consistency=yes

[VARIABLES]
init-import=no
dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
additional-builtins=
callbacks=cb_,_cb
redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools

[LOGGING]
logging-modules=logging,absl.logging,tensorflow.io.logging

[SIMILARITIES]
min-similarity-lines=4
ignore-comments=yes
ignore-docstrings=yes
ignore-imports=no

[SPELLING]
spelling-dict=
spelling-ignore-words=
spelling-private-dict-file=
spelling-store-unknown-words=no

[IMPORTS]
deprecated-modules=regsub,
                   TERMIOS,
                   Bastion,
                   rexec,
                   sets
import-graph=
ext-import-graph=
int-import-graph=
known-standard-library=
known-third-party=enchant, absl
analyse-fallback-blocks=no

[CLASSES]
defining-attr-methods=__init__,
                      __new__,
                      setUp
exclude-protected=_asdict,
                  _fields,
                  _replace,
                  _source,
                  _make
valid-classmethod-first-arg=cls,
                            class_
valid-metaclass-classmethod-first-arg=mcs

[EXCEPTIONS]
overgeneral-exceptions=StandardError,
                       Exception,
                       BaseException

#######

# https://github.com/edaniszewski/pylint-quotes#configuration
string-quote=single
triple-quote=double
docstring-quote=double
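As a rough sketch of how this configuration is exercised directly (tool versions assumed from the pins in the format script above; pylint also picks up `.pylintrc` automatically when run from the repo root):

```bash
# Install the pinned tool versions the repo expects.
pip install "black==23.3.0" "pylint==2.8.2"

# Reformat the package, then lint it against the rcfile above.
black fastchat
pylint --rcfile=.pylintrc fastchat
```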
Dockerfile
ADDED
@@ -0,0 +1,13 @@
FROM ubuntu:20.04

RUN apt-get update -y && apt-get install -y python3.9 python3.9-distutils curl
RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
RUN python3.9 get-pip.py

RUN mkdir /root/fastchat
COPY . /root/fastchat
WORKDIR /root/fastchat
RUN pip3 install -e ".[model_worker,webui]"

EXPOSE 7860
CMD python3.9 -m fastchat.serve.gradio_web_server --gradio-root-path /melt --controller "" --share --register ./api_endpoints.json
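A minimal sketch of building and running this image locally (the image tag and host port below are illustrative, not part of the commit):

```bash
# Build the image from the repository root, where this Dockerfile lives.
docker build -t fastchat-melt .

# Run the Gradio web server defined by the CMD above, exposing port 7860.
docker run --rm -p 7860:7860 fastchat-melt
```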
LICENSE
ADDED
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

(Full, unmodified Apache License 2.0 text: definitions; copyright and patent grants; redistribution conditions; submission of contributions; trademarks; disclaimer of warranty; limitation of liability; accepting warranty or additional liability; and the appendix describing how to apply the license to your work.)
README.md
CHANGED
@@ -1,12 +1,359 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: blue
|
5 |
-
colorTo: indigo
|
6 |
sdk: gradio
|
7 |
sdk_version: 4.42.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
|
|
|
|
11 |
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
title: melt
|
3 |
+
app_file: fastchat/serve
|
|
|
|
|
4 |
sdk: gradio
|
5 |
sdk_version: 4.42.0
|
|
|
|
|
6 |
---
|
7 |
+
# FastChat
|
8 |
+
| [**Demo**](https://chat.lmsys.org/) | [**Discord**](https://discord.gg/HSWAKCrnFx) | [**X**](https://x.com/lmsysorg) |
|
9 |
|
10 |
+
FastChat is an open platform for training, serving, and evaluating large language model based chatbots.
|
11 |
+
- FastChat powers Chatbot Arena (https://chat.lmsys.org/), serving over 10 million chat requests for 70+ LLMs.
|
12 |
+
- Chatbot Arena has collected over 500K human votes from side-by-side LLM battles to compile an online [LLM Elo leaderboard](https://leaderboard.lmsys.org).
|
13 |
+
|
14 |
+
FastChat's core features include:
|
15 |
+
- The training and evaluation code for state-of-the-art models (e.g., Vicuna, MT-Bench).
|
16 |
+
- A distributed multi-model serving system with web UI and OpenAI-compatible RESTful APIs.
|
17 |
+
|
18 |
+
## News
|
19 |
+
- [2024/03] 🔥 We released Chatbot Arena technical [report](https://arxiv.org/abs/2403.04132).
|
20 |
+
- [2023/09] We released **LMSYS-Chat-1M**, a large-scale real-world LLM conversation dataset. Read the [report](https://arxiv.org/abs/2309.11998).
|
21 |
+
- [2023/08] We released **Vicuna v1.5** based on Llama 2 with 4K and 16K context lengths. Download [weights](#vicuna-weights).
|
22 |
+
- [2023/07] We released **Chatbot Arena Conversations**, a dataset containing 33k conversations with human preferences. Download it [here](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations).
|
23 |
+
|
24 |
+
<details>
|
25 |
+
<summary>More</summary>
|
26 |
+
|
27 |
+
- [2023/08] We released **LongChat v1.5** based on Llama 2 with 32K context lengths. Download [weights](#longchat).
|
28 |
+
- [2023/06] We introduced **MT-bench**, a challenging multi-turn question set for evaluating chatbots. Check out the blog [post](https://lmsys.org/blog/2023-06-22-leaderboard/).
|
29 |
+
- [2023/06] We introduced **LongChat**, our long-context chatbots and evaluation tools. Check out the blog [post](https://lmsys.org/blog/2023-06-29-longchat/).
|
30 |
+
- [2023/05] We introduced **Chatbot Arena** for battles among LLMs. Check out the blog [post](https://lmsys.org/blog/2023-05-03-arena).
|
31 |
+
- [2023/03] We released **Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality**. Check out the blog [post](https://vicuna.lmsys.org).
|
32 |
+
|
33 |
+
</details>
|
34 |
+
|
35 |
+
<a href="https://chat.lmsys.org"><img src="assets/demo_narrow.gif" width="70%"></a>
|
36 |
+
|
37 |
+
## Contents
|
38 |
+
- [Install](#install)
|
39 |
+
- [Model Weights](#model-weights)
|
40 |
+
- [Inference with Command Line Interface](#inference-with-command-line-interface)
|
41 |
+
- [Serving with Web GUI](#serving-with-web-gui)
|
42 |
+
- [API](#api)
|
43 |
+
- [Evaluation](#evaluation)
|
44 |
+
- [Fine-tuning](#fine-tuning)
|
45 |
+
- [Citation](#citation)
|
46 |
+
|
47 |
+
## Install
|
48 |
+
|
49 |
+
### Method 1: With pip
|
50 |
+
|
51 |
+
```bash
|
52 |
+
pip3 install "fschat[model_worker,webui]"
|
53 |
+
```
|
54 |
+
|
55 |
+
### Method 2: From source
|
56 |
+
|
57 |
+
1. Clone this repository and navigate to the FastChat folder
|
58 |
+
```bash
|
59 |
+
git clone https://github.com/lm-sys/FastChat.git
|
60 |
+
cd FastChat
|
61 |
+
```
|
62 |
+
|
63 |
+
If you are running on Mac:
|
64 |
+
```bash
|
65 |
+
brew install rust cmake
|
66 |
+
```
|
67 |
+
|
68 |
+
2. Install Package
|
69 |
+
```bash
|
70 |
+
pip3 install --upgrade pip # enable PEP 660 support
|
71 |
+
pip3 install -e ".[model_worker,webui]"
|
72 |
+
```
|
73 |
+
|
74 |
+
## Model Weights
|
75 |
+
### Vicuna Weights
|
76 |
+
[Vicuna](https://lmsys.org/blog/2023-03-30-vicuna/) is based on Llama 2 and should be used under Llama's [model license](https://github.com/facebookresearch/llama/blob/main/LICENSE).
|
77 |
+
|
78 |
+
You can use the commands below to start chatting. It will automatically download the weights from Hugging Face repos.
|
79 |
+
Downloaded weights are stored in a `.cache` folder in the user's home folder (e.g., `~/.cache/huggingface/hub/<model_name>`).
|
80 |
+
|
81 |
+
See more command options and how to handle out-of-memory in the "Inference with Command Line Interface" section below.
|
82 |
+
|
83 |
+
**NOTE: `transformers>=4.31` is required for 16K versions.**
|
84 |
+
|
85 |
+
| Size | Chat Command | Hugging Face Repo |
|
86 |
+
| --- | --- | --- |
|
87 |
+
| 7B | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5` | [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) |
|
88 |
+
| 7B-16k | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5-16k` | [lmsys/vicuna-7b-v1.5-16k](https://huggingface.co/lmsys/vicuna-7b-v1.5-16k) |
|
89 |
+
| 13B | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-13b-v1.5` | [lmsys/vicuna-13b-v1.5](https://huggingface.co/lmsys/vicuna-13b-v1.5) |
|
90 |
+
| 13B-16k | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-13b-v1.5-16k` | [lmsys/vicuna-13b-v1.5-16k](https://huggingface.co/lmsys/vicuna-13b-v1.5-16k) |
|
91 |
+
| 33B | `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-33b-v1.3` | [lmsys/vicuna-33b-v1.3](https://huggingface.co/lmsys/vicuna-33b-v1.3) |
|
92 |
+
|
93 |
+
**Old weights**: see [docs/vicuna_weights_version.md](docs/vicuna_weights_version.md) for all versions of weights and their differences.
|
94 |
+
|
95 |
+
### Other Models
|
96 |
+
Besides Vicuna, we also released two additional models: [LongChat](https://lmsys.org/blog/2023-06-29-longchat/) and FastChat-T5.
|
97 |
+
You can use the commands below to chat with them. They will automatically download the weights from Hugging Face repos.
|
98 |
+
|
99 |
+
| Model | Chat Command | Hugging Face Repo |
|
100 |
+
| --- | --- | --- |
|
101 |
+
| LongChat-7B | `python3 -m fastchat.serve.cli --model-path lmsys/longchat-7b-32k-v1.5` | [lmsys/longchat-7b-32k](https://huggingface.co/lmsys/longchat-7b-32k-v1.5) |
|
102 |
+
| FastChat-T5-3B | `python3 -m fastchat.serve.cli --model-path lmsys/fastchat-t5-3b-v1.0` | [lmsys/fastchat-t5-3b-v1.0](https://huggingface.co/lmsys/fastchat-t5-3b-v1.0) |
|
103 |
+
|
104 |
+
## Inference with Command Line Interface
|
105 |
+
|
106 |
+
<a href="https://chat.lmsys.org"><img src="assets/screenshot_cli.png" width="70%"></a>
|
107 |
+
|
108 |
+
(Experimental Feature: You can specify `--style rich` to enable rich text output and better text streaming quality for some non-ASCII content. This may not work properly on certain terminals.)
|
109 |
+
|
110 |
+
#### Supported Models
|
111 |
+
FastChat supports a wide range of models, including
|
112 |
+
LLama 2, Vicuna, Alpaca, Baize, ChatGLM, Dolly, Falcon, FastChat-T5, GPT4ALL, Guanaco, MTP, OpenAssistant, OpenChat, RedPajama, StableLM, WizardLM, xDAN-AI and more.
|
113 |
+
|
114 |
+
See a complete list of supported models and instructions to add a new model [here](docs/model_support.md).
|
115 |
+
|
116 |
+
#### Single GPU
|
117 |
+
The command below requires around 14GB of GPU memory for Vicuna-7B and 28GB of GPU memory for Vicuna-13B.
|
118 |
+
See the ["Not Enough Memory" section](#not-enough-memory) below if you do not have enough memory.
|
119 |
+
`--model-path` can be a local folder or a Hugging Face repo name.
|
120 |
+
```
|
121 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5
|
122 |
+
```
|
123 |
+
|
124 |
+
#### Multiple GPUs
|
125 |
+
You can use model parallelism to aggregate GPU memory from multiple GPUs on the same machine.
|
126 |
+
```
|
127 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --num-gpus 2
|
128 |
+
```
|
129 |
+
|
130 |
+
Tips:
|
131 |
+
Sometimes the "auto" device mapping strategy in huggingface/transformers does not perfectly balance the memory allocation across multiple GPUs.
|
132 |
+
You can use `--max-gpu-memory` to specify the maximum memory per GPU for storing model weights.
|
133 |
+
This allows it to allocate more memory for activations, so you can use longer context lengths or larger batch sizes. For example,
|
134 |
+
|
135 |
+
```
|
136 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --num-gpus 2 --max-gpu-memory 8GiB
|
137 |
+
```
|
138 |
+
|
139 |
+
#### CPU Only
|
140 |
+
This runs on the CPU only and does not require GPU. It requires around 30GB of CPU memory for Vicuna-7B and around 60GB of CPU memory for Vicuna-13B.
|
141 |
+
```
|
142 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device cpu
|
143 |
+
```
|
144 |
+
|
145 |
+
Use Intel AI Accelerator AVX512_BF16/AMX to accelerate CPU inference.
|
146 |
+
```
|
147 |
+
CPU_ISA=amx python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device cpu
|
148 |
+
```
|
149 |
+
|
150 |
+
#### Metal Backend (Mac Computers with Apple Silicon or AMD GPUs)
|
151 |
+
Use `--device mps` to enable GPU acceleration on Mac computers (requires torch >= 2.0).
|
152 |
+
Use `--load-8bit` to turn on 8-bit compression.
|
153 |
+
```
|
154 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device mps --load-8bit
|
155 |
+
```
|
156 |
+
Vicuna-7B can run on a 32GB M1 Macbook with 1 - 2 words / second.
|
157 |
+
|
158 |
+
#### Intel XPU (Intel Data Center and Arc A-Series GPUs)
|
159 |
+
Install the [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/xpu/latest/tutorials/installation.html). Set the OneAPI environment variables:
|
160 |
+
```
|
161 |
+
source /opt/intel/oneapi/setvars.sh
|
162 |
+
```
|
163 |
+
|
164 |
+
Use `--device xpu` to enable XPU/GPU acceleration.
|
165 |
+
```
|
166 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device xpu
|
167 |
+
```
|
168 |
+
Vicuna-7B can run on an Intel Arc A770 16GB.
|
169 |
+
|
170 |
+
#### Ascend NPU
|
171 |
+
Install the [Ascend PyTorch Adapter](https://github.com/Ascend/pytorch). Set the CANN environment variables:
|
172 |
+
```
|
173 |
+
source /usr/local/Ascend/ascend-toolkit/set_env.sh
|
174 |
+
```
|
175 |
+
|
176 |
+
Use `--device npu` to enable NPU acceleration.
|
177 |
+
```
|
178 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --device npu
|
179 |
+
```
|
180 |
+
Vicuna-7B/13B can run on an Ascend NPU.
|
181 |
+
|
182 |
+
#### Not Enough Memory
|
183 |
+
If you do not have enough memory, you can enable 8-bit compression by adding `--load-8bit` to commands above.
|
184 |
+
This can reduce memory usage by around half with slightly degraded model quality.
|
185 |
+
It is compatible with the CPU, GPU, and Metal backend.
|
186 |
+
|
187 |
+
Vicuna-13B with 8-bit compression can run on a single GPU with 16 GB of VRAM, like an Nvidia RTX 3090, RTX 4080, T4, V100 (16GB), or an AMD RX 6800 XT.
|
188 |
+
|
189 |
+
```
|
190 |
+
python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5 --load-8bit
|
191 |
+
```
|
192 |
+
|
193 |
+
In addition to that, you can add `--cpu-offloading` to commands above to offload weights that don't fit on your GPU onto the CPU memory.
|
194 |
+
This requires 8-bit compression to be enabled and the bitsandbytes package to be installed, which is only available on linux operating systems.
|
195 |
+
|
196 |
+
#### More Platforms and Quantization
|
197 |
+
- For AMD GPU users, please install ROCm and [the ROCm version of PyTorch](https://pytorch.org/get-started/locally/) before you install FastChat. See also this [post](https://github.com/lm-sys/FastChat/issues/104#issuecomment-1613791563).
|
198 |
+
- FastChat supports ExLlama V2. See [docs/exllama_v2.md](/docs/exllama_v2.md).
|
199 |
+
- FastChat supports GPTQ 4bit inference with [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa). See [docs/gptq.md](/docs/gptq.md).
|
200 |
+
- FastChat supports AWQ 4bit inference with [mit-han-lab/llm-awq](https://github.com/mit-han-lab/llm-awq). See [docs/awq.md](/docs/awq.md).
|
201 |
+
- [MLC LLM](https://mlc.ai/mlc-llm/), backed by [TVM Unity](https://github.com/apache/tvm/tree/unity) compiler, deploys Vicuna natively on phones, consumer-class GPUs and web browsers via Vulkan, Metal, CUDA and WebGPU.
|
202 |
+
|
203 |
+
#### Use models from modelscope
|
204 |
+
For Chinese users, you can use models from www.modelscope.cn via specify the following environment variables.
|
205 |
+
```bash
|
206 |
+
export FASTCHAT_USE_MODELSCOPE=True
|
207 |
+
```
|
208 |
+
|
209 |
+
## Serving with Web GUI
|
210 |
+
|
211 |
+
<a href="https://chat.lmsys.org"><img src="assets/screenshot_gui.png" width="70%"></a>
|
212 |
+
|
213 |
+
To serve using the web UI, you need three main components: web servers that interface with users, model workers that host one or more models, and a controller to coordinate the webserver and model workers. You can learn more about the architecture [here](docs/server_arch.md).
|
214 |
+
|
215 |
+
Here are the commands to follow in your terminal:
|
216 |
+
|
217 |
+
#### Launch the controller
|
218 |
+
```bash
|
219 |
+
python3 -m fastchat.serve.controller
|
220 |
+
```
|
221 |
+
|
222 |
+
This controller manages the distributed workers.
|
223 |
+
|
224 |
+
#### Launch the model worker(s)
|
225 |
+
```bash
|
226 |
+
python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5
|
227 |
+
```
|
228 |
+
Wait until the process finishes loading the model and you see "Uvicorn running on ...". The model worker will register itself to the controller .
|
229 |
+
|
230 |
+
To ensure that your model worker is connected to your controller properly, send a test message using the following command:
|
231 |
+
```bash
|
232 |
+
python3 -m fastchat.serve.test_message --model-name vicuna-7b-v1.5
|
233 |
+
```
|
234 |
+
You will see a short output.
|
235 |
+
|
236 |
+
#### Launch the Gradio web server
|
237 |
+
```bash
|
238 |
+
python3 -m fastchat.serve.gradio_web_server
|
239 |
+
```
|
240 |
+
|
241 |
+
This is the user interface that users will interact with.
|
242 |
+
|
243 |
+
By following these steps, you will be able to serve your models using the web UI. You can open your browser and chat with a model now.
|
244 |
+
If the models do not show up, try to reboot the gradio web server.
|
245 |
+
|
246 |
+
#### (Optional): Advanced Features, Scalability, Third Party UI
|
247 |
+
- You can register multiple model workers to a single controller, which can be used for serving a single model with higher throughput or serving multiple models at the same time. When doing so, please allocate different GPUs and ports for different model workers.
|
248 |
+
```
|
249 |
+
# worker 0
|
250 |
+
CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5 --controller http://localhost:21001 --port 31000 --worker http://localhost:31000
|
251 |
+
# worker 1
|
252 |
+
CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.model_worker --model-path lmsys/fastchat-t5-3b-v1.0 --controller http://localhost:21001 --port 31001 --worker http://localhost:31001
|
253 |
+
```
|
254 |
+
- You can also launch a multi-tab gradio server, which includes the Chatbot Arena tabs.
|
255 |
+
```bash
|
256 |
+
python3 -m fastchat.serve.gradio_web_server_multi
|
257 |
+
```
|
258 |
+
- The default model worker based on huggingface/transformers has great compatibility but can be slow. If you want high-throughput batched serving, you can try [vLLM integration](docs/vllm_integration.md).
|
259 |
+
- If you want to host it on your own UI or third party UI, see [Third Party UI](docs/third_party_ui.md).
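As a sketch of the vLLM option above (see [docs/vllm_integration.md](docs/vllm_integration.md) for prerequisites such as installing vLLM), only the worker module changes; the controller and web server commands stay the same:
```bash
# Replace the default worker with the vLLM worker (assumes vLLM is installed).
python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-7b-v1.5
```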
|
260 |
+
|
261 |
+
## API
|
262 |
+
### OpenAI-Compatible RESTful APIs & SDK
|
263 |
+
FastChat provides OpenAI-compatible APIs for its supported models, so you can use FastChat as a local drop-in replacement for OpenAI APIs.
|
264 |
+
The FastChat server is compatible with both [openai-python](https://github.com/openai/openai-python) library and cURL commands.
|
265 |
+
The REST API can be run from the Google Colab free tier, as demonstrated in the [FastChat_API_GoogleColab.ipynb](https://github.com/lm-sys/FastChat/blob/main/playground/FastChat_API_GoogleColab.ipynb) notebook, available in our repository.
|
266 |
+
See [docs/openai_api.md](docs/openai_api.md).
|
267 |
+
|
268 |
+
### Hugging Face Generation APIs
|
269 |
+
See [fastchat/serve/huggingface_api.py](fastchat/serve/huggingface_api.py).
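A minimal way to try it is to call the script directly; `--model-path` is the same flag used elsewhere in this README (check the file for the full list of options):
```bash
python3 -m fastchat.serve.huggingface_api --model-path lmsys/vicuna-7b-v1.5
```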
|
270 |
+
|
271 |
+
### LangChain Integration
|
272 |
+
See [docs/langchain_integration](docs/langchain_integration.md).
|
273 |
+
|
274 |
+
## Evaluation
|
275 |
+
We use MT-bench, a set of challenging multi-turn open-ended questions to evaluate models.
|
276 |
+
To automate the evaluation process, we prompt strong LLMs like GPT-4 to act as judges and assess the quality of the models' responses.
|
277 |
+
See instructions for running MT-bench at [fastchat/llm_judge](fastchat/llm_judge).
|
278 |
+
|
279 |
+
MT-bench is the new recommended way to benchmark your models. If you are still looking for the old 80 questions used in the vicuna blog post, please go to [vicuna-blog-eval](https://github.com/lm-sys/vicuna-blog-eval).
|
280 |
+
|
281 |
+
## Fine-tuning
|
282 |
+
### Data
|
283 |
+
|
284 |
+
Vicuna is created by fine-tuning a Llama base model using approximately 125K user-shared conversations gathered from ShareGPT.com with public APIs. To ensure data quality, we convert the HTML back to markdown and filter out some inappropriate or low-quality samples. Additionally, we divide lengthy conversations into smaller segments that fit the model's maximum context length. For detailed instructions to clean the ShareGPT data, check out [here](docs/commands/data_cleaning.md).
|
285 |
+
|
286 |
+
We will not release the ShareGPT dataset. If you would like to try the fine-tuning code, you can run it with some dummy conversations in [dummy_conversation.json](data/dummy_conversation.json). You can follow the same format and plug in your own data.
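For reference, each record in the dummy file follows the ShareGPT-style schema; roughly, an entry looks like the snippet below (the values are illustrative only):
```json
{
  "id": "identity_0",
  "conversations": [
    {"from": "human", "value": "Who are you?"},
    {"from": "gpt", "value": "I am a language model fine-tuned with FastChat."}
  ]
}
```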
|
287 |
+
|
288 |
+
### Code and Hyperparameters
|
289 |
+
Our code is based on [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) with additional support for multi-turn conversations.
|
290 |
+
We use hyperparameters similar to those of Stanford Alpaca.
|
291 |
+
|
292 |
+
| Model | Global Batch Size | Learning rate | Epochs | Max length | Weight decay |
|
293 |
+
| --- | ---: | ---: | ---: | ---: | ---: |
|
294 |
+
| Vicuna-13B | 128 | 2e-5 | 3 | 2048 | 0 |
|
295 |
+
|
296 |
+
### Fine-tuning Vicuna-7B with Local GPUs
|
297 |
+
|
298 |
+
- Install dependencies
|
299 |
+
```bash
|
300 |
+
pip3 install -e ".[train]"
|
301 |
+
```
|
302 |
+
|
303 |
+
- You can use the following command to train Vicuna-7B with 4 x A100 (40GB). Update `--model_name_or_path` with the actual path to Llama weights and `--data_path` with the actual path to data.
|
304 |
+
```bash
|
305 |
+
torchrun --nproc_per_node=4 --master_port=20001 fastchat/train/train_mem.py \
|
306 |
+
--model_name_or_path meta-llama/Llama-2-7b-hf \
|
307 |
+
--data_path data/dummy_conversation.json \
|
308 |
+
--bf16 True \
|
309 |
+
--output_dir output_vicuna \
|
310 |
+
--num_train_epochs 3 \
|
311 |
+
--per_device_train_batch_size 2 \
|
312 |
+
--per_device_eval_batch_size 2 \
|
313 |
+
--gradient_accumulation_steps 16 \
|
314 |
+
--evaluation_strategy "no" \
|
315 |
+
--save_strategy "steps" \
|
316 |
+
--save_steps 1200 \
|
317 |
+
--save_total_limit 10 \
|
318 |
+
--learning_rate 2e-5 \
|
319 |
+
--weight_decay 0. \
|
320 |
+
--warmup_ratio 0.03 \
|
321 |
+
--lr_scheduler_type "cosine" \
|
322 |
+
--logging_steps 1 \
|
323 |
+
--fsdp "full_shard auto_wrap" \
|
324 |
+
--fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
|
325 |
+
--tf32 True \
|
326 |
+
--model_max_length 2048 \
|
327 |
+
--gradient_checkpointing True \
|
328 |
+
--lazy_preprocess True
|
329 |
+
```
|
330 |
+
|
331 |
+
Tips:
|
332 |
+
- If you are using a V100, which is not supported by FlashAttention, you can use the [memory-efficient attention](https://arxiv.org/abs/2112.05682) implemented in [xFormers](https://github.com/facebookresearch/xformers). Install xformers and replace `fastchat/train/train_mem.py` above with [fastchat/train/train_xformers.py](fastchat/train/train_xformers.py).
|
333 |
+
- If you run into out-of-memory errors due to "FSDP Warning: When using FSDP, it is efficient and recommended... ", see solutions [here](https://github.com/huggingface/transformers/issues/24724#issuecomment-1645189539).
|
334 |
+
- If you run out of memory during model saving, see solutions [here](https://github.com/pytorch/pytorch/issues/98823).
|
335 |
+
- To turn on logging to popular experiment tracking tools such as TensorBoard, MLflow, or Weights & Biases, use the `report_to` argument, e.g. pass `--report_to wandb` to turn on logging to Weights & Biases.
|
336 |
+
|
337 |
+
### Other models, platforms and LoRA support
|
338 |
+
More instructions to train other models (e.g., FastChat-T5) and use LoRA are in [docs/training.md](docs/training.md).
|
339 |
+
|
340 |
+
### Fine-tuning on Any Cloud with SkyPilot
|
341 |
+
[SkyPilot](https://github.com/skypilot-org/skypilot) is a framework built by UC Berkeley for running ML workloads easily and cost-effectively on any cloud (AWS, GCP, Azure, Lambda, etc.).
|
342 |
+
Find SkyPilot documentation [here](https://github.com/skypilot-org/skypilot/tree/master/llm/vicuna) on using managed spot instances to train Vicuna and save on your cloud costs.
|
343 |
+
|
344 |
+
## Citation
|
345 |
+
The code (training, serving, and evaluation) in this repository is mostly developed for or derived from the paper below.
|
346 |
+
Please cite it if you find the repository helpful.
|
347 |
+
|
348 |
+
```
|
349 |
+
@misc{zheng2023judging,
|
350 |
+
title={Judging LLM-as-a-judge with MT-Bench and Chatbot Arena},
|
351 |
+
author={Lianmin Zheng and Wei-Lin Chiang and Ying Sheng and Siyuan Zhuang and Zhanghao Wu and Yonghao Zhuang and Zi Lin and Zhuohan Li and Dacheng Li and Eric. P Xing and Hao Zhang and Joseph E. Gonzalez and Ion Stoica},
|
352 |
+
year={2023},
|
353 |
+
eprint={2306.05685},
|
354 |
+
archivePrefix={arXiv},
|
355 |
+
primaryClass={cs.CL}
|
356 |
+
}
|
357 |
+
```
|
358 |
+
|
359 |
+
We are also planning to add more of our research to this repository.
|
assets/demo_narrow.gif
ADDED
Git LFS Details
|
assets/qa_browser.png
ADDED
assets/screenshot_cli.png
ADDED
assets/screenshot_gui.png
ADDED
assets/server_arch.png
ADDED
assets/vicuna_logo.jpeg
ADDED
docker/Dockerfile
ADDED
@@ -0,0 +1,7 @@
1 |
+
FROM nvidia/cuda:12.2.0-runtime-ubuntu20.04
|
2 |
+
|
3 |
+
RUN apt-get update -y && apt-get install -y python3.9 python3.9-distutils curl
|
4 |
+
RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
|
5 |
+
RUN python3.9 get-pip.py
|
6 |
+
RUN pip3 install fschat
|
7 |
+
RUN pip3 install fschat[model_worker,webui]
|
docker/docker-compose.yml
ADDED
@@ -0,0 +1,36 @@
1 |
+
version: "3.9"
|
2 |
+
|
3 |
+
services:
|
4 |
+
fastchat-controller:
|
5 |
+
build:
|
6 |
+
context: .
|
7 |
+
dockerfile: Dockerfile
|
8 |
+
image: fastchat:latest
|
9 |
+
ports:
|
10 |
+
- "21001:21001"
|
11 |
+
entrypoint: ["python3.9", "-m", "fastchat.serve.controller", "--host", "0.0.0.0", "--port", "21001"]
|
12 |
+
fastchat-model-worker:
|
13 |
+
build:
|
14 |
+
context: .
|
15 |
+
dockerfile: Dockerfile
|
16 |
+
volumes:
|
17 |
+
- huggingface:/root/.cache/huggingface
|
18 |
+
image: fastchat:latest
|
19 |
+
deploy:
|
20 |
+
resources:
|
21 |
+
reservations:
|
22 |
+
devices:
|
23 |
+
- driver: nvidia
|
24 |
+
count: 1
|
25 |
+
capabilities: [gpu]
|
26 |
+
entrypoint: ["python3.9", "-m", "fastchat.serve.model_worker", "--model-names", "${FASTCHAT_WORKER_MODEL_NAMES:-vicuna-7b-v1.5}", "--model-path", "${FASTCHAT_WORKER_MODEL_PATH:-lmsys/vicuna-7b-v1.5}", "--worker-address", "http://fastchat-model-worker:21002", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "21002"]
|
27 |
+
fastchat-api-server:
|
28 |
+
build:
|
29 |
+
context: .
|
30 |
+
dockerfile: Dockerfile
|
31 |
+
image: fastchat:latest
|
32 |
+
ports:
|
33 |
+
- "8000:8000"
|
34 |
+
entrypoint: ["python3.9", "-m", "fastchat.serve.openai_api_server", "--controller-address", "http://fastchat-controller:21001", "--host", "0.0.0.0", "--port", "8000"]
|
35 |
+
volumes:
|
36 |
+
huggingface:
|
docs/arena.md
ADDED
@@ -0,0 +1,48 @@
1 |
+
# Chatbot Arena
|
2 |
+
Chatbot Arena is an LLM benchmark platform featuring anonymous, randomized battles, available at https://chat.lmsys.org.
|
3 |
+
We invite the entire community to join this benchmarking effort by contributing your votes and models.
|
4 |
+
|
5 |
+
## How to add a new model
|
6 |
+
If you want to see a specific model in the arena, you can follow the methods below.
|
7 |
+
|
8 |
+
### Method 1: Hosted by 3rd party API providers or yourself
|
9 |
+
If you have a model hosted by a 3rd party API provider or yourself, please give us access to an API endpoint.
|
10 |
+
- We prefer OpenAI-compatible APIs, so we can reuse our [code](https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/api_provider.py) for calling OpenAI models.
|
11 |
+
- If you have your own API protocol, please follow the [instructions](model_support.md) to add them. Contribute your code by sending a pull request.
|
12 |
+
|
13 |
+
### Method 2: Hosted by LMSYS
|
14 |
+
1. Contribute the code to support this model in FastChat by submitting a pull request. See [instructions](model_support.md).
|
15 |
+
2. After the model is supported, we will try to schedule some compute resources to host the model in the arena. However, due to the limited resources we have, we may not be able to serve every model. We will select the models based on popularity, quality, diversity, and other factors.
|
16 |
+
|
17 |
+
|
18 |
+
## How to launch vision arena
|
19 |
+
|
20 |
+
1. Run `python3 -m fastchat.serve.controller` to start the controller and begin registering local model workers and API-provided workers.
|
21 |
+
2. Run `python3 -m fastchat.serve.sglang_worker --model-path <model-path> --tokenizer-path <tokenizer-path>` to run local vision-language models. Currently supported models include the LLaVA and Yi-VL series.
|
22 |
+
3. If you are using a 3rd party model with an API provider (e.g. GPT-4-V, Gemini 1.5), please follow the instructions in [model_support.md](model_support.md) to add a JSON file `api_endpoints.json`.
|
23 |
+
4. Run the gradio server with the `--vision-arena` flag on.
|
24 |
+
5. To run and store images into a remote directory, add the flag: `--use-remote-storage`
|
25 |
+
6. To allow sampling of random example questions, add `--random_questions metadata_sampled.json`. See the section below for how to generate this file.
|
26 |
+
|
27 |
+
Example command:
|
28 |
+
```
|
29 |
+
python3 -m fastchat.serve.gradio_web_server_multi --share --register-api-endpoint-file api_endpoints.json --vision-arena --use-remote-storage --random-questions metadata_sampled.json
|
30 |
+
```
|
31 |
+
|
32 |
+
### NSFW and CSAM Detection
|
33 |
+
1. Adding NSFW Endpoint and API key: Please add the following environment variables to run the NSFW moderation filter for images:
|
34 |
+
- `AZURE_IMG_MODERATION_ENDPOINT`: The endpoint where the NSFW moderator is hosted (e.g. https://{endpoint}/contentmoderator/moderate/v1.0/ProcessImage/Evaluate). Change the `endpoint` to your own.
|
35 |
+
- `AZURE_IMG_MODERATION_API_KEY`: Your API key to run this endpoint.
|
36 |
+
2. Adding CSAM API key:
|
37 |
+
- `PHOTODNA_API_KEY`: The API key that runs the CSAM detector endpoint.
|
38 |
+
|
39 |
+
Example in `~/.bashrc`:
|
40 |
+
```
|
41 |
+
export AZURE_IMG_MODERATION_ENDPOINT=https://<endpoint>/contentmoderator/moderate/v1.0/ProcessImage/Evaluate
|
42 |
+
export AZURE_IMG_MODERATION_API_KEY=<api-key>
|
43 |
+
export PHOTODNA_API_KEY=<api-key>
|
44 |
+
```
|
45 |
+
|
46 |
+
### Adding Random Samples for VQA
|
47 |
+
We provide random example images for users to interact with, drawn from various datasets including DocVQA, RealWorldQA, ChartQA, and VizWiz-VQA.
|
48 |
+
1. Download the images and generate random questions file by running `python fastchat/serve/vision/create_vqa_examples_dir.py`
|
docs/awq.md
ADDED
@@ -0,0 +1,71 @@
1 |
+
# AWQ 4bit Inference
|
2 |
+
|
3 |
+
We integrated [AWQ](https://github.com/mit-han-lab/llm-awq) into FastChat to provide **efficient and accurate** 4bit LLM inference.
|
4 |
+
|
5 |
+
## Install AWQ
|
6 |
+
|
7 |
+
Setup environment (please refer to [this link](https://github.com/mit-han-lab/llm-awq#install) for more details):
|
8 |
+
```bash
|
9 |
+
conda create -n fastchat-awq python=3.10 -y
|
10 |
+
conda activate fastchat-awq
|
11 |
+
# cd /path/to/FastChat
|
12 |
+
pip install --upgrade pip # enable PEP 660 support
|
13 |
+
pip install -e . # install fastchat
|
14 |
+
|
15 |
+
git clone https://github.com/mit-han-lab/llm-awq repositories/llm-awq
|
16 |
+
cd repositories/llm-awq
|
17 |
+
pip install -e . # install awq package
|
18 |
+
|
19 |
+
cd awq/kernels
|
20 |
+
python setup.py install # install awq CUDA kernels
|
21 |
+
```
|
22 |
+
|
23 |
+
## Chat with the CLI
|
24 |
+
|
25 |
+
```bash
|
26 |
+
# Download quantized model from huggingface
|
27 |
+
# Make sure you have git-lfs installed (https://git-lfs.com)
|
28 |
+
git lfs install
|
29 |
+
git clone https://huggingface.co/mit-han-lab/vicuna-7b-v1.3-4bit-g128-awq
|
30 |
+
|
31 |
+
# You can specify which quantized model to use by setting --awq-ckpt
|
32 |
+
python3 -m fastchat.serve.cli \
|
33 |
+
--model-path models/vicuna-7b-v1.3-4bit-g128-awq \
|
34 |
+
--awq-wbits 4 \
|
35 |
+
--awq-groupsize 128
|
36 |
+
```
|
37 |
+
|
38 |
+
## Benchmark
|
39 |
+
|
40 |
+
* Through **4-bit weight quantization**, AWQ helps run larger language models within device memory limits and significantly accelerates token generation. All benchmarks are done with group_size 128.
|
41 |
+
|
42 |
+
* Benchmark on NVIDIA RTX A6000:
|
43 |
+
|
44 |
+
| Model | Bits | Max Memory (MiB) | Speed (ms/token) | AWQ Speedup |
|
45 |
+
| --------------- | ---- | ---------------- | ---------------- | ----------- |
|
46 |
+
| vicuna-7b | 16 | 13543 | 26.06 | / |
|
47 |
+
| vicuna-7b | 4 | 5547 | 12.43 | 2.1x |
|
48 |
+
| llama2-7b-chat | 16 | 13543 | 27.14 | / |
|
49 |
+
| llama2-7b-chat | 4 | 5547 | 12.44 | 2.2x |
|
50 |
+
| vicuna-13b | 16 | 25647 | 44.91 | / |
|
51 |
+
| vicuna-13b | 4 | 9355 | 17.30 | 2.6x |
|
52 |
+
| llama2-13b-chat | 16 | 25647 | 47.28 | / |
|
53 |
+
| llama2-13b-chat | 4 | 9355 | 20.28 | 2.3x |
|
54 |
+
|
55 |
+
* NVIDIA RTX 4090:
|
56 |
+
|
57 |
+
| Model | AWQ 4bit Speed (ms/token) | FP16 Speed (ms/token) | AWQ Speedup |
|
58 |
+
| --------------- | ------------------------- | --------------------- | ----------- |
|
59 |
+
| vicuna-7b | 8.61 | 19.09 | 2.2x |
|
60 |
+
| llama2-7b-chat | 8.66 | 19.97 | 2.3x |
|
61 |
+
| vicuna-13b | 12.17 | OOM | / |
|
62 |
+
| llama2-13b-chat | 13.54 | OOM | / |
|
63 |
+
|
64 |
+
* NVIDIA Jetson Orin:
|
65 |
+
|
66 |
+
| Model | AWQ 4bit Speed (ms/token) | FP16 Speed (ms/token) | AWQ Speedup |
|
67 |
+
| --------------- | ------------------------- | --------------------- | ----------- |
|
68 |
+
| vicuna-7b | 65.34 | 93.12 | 1.4x |
|
69 |
+
| llama2-7b-chat | 75.11 | 104.71 | 1.4x |
|
70 |
+
| vicuna-13b | 115.40 | OOM | / |
|
71 |
+
| llama2-13b-chat | 136.81 | OOM | / |
|
docs/commands/conv_release.md
ADDED
@@ -0,0 +1,38 @@
1 |
+
## Chatbot Arena Conversations
|
2 |
+
|
3 |
+
1. Gather battles
|
4 |
+
```
|
5 |
+
python3 clean_battle_data.py --max-num 10 --mode conv_release
|
6 |
+
```
|
7 |
+
|
8 |
+
2. Tag OpenAI moderation
|
9 |
+
```
|
10 |
+
python3 tag_openai_moderation.py --in clean_battle_conv_20230814.json
|
11 |
+
```
|
12 |
+
|
13 |
+
3. Clean PII
|
14 |
+
|
15 |
+
4. Filter additional blocked words
|
16 |
+
|
17 |
+
```
|
18 |
+
python3 filter_bad_conv.py --in clean_battle_conv_20230630_tagged_v1_pii.json
|
19 |
+
```
|
20 |
+
|
21 |
+
5. Add additional toxicity tag
|
22 |
+
|
23 |
+
|
24 |
+
## All Conversations
|
25 |
+
|
26 |
+
1. Gather chats
|
27 |
+
```
|
28 |
+
python3 clean_chat_data.py
|
29 |
+
```
|
30 |
+
|
31 |
+
2. Sample
|
32 |
+
```
|
33 |
+
python3 conv_release_scripts/sample.py
|
34 |
+
```
|
35 |
+
|
36 |
+
|
37 |
+
## Prompt distribution
|
38 |
+
|
docs/commands/data_cleaning.md
ADDED
@@ -0,0 +1,19 @@
1 |
+
## Data cleaning
|
2 |
+
|
3 |
+
## Requirements
|
4 |
+
```
|
5 |
+
pip3 install bs4 markdownify
|
6 |
+
pip3 install polyglot pyicu pycld2
|
7 |
+
```
|
8 |
+
|
9 |
+
## Steps
|
10 |
+
```
|
11 |
+
# Convert html to markdown
|
12 |
+
python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
|
13 |
+
|
14 |
+
# Keep or remove specific languages
|
15 |
+
python3 -m fastchat.data.optional_clean --in sharegpt_clean.json --out sharegpt_clean_lang.json --skip-lang SOME_LANGUAGE_CODE
|
16 |
+
|
17 |
+
# Split long conversations
|
18 |
+
python3 -m fastchat.data.split_long_conversation --in sharegpt_clean_lang.json --out sharegpt_clean_lang_split.json --model-name /home/ubuntu/model_weights/llama-7b/
|
19 |
+
```
|
docs/commands/leaderboard.md
ADDED
@@ -0,0 +1,37 @@
1 |
+
### Get logs
|
2 |
+
```
|
3 |
+
gsutil -m rsync -r gs://fastchat_logs ~/fastchat_logs/
|
4 |
+
```
|
5 |
+
|
6 |
+
### Clean battle data
|
7 |
+
```
|
8 |
+
cd ~/FastChat/fastchat/serve/monitor
|
9 |
+
python3 clean_battle_data.py
|
10 |
+
```
|
11 |
+
|
12 |
+
### Run Elo analysis
|
13 |
+
```
|
14 |
+
python3 elo_analysis.py --clean-battle-file clean_battle_20230905.json
|
15 |
+
```
|
16 |
+
|
17 |
+
### Copy files to HF space
|
18 |
+
1. update plots
|
19 |
+
```
|
20 |
+
scp atlas:/data/lmzheng/FastChat/fastchat/serve/monitor/elo_results_20230905.pkl .
|
21 |
+
```
|
22 |
+
|
23 |
+
2. update table
|
24 |
+
```
|
25 |
+
wget https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard/raw/main/leaderboard_table_20230905.csv
|
26 |
+
```
|
27 |
+
|
28 |
+
### Update files on webserver
|
29 |
+
```
|
30 |
+
DATE=20231002
|
31 |
+
|
32 |
+
rm -rf elo_results.pkl leaderboard_table.csv
|
33 |
+
wget https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard/resolve/main/elo_results_$DATE.pkl
|
34 |
+
wget https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard/resolve/main/leaderboard_table_$DATE.csv
|
35 |
+
ln -s leaderboard_table_$DATE.csv leaderboard_table.csv
|
36 |
+
ln -s elo_results_$DATE.pkl elo_results.pkl
|
37 |
+
```
|
docs/commands/local_cluster.md
ADDED
@@ -0,0 +1,38 @@
1 |
+
### Local GPU cluster
|
2 |
+
node-01
|
3 |
+
```
|
4 |
+
python3 -m fastchat.serve.controller --host 0.0.0.0 --port 10002
|
5 |
+
|
6 |
+
CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-13b-v1.5 --model-name vicuna-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker-address http://$(hostname):31000
|
7 |
+
CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-13b-v1.5 --model-name vicuna-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker-address http://$(hostname):31001
|
8 |
+
|
9 |
+
CUDA_VISIBLE_DEVICES=2,3 ray start --head
|
10 |
+
python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-33b-v1.3 --model-name vicuna-33b --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker-address http://$(hostname):31002 --num-gpus 2
|
11 |
+
```
|
12 |
+
|
13 |
+
node-02
|
14 |
+
```
|
15 |
+
CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.vllm_worker --model-path meta-llama/Llama-2-13b-chat-hf --model-name llama-2-13b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker-address http://$(hostname):31000 --tokenizer meta-llama/Llama-2-7b-chat-hf
|
16 |
+
CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.vllm_worker --model-path meta-llama/Llama-2-13b-chat-hf --model-name llama-2-13b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker-address http://$(hostname):31001 --tokenizer meta-llama/Llama-2-7b-chat-hf
|
17 |
+
CUDA_VISIBLE_DEVICES=2 python3 -m fastchat.serve.vllm_worker --model-path meta-llama/Llama-2-7b-chat-hf --model-name llama-2-7b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker-address http://$(hostname):31002 --tokenizer meta-llama/Llama-2-7b-chat-hf
|
18 |
+
CUDA_VISIBLE_DEVICES=3 python3 -m fastchat.serve.vllm_worker --model-path WizardLM/WizardLM-13B-V1.1 --model-name wizardlm-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31003 --worker-address http://$(hostname):31003
|
19 |
+
```
|
20 |
+
|
21 |
+
node-03
|
22 |
+
```
|
23 |
+
python3 -m fastchat.serve.vllm_worker --model-path mosaicml/mpt-30b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker-address http://$(hostname):31000 --num-gpus 2
|
24 |
+
python3 -m fastchat.serve.vllm_worker --model-path timdettmers/guanaco-33b-merged --model-name guanaco-33b --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker-address http://$(hostname):31002 --num-gpus 2 --tokenizer hf-internal-testing/llama-tokenizer
|
25 |
+
```
|
26 |
+
|
27 |
+
node-04
|
28 |
+
```
|
29 |
+
CUDA_VISIBLE_DEVICES=0 python3 -m fastchat.serve.multi_model_worker --model-path ~/model_weights/RWKV-4-Raven-14B-v12-Eng98%25-Other2%25-20230523-ctx8192.pth --model-name RWKV-4-Raven-14B --model-path lmsys/fastchat-t5-3b-v1.0 --model-name fastchat-t5-3b --controller http://node-01:10002 --host 0.0.0.0 --port 31000 --worker http://$(hostname):31000 --limit 4
|
30 |
+
CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.multi_model_worker --model-path OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5 --model-name oasst-pythia-12b --model-path mosaicml/mpt-7b-chat --model-name mpt-7b-chat --controller http://node-01:10002 --host 0.0.0.0 --port 31001 --worker http://$(hostname):31001 --limit 4
|
31 |
+
CUDA_VISIBLE_DEVICES=2 python3 -m fastchat.serve.multi_model_worker --model-path lmsys/vicuna-7b-v1.5 --model-name vicuna-7b --model-path THUDM/chatglm-6b --model-name chatglm-6b --controller http://node-01:10002 --host 0.0.0.0 --port 31002 --worker http://$(hostname):31002 --limit 4
|
32 |
+
CUDA_VISIBLE_DEVICES=3 python3 -m fastchat.serve.vllm_worker --model-path ~/model_weights/alpaca-13b --controller http://node-01:10002 --host 0.0.0.0 --port 31003 --worker-address http://$(hostname):31003
|
33 |
+
```
|
34 |
+
|
35 |
+
test
|
36 |
+
```
|
37 |
+
python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:10002
|
38 |
+
```
|
docs/commands/pypi.md
ADDED
@@ -0,0 +1,11 @@
1 |
+
### Requirement
|
2 |
+
```
|
3 |
+
python3 -m pip install twine
|
4 |
+
python3 -m pip install --upgrade pip
|
5 |
+
pip3 install build
|
6 |
+
```
|
7 |
+
|
8 |
+
### Upload
|
9 |
+
```
|
10 |
+
bash scripts/upload_pypi.sh
|
11 |
+
```
|
docs/commands/webserver.md
ADDED
@@ -0,0 +1,94 @@
1 |
+
### Install
|
2 |
+
```
|
3 |
+
sudo apt update
|
4 |
+
sudo apt install tmux htop
|
5 |
+
|
6 |
+
wget https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh
|
7 |
+
bash Anaconda3-2022.10-Linux-x86_64.sh
|
8 |
+
|
9 |
+
conda create -n fastchat python=3.9
|
10 |
+
conda activate fastchat
|
11 |
+
|
12 |
+
git clone https://github.com/lm-sys/FastChat.git
|
13 |
+
cd FastChat
|
14 |
+
pip3 install -e .
|
15 |
+
```
|
16 |
+
|
17 |
+
|
18 |
+
### Launch servers
|
19 |
+
```
|
20 |
+
cd fastchat_logs/controller
|
21 |
+
python3 -m fastchat.serve.controller --host 0.0.0.0 --port 21001
|
22 |
+
python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name https://
|
23 |
+
python3 -m fastchat.serve.test_message --model vicuna-13b --controller http://localhost:21001
|
24 |
+
|
25 |
+
cd fastchat_logs/server0
|
26 |
+
|
27 |
+
python3 -m fastchat.serve.huggingface_api_worker --model-info-file ~/elo_results/register_hf_api_models.json
|
28 |
+
|
29 |
+
export OPENAI_API_KEY=
|
30 |
+
export ANTHROPIC_API_KEY=
|
31 |
+
export GCP_PROJECT_ID=
|
32 |
+
|
33 |
+
python3 -m fastchat.serve.gradio_web_server_multi --controller http://localhost:21001 --concurrency 50 --add-chatgpt --add-claude --add-palm --elo ~/elo_results/elo_results.pkl --leaderboard-table-file ~/elo_results/leaderboard_table.csv --register ~/elo_results/register_oai_models.json --show-terms
|
34 |
+
|
35 |
+
python3 backup_logs.py
|
36 |
+
```
|
37 |
+
|
38 |
+
|
39 |
+
### Check the launch time
|
40 |
+
```
|
41 |
+
for i in $(seq 0 11); do cat fastchat_logs/server$i/gradio_web_server.log | grep "Running on local URL" | tail -n 1; done
|
42 |
+
```
|
43 |
+
|
44 |
+
|
45 |
+
### Increase the limit of max open files
|
46 |
+
One process (no reboot needed)
|
47 |
+
```
|
48 |
+
sudo prlimit --nofile=1048576:1048576 --pid=$id
|
49 |
+
|
50 |
+
for id in $(ps -ef | grep gradio_web_server | awk '{print $2}'); do echo $id; prlimit --nofile=1048576:1048576 --pid=$id; done
|
51 |
+
```
|
52 |
+
|
53 |
+
System-wide (reboot required): add the lines below to `/etc/security/limits.conf`
|
54 |
+
```
|
55 |
+
* hard nofile 65535
|
56 |
+
* soft nofile 65535
|
57 |
+
```
|
58 |
+
|
59 |
+
|
60 |
+
### Gradio edit (3.35.2)
|
61 |
+
1. gtag and canvas
|
62 |
+
```
|
63 |
+
vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/templates/frontend/index.html
|
64 |
+
```
|
65 |
+
|
66 |
+
```
|
67 |
+
<!-- Google tag (gtag.js) -->
|
68 |
+
<script async src="https://www.googletagmanager.com/gtag/js?id=G-K6D24EE9ED"></script><script>
|
69 |
+
window.dataLayer = window.dataLayer || [];
|
70 |
+
function gtag(){dataLayer.push(arguments);}
|
71 |
+
gtag('js', new Date());
|
72 |
+
gtag('config', 'G-K6D24EE9ED');
|
73 |
+
window.__gradio_mode__ = "app";
|
74 |
+
</script>
|
75 |
+
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js"></script>
|
76 |
+
```
|
77 |
+
|
78 |
+
2. deprecation warnings
|
79 |
+
```
|
80 |
+
vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/deprecation.py
|
81 |
+
```
|
82 |
+
|
83 |
+
```
|
84 |
+
def check_deprecated_parameters(
|
85 |
+
```
|
86 |
+
|
87 |
+
3. Loading
|
88 |
+
```
|
89 |
+
vim /home/vicuna/anaconda3/envs/fastchat/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-188ef5e8.js
|
90 |
+
```
|
91 |
+
|
92 |
+
```
|
93 |
+
%s/"Loading..."/"Loading...(Please refresh if it takes more than 30 seconds)"/g
|
94 |
+
```
|
docs/dashinfer_integration.md
ADDED
@@ -0,0 +1,23 @@
1 |
+
# dash-infer Integration
|
2 |
+
[DashInfer](https://github.com/modelscope/dash-infer) is a high-performance inference engine specifically optimized for CPU environments, delivering exceptional performance boosts for LLM inference tasks. It supports acceleration for a variety of models including Llama, Qwen, and ChatGLM, making it a versatile choice as a performant worker in FastChat. Notably, DashInfer exhibits significant performance enhancements on both Intel x64 and ARMv9 processors, catering to a wide spectrum of hardware platforms. Its efficient design and optimization techniques ensure rapid and accurate inference capabilities, making it an ideal solution for deploying large language models in resource-constrained environments or scenarios where CPU utilization is preferred over GPU acceleration.
|
3 |
+
|
4 |
+
## Instructions
|
5 |
+
1. Install dash-infer.
|
6 |
+
```
|
7 |
+
pip install dashinfer
|
8 |
+
```
|
9 |
+
|
10 |
+
2. When you launch a model worker, replace the normal worker (`fastchat.serve.model_worker`) with the dash-infer worker (`fastchat.serve.dashinfer_worker`). All other commands such as controller, gradio web server, and OpenAI API server are kept the same.
|
11 |
+
```
|
12 |
+
python3 -m fastchat.serve.dashinfer_worker --model-path qwen/Qwen-7B-Chat --revision=master /path/to/dashinfer-model-generation-config.json
|
13 |
+
```
|
14 |
+
Here is an example:
|
15 |
+
```
|
16 |
+
python3 -m fastchat.serve.dashinfer_worker --model-path qwen/Qwen-7B-Chat --revision=master dash-infer/examples/python/model_config/config_qwen_v10_7b.json
|
17 |
+
```
|
18 |
+
|
19 |
+
If you are using an already downloaded model, replace `--model-path` with a local path and choose a conversation template via the `--conv-template` option:
|
20 |
+
```
|
21 |
+
python3 -m fastchat.serve.dashinfer_worker --model-path ~/.cache/modelscope/hub/qwen/Qwen-7B-Chat --conv-template qwen-7b-chat /path/to/dashinfer-model-generation-config.json
|
22 |
+
```
|
23 |
+
All available conversation templates are listed in [fastchat/conversation.py](../fastchat/conversation.py).
|
docs/dataset_release.md
ADDED
@@ -0,0 +1,6 @@
1 |
+
## Datasets
|
2 |
+
We release the following datasets based on our projects and websites.
|
3 |
+
|
4 |
+
- [LMSYS-Chat-1M: A Large-Scale Real-World LLM Conversation Dataset](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
5 |
+
- [Chatbot Arena Conversation Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations)
|
6 |
+
- [MT-bench Human Annotation Dataset](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)
|
docs/exllama_v2.md
ADDED
@@ -0,0 +1,63 @@
1 |
+
# ExllamaV2 GPTQ Inference Framework
|
2 |
+
|
3 |
+
We integrated the customized kernels of [ExLlamaV2](https://github.com/turboderp/exllamav2) into FastChat to provide **faster** GPTQ inference.
|
4 |
+
|
5 |
+
**Note: ExLlama does not yet support the embedding REST API.**
|
6 |
+
|
7 |
+
## Install ExllamaV2
|
8 |
+
|
9 |
+
Setup environment (please refer to [this link](https://github.com/turboderp/exllamav2#how-to) for more details):
|
10 |
+
|
11 |
+
```bash
|
12 |
+
git clone https://github.com/turboderp/exllamav2
|
13 |
+
cd exllamav2
|
14 |
+
pip install -e .
|
15 |
+
```
|
16 |
+
|
17 |
+
Chat with the CLI:
|
18 |
+
```bash
|
19 |
+
python3 -m fastchat.serve.cli \
|
20 |
+
--model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
|
21 |
+
--enable-exllama
|
22 |
+
```
|
23 |
+
|
24 |
+
Start model worker:
|
25 |
+
```bash
|
26 |
+
# Download quantized model from huggingface
|
27 |
+
# Make sure you have git-lfs installed (https://git-lfs.com)
|
28 |
+
git lfs install
|
29 |
+
git clone https://huggingface.co/TheBloke/vicuna-7B-1.1-GPTQ-4bit-128g models/vicuna-7B-1.1-GPTQ-4bit-128g
|
30 |
+
|
31 |
+
# Load model with default configuration (max sequence length 4096, no GPU split setting).
|
32 |
+
python3 -m fastchat.serve.model_worker \
|
33 |
+
--model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
|
34 |
+
--enable-exllama
|
35 |
+
|
36 |
+
# Load model with max sequence length 2048, allocate 18 GB to CUDA:0 and 24 GB to CUDA:1.
|
37 |
+
python3 -m fastchat.serve.model_worker \
|
38 |
+
--model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
|
39 |
+
--enable-exllama \
|
40 |
+
--exllama-max-seq-len 2048 \
|
41 |
+
--exllama-gpu-split 18,24
|
42 |
+
```
|
43 |
+
|
44 |
+
`--exllama-cache-8bit` can be used to enable 8-bit caching with exllama and save some VRAM.
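For example, combining it with the worker command above (a sketch; adjust the model path to your own checkout):
```bash
python3 -m fastchat.serve.model_worker \
    --model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
    --enable-exllama \
    --exllama-cache-8bit
```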
|
45 |
+
|
46 |
+
## Performance
|
47 |
+
|
48 |
+
Reference: https://github.com/turboderp/exllamav2#performance
|
49 |
+
|
50 |
+
|
51 |
+
| Model | Mode | Size | grpsz | act | V1: 3090Ti | V1: 4090 | V2: 3090Ti | V2: 4090 |
|
52 |
+
|------------|--------------|-------|-------|-----|------------|----------|------------|-------------|
|
53 |
+
| Llama | GPTQ | 7B | 128 | no | 143 t/s | 173 t/s | 175 t/s | **195** t/s |
|
54 |
+
| Llama | GPTQ | 13B | 128 | no | 84 t/s | 102 t/s | 105 t/s | **110** t/s |
|
55 |
+
| Llama | GPTQ | 33B | 128 | yes | 37 t/s | 45 t/s | 45 t/s | **48** t/s |
|
56 |
+
| OpenLlama | GPTQ | 3B | 128 | yes | 194 t/s | 226 t/s | 295 t/s | **321** t/s |
|
57 |
+
| CodeLlama | EXL2 4.0 bpw | 34B | - | - | - | - | 42 t/s | **48** t/s |
|
58 |
+
| Llama2 | EXL2 3.0 bpw | 7B | - | - | - | - | 195 t/s | **224** t/s |
|
59 |
+
| Llama2 | EXL2 4.0 bpw | 7B | - | - | - | - | 164 t/s | **197** t/s |
|
60 |
+
| Llama2 | EXL2 5.0 bpw | 7B | - | - | - | - | 144 t/s | **160** t/s |
|
61 |
+
| Llama2 | EXL2 2.5 bpw | 70B | - | - | - | - | 30 t/s | **35** t/s |
|
62 |
+
| TinyLlama | EXL2 3.0 bpw | 1.1B | - | - | - | - | 536 t/s | **635** t/s |
|
63 |
+
| TinyLlama | EXL2 4.0 bpw | 1.1B | - | - | - | - | 509 t/s | **590** t/s |
|
docs/gptq.md
ADDED
@@ -0,0 +1,59 @@
1 |
+
# GPTQ 4bit Inference
|
2 |
+
|
3 |
+
Support GPTQ 4bit inference with [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa).
|
4 |
+
|
5 |
+
1. Windows users: use the `old-cuda` branch.
|
6 |
+
2. Linux users: the `fastest-inference-4bit` branch is recommended.
|
7 |
+
|
8 |
+
## Install
|
9 |
+
|
10 |
+
Setup environment:
|
11 |
+
```bash
|
12 |
+
# cd /path/to/FastChat
|
13 |
+
git clone https://github.com/qwopqwop200/GPTQ-for-LLaMa.git repositories/GPTQ-for-LLaMa
|
14 |
+
cd repositories/GPTQ-for-LLaMa
|
15 |
+
# Windows users should use the `old-cuda` branch
|
16 |
+
git switch fastest-inference-4bit
|
17 |
+
# Install `quant-cuda` package in FastChat's virtualenv
|
18 |
+
python3 setup_cuda.py install
|
19 |
+
pip3 install texttable
|
20 |
+
```
|
21 |
+
|
22 |
+
Chat with the CLI:
|
23 |
+
```bash
|
24 |
+
python3 -m fastchat.serve.cli \
|
25 |
+
--model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
|
26 |
+
--gptq-wbits 4 \
|
27 |
+
--gptq-groupsize 128
|
28 |
+
```
|
29 |
+
|
30 |
+
Start model worker:
|
31 |
+
```bash
|
32 |
+
# Download quantized model from huggingface
|
33 |
+
# Make sure you have git-lfs installed (https://git-lfs.com)
|
34 |
+
git lfs install
|
35 |
+
git clone https://huggingface.co/TheBloke/vicuna-7B-1.1-GPTQ-4bit-128g models/vicuna-7B-1.1-GPTQ-4bit-128g
|
36 |
+
|
37 |
+
python3 -m fastchat.serve.model_worker \
|
38 |
+
--model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
|
39 |
+
--gptq-wbits 4 \
|
40 |
+
--gptq-groupsize 128
|
41 |
+
|
42 |
+
# You can specify which quantized model to use
|
43 |
+
python3 -m fastchat.serve.model_worker \
|
44 |
+
--model-path models/vicuna-7B-1.1-GPTQ-4bit-128g \
|
45 |
+
--gptq-ckpt models/vicuna-7B-1.1-GPTQ-4bit-128g/vicuna-7B-1.1-GPTQ-4bit-128g.safetensors \
|
46 |
+
--gptq-wbits 4 \
|
47 |
+
--gptq-groupsize 128 \
|
48 |
+
--gptq-act-order
|
49 |
+
```
|
50 |
+
|
51 |
+
## Benchmark
|
52 |
+
|
53 |
+
| LLaMA-13B | branch | Bits | group-size | memory(MiB) | PPL(c4) | Median(s/token) | act-order | speed up |
|
54 |
+
| --------- | ---------------------- | ---- | ---------- | ----------- | ------- | --------------- | --------- | -------- |
|
55 |
+
| FP16 | fastest-inference-4bit | 16 | - | 26634 | 6.96 | 0.0383 | - | 1x |
|
56 |
+
| GPTQ | triton | 4 | 128 | 8590 | 6.97 | 0.0551 | - | 0.69x |
|
57 |
+
| GPTQ | fastest-inference-4bit | 4 | 128 | 8699 | 6.97 | 0.0429 | true | 0.89x |
|
58 |
+
| GPTQ | fastest-inference-4bit | 4 | 128 | 8699 | 7.03 | 0.0287 | false | 1.33x |
|
59 |
+
| GPTQ | fastest-inference-4bit | 4 | -1 | 8448 | 7.12 | 0.0284 | false | 1.44x |
|
docs/langchain_integration.md
ADDED
@@ -0,0 +1,90 @@
1 |
+
# Local LangChain with FastChat
|
2 |
+
|
3 |
+
[LangChain](https://python.langchain.com/en/latest/index.html) is a library that facilitates the development of applications by leveraging large language models (LLMs) and enabling their composition with other sources of computation or knowledge.
|
4 |
+
FastChat's OpenAI-compatible [API server](openai_api.md) enables using LangChain with open models seamlessly.
|
5 |
+
|
6 |
+
## Launch RESTful API Server
|
7 |
+
|
8 |
+
Here are the steps to launch a local OpenAI API server for LangChain.
|
9 |
+
|
10 |
+
First, launch the controller
|
11 |
+
|
12 |
+
```bash
|
13 |
+
python3 -m fastchat.serve.controller
|
14 |
+
```
|
15 |
+
|
16 |
+
LangChain uses OpenAI model names by default, so we need to assign some faux OpenAI model names to our local model.
|
17 |
+
Here, we use Vicuna as an example and use it for three endpoints: chat completion, completion, and embedding.
|
18 |
+
`--model-path` can be a local folder or a Hugging Face repo name.
|
19 |
+
See a full list of supported models [here](../README.md#supported-models).
|
20 |
+
|
21 |
+
```bash
|
22 |
+
python3 -m fastchat.serve.model_worker --model-names "gpt-3.5-turbo,text-davinci-003,text-embedding-ada-002" --model-path lmsys/vicuna-7b-v1.5
|
23 |
+
```
|
24 |
+
|
25 |
+
Finally, launch the RESTful API server
|
26 |
+
|
27 |
+
```bash
|
28 |
+
python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
|
29 |
+
```
|
30 |
+
|
31 |
+
## Set OpenAI Environment
|
32 |
+
|
33 |
+
You can set your environment with the following commands.
|
34 |
+
|
35 |
+
Set OpenAI base url
|
36 |
+
|
37 |
+
```bash
|
38 |
+
export OPENAI_API_BASE=http://localhost:8000/v1
|
39 |
+
```
|
40 |
+
|
41 |
+
Set OpenAI API key
|
42 |
+
|
43 |
+
```bash
|
44 |
+
export OPENAI_API_KEY=EMPTY
|
45 |
+
```
|
46 |
+
|
47 |
+
If you encounter the following OOM error while creating embeddings, please set a smaller batch size using an environment variable.
|
48 |
+
|
49 |
+
~~~bash
|
50 |
+
openai.error.APIError: Invalid response object from API: '{"object":"error","message":"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**\\n\\n(CUDA out of memory. Tried to allocate xxx MiB (GPU 0; xxx GiB total capacity; xxx GiB already allocated; xxx MiB free; xxx GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF)","code":50002}' (HTTP response code was 400)
|
51 |
+
~~~
|
52 |
+
|
53 |
+
You can try `export FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE=1`.
|
54 |
+
|
55 |
+
## Try local LangChain
|
56 |
+
|
57 |
+
Here is a question answering example.
|
58 |
+
|
59 |
+
Download a text file.
|
60 |
+
|
61 |
+
```bash
|
62 |
+
wget https://raw.githubusercontent.com/hwchase17/langchain/v0.0.200/docs/modules/state_of_the_union.txt
|
63 |
+
```
|
64 |
+
|
65 |
+
Run LangChain.
|
66 |
+
|
67 |
+
~~~py
|
68 |
+
from langchain.chat_models import ChatOpenAI
|
69 |
+
from langchain.document_loaders import TextLoader
|
70 |
+
from langchain.embeddings import OpenAIEmbeddings
|
71 |
+
from langchain.indexes import VectorstoreIndexCreator
|
72 |
+
|
73 |
+
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
|
74 |
+
loader = TextLoader("state_of_the_union.txt")
|
75 |
+
index = VectorstoreIndexCreator(embedding=embedding).from_loaders([loader])
|
76 |
+
llm = ChatOpenAI(model="gpt-3.5-turbo")
|
77 |
+
|
78 |
+
questions = [
|
79 |
+
"Who is the speaker",
|
80 |
+
"What did the president say about Ketanji Brown Jackson",
|
81 |
+
"What are the threats to America",
|
82 |
+
"Who are mentioned in the speech",
|
83 |
+
"Who is the vice president",
|
84 |
+
"How many projects were announced",
|
85 |
+
]
|
86 |
+
|
87 |
+
for query in questions:
|
88 |
+
print("Query:", query)
|
89 |
+
print("Answer:", index.query(query, llm=llm))
|
90 |
+
~~~
|
docs/lightllm_integration.md
ADDED
@@ -0,0 +1,18 @@
1 |
+
# LightLLM Integration
|
2 |
+
You can use [LightLLM](https://github.com/ModelTC/lightllm) as an optimized worker implementation in FastChat.
|
3 |
+
It offers advanced continuous batching and a much higher (~10x) throughput.
|
4 |
+
See the supported models [here](https://github.com/ModelTC/lightllm?tab=readme-ov-file#supported-model-list).
|
5 |
+
|
6 |
+
## Instructions
|
7 |
+
1. Please refer to the [Get started](https://github.com/ModelTC/lightllm?tab=readme-ov-file#get-started) to install LightLLM. Or use [Pre-built image](https://github.com/ModelTC/lightllm?tab=readme-ov-file#container)
|
8 |
+
|
9 |
+
2. When you launch a model worker, replace the normal worker (`fastchat.serve.model_worker`) with the LightLLM worker (`fastchat.serve.lightllm_worker`). All other commands such as controller, gradio web server, and OpenAI API server are kept the same. Refer to [--max_total_token_num](https://github.com/ModelTC/lightllm/blob/4a9824b6b248f4561584b8a48ae126a0c8f5b000/docs/ApiServerArgs.md?plain=1#L23) to understand how to calculate the `--max_total_token_num` argument.
|
10 |
+
```
|
11 |
+
python3 -m fastchat.serve.lightllm_worker --model-path lmsys/vicuna-7b-v1.5 --tokenizer_mode "auto" --max_total_token_num 154000
|
12 |
+
```
|
13 |
+
|
14 |
+
If you want to use quantized weights and KV cache for inference, try
|
15 |
+
|
16 |
+
```
|
17 |
+
python3 -m fastchat.serve.lightllm_worker --model-path lmsys/vicuna-7b-v1.5 --tokenizer_mode "auto" --max_total_token_num 154000 --mode triton_int8weight triton_int8kv
|
18 |
+
```
|
docs/mlx_integration.md
ADDED
@@ -0,0 +1,23 @@
1 |
+
# Apple MLX Integration
|
2 |
+
|
3 |
+
You can use [Apple MLX](https://github.com/ml-explore/mlx) as an optimized worker implementation in FastChat.
|
4 |
+
|
5 |
+
It runs models efficiently on Apple Silicon.
|
6 |
+
|
7 |
+
See the supported models [here](https://github.com/ml-explore/mlx-examples/tree/main/llms#supported-models).
|
8 |
+
|
9 |
+
Note that for Apple Silicon Macs with less memory, smaller models (or quantized models) are recommended.
|
10 |
+
|
11 |
+
## Instructions
|
12 |
+
|
13 |
+
1. Install MLX.
|
14 |
+
|
15 |
+
```
|
16 |
+
pip install "mlx-lm>=0.0.6"
|
17 |
+
```
|
18 |
+
|
19 |
+
2. When you launch a model worker, replace the normal worker (`fastchat.serve.model_worker`) with the MLX worker (`fastchat.serve.mlx_worker`). Remember to launch a model worker after you have launched the controller ([instructions](../README.md))
|
20 |
+
|
21 |
+
```
|
22 |
+
python3 -m fastchat.serve.mlx_worker --model-path TinyLlama/TinyLlama-1.1B-Chat-v1.0
|
23 |
+
```
|
docs/model_support.md
ADDED
@@ -0,0 +1,140 @@
1 |
+
# Model Support
|
2 |
+
This document describes how to support a new model in FastChat.
|
3 |
+
|
4 |
+
## Content
|
5 |
+
- [Local Models](#local-models)
|
6 |
+
- [API-Based Models](#api-based-models)
|
7 |
+
|
8 |
+
## Local Models
|
9 |
+
To support a new local model in FastChat, you need to correctly handle its prompt template and model loading.
|
10 |
+
The goal is to make the following command run with the correct prompts.
|
11 |
+
|
12 |
+
```
|
13 |
+
python3 -m fastchat.serve.cli --model [YOUR_MODEL_PATH]
|
14 |
+
```
|
15 |
+
|
16 |
+
You can run this example command to learn the code logic.
|
17 |
+
|
18 |
+
```
|
19 |
+
python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.5
|
20 |
+
```
|
21 |
+
|
22 |
+
You can add `--debug` to see the actual prompt sent to the model.
|
23 |
+
|
24 |
+
### Steps
|
25 |
+
|
26 |
+
FastChat uses the `Conversation` class to handle prompt templates and `BaseModelAdapter` class to handle model loading.
|
27 |
+
|
28 |
+
1. Implement a conversation template for the new model at [fastchat/conversation.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py). You can follow existing examples and use `register_conv_template` to add a new one (a minimal sketch is shown after this list). Please also add a link to the official reference code if possible.
|
29 |
+
2. Implement a model adapter for the new model at [fastchat/model/model_adapter.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/model/model_adapter.py). You can follow existing examples and use `register_model_adapter` to add a new one.
|
30 |
+
3. (Optional) add the model name to the "Supported models" [section](#supported-models) below and add more information in [fastchat/model/model_registry.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/model/model_registry.py).
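For step 1, a registration typically looks roughly like the sketch below. The template name `my-new-model` is made up for illustration, and the exact `Conversation` fields evolve over time, so check [fastchat/conversation.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py) for the current definition:
```python
from fastchat.conversation import Conversation, SeparatorStyle, register_conv_template

# Register a hypothetical template that formats prompts as
# "USER: ... ASSISTANT: ...</s>", in the style of the Vicuna template.
register_conv_template(
    Conversation(
        name="my-new-model",
        system_message="A chat between a curious user and an artificial intelligence assistant.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)
```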
|
31 |
+
|
32 |
+
After these steps, the new model should be compatible with most FastChat features, such as CLI, web UI, model worker, and OpenAI-compatible API server. Please do some testing with these features as well.
|
33 |
+
|
34 |
+
### Supported models
|
35 |
+
|
36 |
+
- [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
|
37 |
+
- example: `python3 -m fastchat.serve.cli --model-path meta-llama/Llama-2-7b-chat-hf`
|
38 |
+
- Vicuna, Alpaca, LLaMA, Koala
|
39 |
+
- example: `python3 -m fastchat.serve.cli --model-path lmsys/vicuna-7b-v1.5`
|
40 |
+
- [allenai/tulu-2-dpo-7b](https://huggingface.co/allenai/tulu-2-dpo-7b)
|
41 |
+
- [BAAI/AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B)
|
42 |
+
- [BAAI/AquilaChat2-7B](https://huggingface.co/BAAI/AquilaChat2-7B)
|
43 |
+
- [BAAI/AquilaChat2-34B](https://huggingface.co/BAAI/AquilaChat2-34B)
|
44 |
+
- [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en#using-huggingface-transformers)
|
45 |
+
- [argilla/notus-7b-v1](https://huggingface.co/argilla/notus-7b-v1)
|
46 |
+
- [baichuan-inc/baichuan-7B](https://huggingface.co/baichuan-inc/baichuan-7B)
|
47 |
+
- [BlinkDL/RWKV-4-Raven](https://huggingface.co/BlinkDL/rwkv-4-raven)
|
48 |
+
- example: `python3 -m fastchat.serve.cli --model-path ~/model_weights/RWKV-4-Raven-7B-v11x-Eng99%-Other1%-20230429-ctx8192.pth`
|
49 |
+
- [bofenghuang/vigogne-2-7b-instruct](https://huggingface.co/bofenghuang/vigogne-2-7b-instruct)
|
50 |
+
- [bofenghuang/vigogne-2-7b-chat](https://huggingface.co/bofenghuang/vigogne-2-7b-chat)
|
51 |
+
- [camel-ai/CAMEL-13B-Combined-Data](https://huggingface.co/camel-ai/CAMEL-13B-Combined-Data)
|
52 |
+
- [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf)
|
53 |
+
- [databricks/dolly-v2-12b](https://huggingface.co/databricks/dolly-v2-12b)
|
54 |
+
- [deepseek-ai/deepseek-llm-67b-chat](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)
|
55 |
+
- [deepseek-ai/deepseek-coder-33b-instruct](https://huggingface.co/deepseek-ai/deepseek-coder-33b-instruct)
|
56 |
+
- [FlagAlpha/Llama2-Chinese-13b-Chat](https://huggingface.co/FlagAlpha/Llama2-Chinese-13b-Chat)
|
57 |
+
- [FreedomIntelligence/phoenix-inst-chat-7b](https://huggingface.co/FreedomIntelligence/phoenix-inst-chat-7b)
|
58 |
+
- [FreedomIntelligence/ReaLM-7b-v1](https://huggingface.co/FreedomIntelligence/Realm-7b)
|
59 |
+
- [h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b](https://huggingface.co/h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b)
|
60 |
+
- [HuggingFaceH4/starchat-beta](https://huggingface.co/HuggingFaceH4/starchat-beta)
|
61 |
+
- [HuggingFaceH4/zephyr-7b-alpha](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)
|
62 |
+
- [internlm/internlm-chat-7b](https://huggingface.co/internlm/internlm-chat-7b)
|
63 |
+
- [cllm/consistency-llm-7b-codesearchnet/consistency-llm-7b-gsm8k/consistency-llm-7b-sharegpt48k/consistency-llm-7b-spider](https://huggingface.co/cllm)
|
64 |
+
- [IEITYuan/Yuan2-2B/51B/102B-hf](https://huggingface.co/IEITYuan)
|
65 |
+
- [lcw99/polyglot-ko-12.8b-chang-instruct-chat](https://huggingface.co/lcw99/polyglot-ko-12.8b-chang-instruct-chat)
|
66 |
+
- [lmsys/fastchat-t5-3b-v1.0](https://huggingface.co/lmsys/fastchat-t5)
|
67 |
+
- [meta-math/MetaMath-7B-V1.0](https://huggingface.co/meta-math/MetaMath-7B-V1.0)
|
68 |
+
- [Microsoft/Orca-2-7b](https://huggingface.co/microsoft/Orca-2-7b)
|
69 |
+
- [mosaicml/mpt-7b-chat](https://huggingface.co/mosaicml/mpt-7b-chat)
|
70 |
+
- example: `python3 -m fastchat.serve.cli --model-path mosaicml/mpt-7b-chat`
|
71 |
+
- [Neutralzz/BiLLa-7B-SFT](https://huggingface.co/Neutralzz/BiLLa-7B-SFT)
|
72 |
+
- [nomic-ai/gpt4all-13b-snoozy](https://huggingface.co/nomic-ai/gpt4all-13b-snoozy)
|
73 |
+
- [NousResearch/Nous-Hermes-13b](https://huggingface.co/NousResearch/Nous-Hermes-13b)
|
74 |
+
- [openaccess-ai-collective/manticore-13b-chat-pyg](https://huggingface.co/openaccess-ai-collective/manticore-13b-chat-pyg)
|
75 |
+
- [OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5](https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5)
|
76 |
+
- [openchat/openchat_3.5](https://huggingface.co/openchat/openchat_3.5)
|
77 |
+
- [Open-Orca/Mistral-7B-OpenOrca](https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca)
|
78 |
+
- [OpenLemur/lemur-70b-chat-v1](https://huggingface.co/OpenLemur/lemur-70b-chat-v1)
|
79 |
+
- [Phind/Phind-CodeLlama-34B-v2](https://huggingface.co/Phind/Phind-CodeLlama-34B-v2)
|
80 |
+
- [project-baize/baize-v2-7b](https://huggingface.co/project-baize/baize-v2-7b)
|
81 |
+
- [Qwen/Qwen-7B-Chat](https://huggingface.co/Qwen/Qwen-7B-Chat)
|
82 |
+
- [rishiraj/CatPPT](https://huggingface.co/rishiraj/CatPPT)
|
83 |
+
- [Salesforce/codet5p-6b](https://huggingface.co/Salesforce/codet5p-6b)
|
84 |
+
- [StabilityAI/stablelm-tuned-alpha-7b](https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b)
|
85 |
+
- [tenyx/TenyxChat-7B-v1](https://huggingface.co/tenyx/TenyxChat-7B-v1)
|
86 |
+
- [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0)
|
87 |
+
- [THUDM/chatglm-6b](https://huggingface.co/THUDM/chatglm-6b)
|
88 |
+
- [THUDM/chatglm2-6b](https://huggingface.co/THUDM/chatglm2-6b)
|
89 |
+
- [tiiuae/falcon-40b](https://huggingface.co/tiiuae/falcon-40b)
|
90 |
+
- [tiiuae/falcon-180B-chat](https://huggingface.co/tiiuae/falcon-180B-chat)
|
91 |
+
- [timdettmers/guanaco-33b-merged](https://huggingface.co/timdettmers/guanaco-33b-merged)
|
92 |
+
- [togethercomputer/RedPajama-INCITE-7B-Chat](https://huggingface.co/togethercomputer/RedPajama-INCITE-7B-Chat)
|
93 |
+
- [VMware/open-llama-7b-v2-open-instruct](https://huggingface.co/VMware/open-llama-7b-v2-open-instruct)
|
94 |
+
- [WizardLM/WizardLM-13B-V1.0](https://huggingface.co/WizardLM/WizardLM-13B-V1.0)
|
95 |
+
- [WizardLM/WizardCoder-15B-V1.0](https://huggingface.co/WizardLM/WizardCoder-15B-V1.0)
|
96 |
+
- [Xwin-LM/Xwin-LM-7B-V0.1](https://huggingface.co/Xwin-LM/Xwin-LM-70B-V0.1)
|
97 |
+
- Any [EleutherAI](https://huggingface.co/EleutherAI) pythia model such as [pythia-6.9b](https://huggingface.co/EleutherAI/pythia-6.9b)
|
98 |
+
- Any [Peft](https://github.com/huggingface/peft) adapter trained on top of a
|
99 |
+
model above. To activate, must have `peft` in the model path. Note: If
|
100 |
+
loading multiple peft models, you can have them share the base model weights by
|
101 |
+
setting the environment variable `PEFT_SHARE_BASE_WEIGHTS=true` in any model
|
102 |
+
worker.
|
103 |
+
|
104 |
+
|
105 |
+
## API-Based Models
|
106 |
+
To support an API-based model, consider learning from the existing OpenAI example.
|
107 |
+
If the model is compatible with OpenAI APIs, then a configuration file is all that's needed without any additional code.
|
108 |
+
For custom protocols, implementation of a streaming generator in [fastchat/serve/api_provider.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/api_provider.py) is required, following the provided examples. Currently, FastChat is compatible with OpenAI, Anthropic, Google Vertex AI, Mistral, Nvidia NGC, YandexGPT and Reka.
|
109 |
+
|
110 |
+
### Steps to Launch a WebUI with an API Model
|
111 |
+
1. Specify the endpoint information in a JSON configuration file. For instance, create a file named `api_endpoints.json`:
|
112 |
+
```json
|
113 |
+
{
|
114 |
+
"gpt-3.5-turbo": {
|
115 |
+
"model_name": "gpt-3.5-turbo",
|
116 |
+
"api_type": "openai",
|
117 |
+
"api_base": "https://api.openai.com/v1",
|
118 |
+
"api_key": "sk-******",
|
119 |
+
"anony_only": false,
|
120 |
+
"recommended_config": {
|
121 |
+
"temperature": 0.7,
|
122 |
+
"top_p": 1.0
|
123 |
+
},
|
124 |
+
"text-arena": true,
|
125 |
+
"vision-arena": false,
|
126 |
+
}
|
127 |
+
}
|
128 |
+
```
|
129 |
+
- "api_type" can be one of the following: openai, anthropic, gemini, mistral, yandexgpt or reka. For custom APIs, add a new type and implement it accordingly.
|
130 |
+
- "anony_only" indicates whether to display this model in anonymous mode only.
|
131 |
+
- "recommended_config" indicates the recommended generation parameters for temperature and top_p.
|
132 |
+
- "text-arena" indicates whether the model should be displayed in the Text Arena.
|
133 |
+
- "vision-arena" indicates whether the model should be displayed in the Vision Arena.
|
134 |
+
|
135 |
+
2. Launch the Gradio web server with the argument `--register api_endpoints.json`:
|
136 |
+
```
|
137 |
+
python3 -m fastchat.serve.gradio_web_server --controller "" --share --register api_endpoints.json
|
138 |
+
```
|
139 |
+
|
140 |
+
Now, you can open a browser and interact with the model.
|
docs/openai_api.md
ADDED
@@ -0,0 +1,152 @@
1 |
+
# OpenAI-Compatible RESTful APIs
|
2 |
+
|
3 |
+
FastChat provides OpenAI-compatible APIs for its supported models, so you can use FastChat as a local drop-in replacement for OpenAI APIs.
|
4 |
+
The FastChat server is compatible with both [openai-python](https://github.com/openai/openai-python) library and cURL commands.
|
5 |
+
|
6 |
+
The following OpenAI APIs are supported:
|
7 |
+
- Chat Completions. (Reference: https://platform.openai.com/docs/api-reference/chat)
|
8 |
+
- Completions. (Reference: https://platform.openai.com/docs/api-reference/completions)
|
9 |
+
- Embeddings. (Reference: https://platform.openai.com/docs/api-reference/embeddings)
|
10 |
+
|
11 |
+
The REST API can be seamlessly operated from Google Colab, as demonstrated in the [FastChat_API_GoogleColab.ipynb](https://github.com/lm-sys/FastChat/blob/main/playground/FastChat_API_GoogleColab.ipynb) notebook, available in our repository. This notebook provides a practical example of how to utilize the API effectively within the Google Colab environment.
|
12 |
+
|
13 |
+
## RESTful API Server
|
14 |
+
First, launch the controller
|
15 |
+
|
16 |
+
```bash
|
17 |
+
python3 -m fastchat.serve.controller
|
18 |
+
```
|
19 |
+
|
20 |
+
Then, launch the model worker(s)
|
21 |
+
|
22 |
+
```bash
|
23 |
+
python3 -m fastchat.serve.model_worker --model-path lmsys/vicuna-7b-v1.5
|
24 |
+
```
|
25 |
+
|
26 |
+
Finally, launch the RESTful API server
|
27 |
+
|
28 |
+
```bash
|
29 |
+
python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
|
30 |
+
```
|
31 |
+
|
32 |
+
Now, let us test the API server.
|
33 |
+
|
34 |
+
### OpenAI Official SDK
|
35 |
+
The goal of `openai_api_server.py` is to implement a fully OpenAI-compatible API server, so the models can be used directly with [openai-python](https://github.com/openai/openai-python) library.
|
36 |
+
|
37 |
+
First, install OpenAI python package >= 1.0:
|
38 |
+
```bash
|
39 |
+
pip install --upgrade openai
|
40 |
+
```
|
41 |
+
|
42 |
+
Then, interact with the Vicuna model:
|
43 |
+
```python
|
44 |
+
import openai
|
45 |
+
|
46 |
+
openai.api_key = "EMPTY"
|
47 |
+
openai.base_url = "http://localhost:8000/v1/"
|
48 |
+
|
49 |
+
model = "vicuna-7b-v1.5"
|
50 |
+
prompt = "Once upon a time"
|
51 |
+
|
52 |
+
# create a completion
|
53 |
+
completion = openai.completions.create(model=model, prompt=prompt, max_tokens=64)
|
54 |
+
# print the completion
|
55 |
+
print(prompt + completion.choices[0].text)
|
56 |
+
|
57 |
+
# create a chat completion
|
58 |
+
completion = openai.chat.completions.create(
|
59 |
+
model=model,
|
60 |
+
messages=[{"role": "user", "content": "Hello! What is your name?"}]
|
61 |
+
)
|
62 |
+
# print the completion
|
63 |
+
print(completion.choices[0].message.content)
|
64 |
+
```
|
65 |
+
|
66 |
+
Streaming is also supported. See [test_openai_api.py](../tests/test_openai_api.py). If your API server is behind a proxy, you'll need to turn off buffering; in Nginx you can do so by setting `proxy_buffering off;` in the location block for the proxy.
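
A minimal streaming sketch, reusing the client configuration from the example above:
```python
# Stream a chat completion token by token.
stream = openai.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "Tell me a short story."}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```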
|
67 |
+
|
68 |
+
### cURL
|
69 |
+
cURL is another good tool for observing the output of the API.
|
70 |
+
|
71 |
+
List Models:
|
72 |
+
```bash
|
73 |
+
curl http://localhost:8000/v1/models
|
74 |
+
```
|
75 |
+
|
76 |
+
Chat Completions:
|
77 |
+
```bash
|
78 |
+
curl http://localhost:8000/v1/chat/completions \
|
79 |
+
-H "Content-Type: application/json" \
|
80 |
+
-d '{
|
81 |
+
"model": "vicuna-7b-v1.5",
|
82 |
+
"messages": [{"role": "user", "content": "Hello! What is your name?"}]
|
83 |
+
}'
|
84 |
+
```
|
85 |
+
|
86 |
+
Text Completions:
|
87 |
+
```bash
|
88 |
+
curl http://localhost:8000/v1/completions \
|
89 |
+
-H "Content-Type: application/json" \
|
90 |
+
-d '{
|
91 |
+
"model": "vicuna-7b-v1.5",
|
92 |
+
"prompt": "Once upon a time",
|
93 |
+
"max_tokens": 41,
|
94 |
+
"temperature": 0.5
|
95 |
+
}'
|
96 |
+
```
|
97 |
+
|
98 |
+
Embeddings:
|
99 |
+
```bash
|
100 |
+
curl http://localhost:8000/v1/embeddings \
|
101 |
+
-H "Content-Type: application/json" \
|
102 |
+
-d '{
|
103 |
+
"model": "vicuna-7b-v1.5",
|
104 |
+
"input": "Hello world!"
|
105 |
+
}'
|
106 |
+
```
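
The same embeddings request can be made through the openai-python client (a small sketch, using the local server configured above):
```python
# Create an embedding with the openai-python client pointed at the local server.
import openai

openai.api_key = "EMPTY"
openai.base_url = "http://localhost:8000/v1/"

emb = openai.embeddings.create(model="vicuna-7b-v1.5", input="Hello world!")
print(len(emb.data[0].embedding))  # embedding dimensionality
```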
|
107 |
+
|
108 |
+
### Running multiple models
|
109 |
+
|
110 |
+
If you want to run multiple models on the same machine and in the same process,
|
111 |
+
you can replace the `model_worker` step above with a multi-model variant:
|
112 |
+
|
113 |
+
```bash
|
114 |
+
python3 -m fastchat.serve.multi_model_worker \
|
115 |
+
--model-path lmsys/vicuna-7b-v1.5 \
|
116 |
+
--model-names vicuna-7b-v1.5 \
|
117 |
+
--model-path lmsys/longchat-7b-16k \
|
118 |
+
--model-names longchat-7b-16k
|
119 |
+
```
|
120 |
+
|
121 |
+
This loads both models into the same accelerator and in the same process. This
|
122 |
+
works best when using a Peft model that triggers the `PeftModelAdapter`.
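
Once both workers are up, you can confirm that both model names are registered by listing the models as shown earlier:
```bash
# Both vicuna-7b-v1.5 and longchat-7b-16k should appear in the returned model list.
curl http://localhost:8000/v1/models
```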
|
123 |
+
|
124 |
+
TODO: Base model weight optimization will be fixed once [this
|
125 |
+
Peft](https://github.com/huggingface/peft/issues/430) issue is resolved.
|
126 |
+
|
127 |
+
## LangChain Support
|
128 |
+
This OpenAI-compatible API server supports LangChain. See [LangChain Integration](langchain_integration.md) for details.
|
129 |
+
|
130 |
+
## Adjusting Environment Variables
|
131 |
+
|
132 |
+
### Timeout
|
133 |
+
By default, a timeout error will occur if a model worker does not respond within 100 seconds. If your model/hardware is slower, you can change this timeout through an environment variable:
|
134 |
+
|
135 |
+
```bash
|
136 |
+
export FASTCHAT_WORKER_API_TIMEOUT=<larger timeout in seconds>
|
137 |
+
```
|
138 |
+
|
139 |
+
### Batch size
|
140 |
+
If you encounter an out-of-memory (OOM) error while creating embeddings, you can use a smaller batch size by setting
|
141 |
+
|
142 |
+
```bash
|
143 |
+
export FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE=1
|
144 |
+
```
|
145 |
+
|
146 |
+
## Todos
|
147 |
+
Some features to be implemented:
|
148 |
+
|
149 |
+
- [ ] Support more parameters like `logprobs`, `logit_bias`, `user`, `presence_penalty` and `frequency_penalty`
|
150 |
+
- [ ] Model details (permissions, owner and create time)
|
151 |
+
- [ ] Edits API
|
152 |
+
- [ ] Rate Limitation Settings
|
docs/server_arch.md
ADDED
@@ -0,0 +1,2 @@
1 |
+
# FastChat Server Architecture
|
2 |
+
![server arch](../assets/server_arch.png)
|
docs/third_party_ui.md
ADDED
@@ -0,0 +1,24 @@
1 |
+
# Third Party UI
|
2 |
+
If you want to use FastChat with your own UI or a third-party UI, you can launch the [OpenAI compatible server](openai_api.md), expose it with a tunnelling service such as Tunnelmole or ngrok, and then enter the credentials in the UI accordingly.
|
3 |
+
|
4 |
+
You can find suitable UIs from third party repos:
|
5 |
+
- [WongSaang's ChatGPT UI](https://github.com/WongSaang/chatgpt-ui)
|
6 |
+
- [McKayWrigley's Chatbot UI](https://github.com/mckaywrigley/chatbot-ui)
|
7 |
+
|
8 |
+
- Please note that some third-party providers only offer the standard `gpt-3.5-turbo`, `gpt-4`, etc., so you will have to add your own custom model inside the code. [Here is an example of how to create a UI with any custom model name](https://github.com/ztjhz/BetterChatGPT/pull/461).
|
9 |
+
|
10 |
+
##### Using Tunnelmole
|
11 |
+
Tunnelmole is an open source tunnelling tool. You can find its source code on [Github](https://github.com/robbie-cahill/tunnelmole-client). Here's how you can use Tunnelmole:
|
12 |
+
1. Install Tunnelmole with `curl -O https://install.tunnelmole.com/9Wtxu/install && sudo bash install`. (On Windows, download [tmole.exe](https://tunnelmole.com/downloads/tmole.exe)). Head over to the [README](https://github.com/robbie-cahill/tunnelmole-client) for other methods such as `npm` or building from source.
|
13 |
+
2. Run `tmole 7860` (replace `7860` with your listening port if it is different from 7860). The output will display two URLs: one HTTP and one HTTPS. It's best to use the HTTPS URL for better privacy and security.
|
14 |
+
```
|
15 |
+
➜ ~ tmole 7860
|
16 |
+
http://bvdo5f-ip-49-183-170-144.tunnelmole.net is forwarding to localhost:7860
|
17 |
+
https://bvdo5f-ip-49-183-170-144.tunnelmole.net is forwarding to localhost:7860
|
18 |
+
```
|
19 |
+
|
20 |
+
##### Using ngrok
|
21 |
+
ngrok is a popular closed source tunnelling tool. First download and install it from [ngrok.com](https://ngrok.com/downloads). Here's how to use it to expose port 7860.
|
22 |
+
```
|
23 |
+
ngrok http 7860
|
24 |
+
```
|
docs/training.md
ADDED
@@ -0,0 +1,118 @@
1 |
+
### Fine-tuning FastChat-T5
|
2 |
+
You can use the following command to train FastChat-T5 with 4 x A100 (40GB).
|
3 |
+
```bash
|
4 |
+
torchrun --nproc_per_node=4 --master_port=9778 fastchat/train/train_flant5.py \
|
5 |
+
--model_name_or_path google/flan-t5-xl \
|
6 |
+
--data_path ./data/dummy_conversation.json \
|
7 |
+
--bf16 True \
|
8 |
+
--output_dir ./checkpoints_flant5_3b \
|
9 |
+
--num_train_epochs 3 \
|
10 |
+
--per_device_train_batch_size 1 \
|
11 |
+
--per_device_eval_batch_size 1 \
|
12 |
+
--gradient_accumulation_steps 4 \
|
13 |
+
--evaluation_strategy "no" \
|
14 |
+
--save_strategy "steps" \
|
15 |
+
--save_steps 300 \
|
16 |
+
--save_total_limit 1 \
|
17 |
+
--learning_rate 2e-5 \
|
18 |
+
--weight_decay 0. \
|
19 |
+
--warmup_ratio 0.03 \
|
20 |
+
--lr_scheduler_type "cosine" \
|
21 |
+
--logging_steps 1 \
|
22 |
+
--fsdp "full_shard auto_wrap" \
|
23 |
+
--fsdp_transformer_layer_cls_to_wrap T5Block \
|
24 |
+
--tf32 True \
|
25 |
+
--model_max_length 2048 \
|
26 |
+
--preprocessed_path ./preprocessed_data/processed.json \
|
27 |
+
--gradient_checkpointing True
|
28 |
+
```
|
29 |
+
|
30 |
+
After training, please use our post-processing [function](https://github.com/lm-sys/FastChat/blob/55051ad0f23fef5eeecbda14a2e3e128ffcb2a98/fastchat/utils.py#L166-L185) to update the saved model weights. Additional discussions can be found [here](https://github.com/lm-sys/FastChat/issues/643).
|
31 |
+
|
32 |
+
### Fine-tuning using (Q)LoRA
|
33 |
+
You can use the following command to train Vicuna-7B with QLoRA and ZeRO2. Note that ZeRO3 is not currently supported with QLoRA, but ZeRO3 does support LoRA, which has a reference configuration under playground/deepspeed_config_s3.json. To use QLoRA, you must have bitsandbytes>=0.39.0 and transformers>=4.30.0 installed.
|
34 |
+
```bash
|
35 |
+
deepspeed fastchat/train/train_lora.py \
|
36 |
+
--model_name_or_path ~/model_weights/llama-7b \
|
37 |
+
--lora_r 8 \
|
38 |
+
--lora_alpha 16 \
|
39 |
+
--lora_dropout 0.05 \
|
40 |
+
--data_path ./data/dummy_conversation.json \
|
41 |
+
--bf16 True \
|
42 |
+
--output_dir ./checkpoints \
|
43 |
+
--num_train_epochs 3 \
|
44 |
+
--per_device_train_batch_size 1 \
|
45 |
+
--per_device_eval_batch_size 1 \
|
46 |
+
--gradient_accumulation_steps 1 \
|
47 |
+
--evaluation_strategy "no" \
|
48 |
+
--save_strategy "steps" \
|
49 |
+
--save_steps 1200 \
|
50 |
+
--save_total_limit 100 \
|
51 |
+
--learning_rate 2e-5 \
|
52 |
+
--weight_decay 0. \
|
53 |
+
--warmup_ratio 0.03 \
|
54 |
+
--lr_scheduler_type "cosine" \
|
55 |
+
--logging_steps 1 \
|
56 |
+
--tf32 True \
|
57 |
+
--model_max_length 2048 \
|
58 |
+
--q_lora True \
|
59 |
+
    --deepspeed playground/deepspeed_config_s2.json
|
60 |
+
```
|
61 |
+
|
62 |
+
For T5-XL or XXL
|
63 |
+
|
64 |
+
```bash
|
65 |
+
deepspeed fastchat/train/train_lora_t5.py \
|
66 |
+
--model_name_or_path google/flan-t5-xl \
|
67 |
+
--data_path ./data/dummy_conversation.json \
|
68 |
+
--bf16 True \
|
69 |
+
--output_dir ./checkpoints_flant5_3b \
|
70 |
+
--num_train_epochs 3 \
|
71 |
+
--per_device_train_batch_size 1 \
|
72 |
+
--per_device_eval_batch_size 1 \
|
73 |
+
--gradient_accumulation_steps 4 \
|
74 |
+
--evaluation_strategy "no" \
|
75 |
+
--save_strategy "steps" \
|
76 |
+
--save_steps 300 \
|
77 |
+
--save_total_limit 1 \
|
78 |
+
--learning_rate 2e-5 \
|
79 |
+
--weight_decay 0. \
|
80 |
+
--warmup_ratio 0.03 \
|
81 |
+
--lr_scheduler_type "cosine" \
|
82 |
+
--logging_steps 1 \
|
83 |
+
--model_max_length 2048 \
|
84 |
+
--preprocessed_path ./preprocessed_data/processed.json \
|
85 |
+
--gradient_checkpointing True \
|
86 |
+
--q_lora True \
|
87 |
+
--deepspeed playground/deepspeed_config_s2.json
|
88 |
+
|
89 |
+
```
|
90 |
+
|
91 |
+
### Fine-tuning Vicuna-7B with Local NPUs
|
92 |
+
|
93 |
+
You can use the following command to train Vicuna-7B with 8 x NPUs. Use `--nproc_per_node` to specify the number of NPUs.
|
94 |
+
```bash
|
95 |
+
torchrun --nproc_per_node=8 --master_port=20001 fastchat/train/train.py \
|
96 |
+
--model_name_or_path ~/vicuna-7b-v1.5-16k \
|
97 |
+
--data_path data/dummy_conversation.json \
|
98 |
+
--fp16 True \
|
99 |
+
--output_dir output_vicuna \
|
100 |
+
--num_train_epochs 3 \
|
101 |
+
--per_device_train_batch_size 8 \
|
102 |
+
--per_device_eval_batch_size 1 \
|
103 |
+
--gradient_accumulation_steps 1 \
|
104 |
+
--evaluation_strategy "no" \
|
105 |
+
--save_strategy "steps" \
|
106 |
+
--save_steps 1200 \
|
107 |
+
--save_total_limit 10 \
|
108 |
+
--learning_rate 2e-5 \
|
109 |
+
--weight_decay 0. \
|
110 |
+
--warmup_ratio 0.03 \
|
111 |
+
--lr_scheduler_type "cosine" \
|
112 |
+
--logging_steps 1 \
|
113 |
+
--fsdp "full_shard auto_wrap" \
|
114 |
+
--fsdp_transformer_layer_cls_to_wrap 'LlamaDecoderLayer' \
|
115 |
+
--model_max_length 2048 \
|
116 |
+
--gradient_checkpointing True \
|
117 |
+
--lazy_preprocess True
|
118 |
+
```
|
docs/vicuna_weights_version.md
ADDED
@@ -0,0 +1,97 @@
1 |
+
## Vicuna Weights
|
2 |
+
|
3 |
+
| Weights version | Link | FastChat version compatibility | Base Model | Release Date | Fine-tuning Data |
|
4 |
+
| ---- | ---- | ---- | ---- | ---- | ---- |
|
5 |
+
| v1.5 | [7B](https://huggingface.co/lmsys/vicuna-7b-v1.5), [7B-16k](https://huggingface.co/lmsys/vicuna-7b-v1.5-16k), [13B](https://huggingface.co/lmsys/vicuna-13b-v1.5), [13B-16k](https://huggingface.co/lmsys/vicuna-13b-v1.5-16k) | `>=0.2.21` | Llama 2 | Aug. 1, 2023 | 370M tokens |
|
6 |
+
| v1.3 | [7B](https://huggingface.co/lmsys/vicuna-7b-v1.3), [13B](https://huggingface.co/lmsys/vicuna-13b-v1.3), [33B](https://huggingface.co/lmsys/vicuna-33b-v1.3) | `>=0.2.1` | Llama 1 | Jun. 22, 2023 | 370M tokens |
|
7 |
+
| v1.1 | [7B](https://huggingface.co/lmsys/vicuna-7b-v1.1), [13B](https://huggingface.co/lmsys/vicuna-13b-v1.1) | `>=0.2.1` | Llama 1 | Apr. 12, 2023 | - |
|
8 |
+
| v0 | [7B-delta](https://huggingface.co/lmsys/vicuna-7b-delta-v0), [13B-delta](https://huggingface.co/lmsys/vicuna-13b-delta-v0) | `<=0.1.10` | Llama 1 | Mar. 30, 2023 | - |
|
9 |
+
|
10 |
+
### Updates
|
11 |
+
- Major updates of weights v1.5
|
12 |
+
- Use Llama2 as the base model.
|
13 |
+
- Provide 16K context length versions using linear RoPE scaling.
|
14 |
+
|
15 |
+
- Major updates of weights v1.3
|
16 |
+
- Train with twice the amount of ShareGPT data compared to previous versions.
|
17 |
+
- Provide merged weights directly instead of delta weights.
|
18 |
+
|
19 |
+
- Major updates of weights v1.1
|
20 |
+
- Refactor the tokenization and separator. In Vicuna v1.1, the separator has been changed from `###` to the EOS token `</s>`. This change makes it easier to determine the generation stop criteria and enables better compatibility with other libraries.
|
21 |
+
- Fix the supervised fine-tuning loss computation for better model quality.
|
22 |
+
|
23 |
+
## Prompt Template
|
24 |
+
|
25 |
+
### Example prompt (weights v1.1, v1.3, v1.5)
|
26 |
+
```
|
27 |
+
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
|
28 |
+
|
29 |
+
USER: Hello!
|
30 |
+
ASSISTANT: Hello!</s>
|
31 |
+
USER: How are you?
|
32 |
+
ASSISTANT: I am good.</s>
|
33 |
+
```
|
34 |
+
|
35 |
+
See a full prompt template [here](https://github.com/lm-sys/FastChat/blob/d578599c69d060e6d40943f1b5b72af98956092a/fastchat/conversation.py#L286-L299) and example output [here](https://github.com/lm-sys/FastChat/blob/d578599c69d060e6d40943f1b5b72af98956092a/fastchat/conversation.py#L748-L753).
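
As a small sketch, the same prompt can also be built programmatically with FastChat's conversation templates (assuming the `vicuna_v1.1` template name, which covers the v1.1/v1.3/v1.5 style):
```python
# Rebuild the Vicuna prompt shown above using FastChat's conversation template API.
from fastchat.conversation import get_conv_template

conv = get_conv_template("vicuna_v1.1")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], None)  # leave the assistant turn open for generation
print(conv.get_prompt())
```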
|
36 |
+
|
37 |
+
### Example prompt (weights v0)
|
38 |
+
```
|
39 |
+
A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
|
40 |
+
|
41 |
+
### Human: Hello!
|
42 |
+
### Assistant: Hello!
|
43 |
+
### Human: How are you?
|
44 |
+
### Assistant: I am good.
|
45 |
+
```
|
46 |
+
|
47 |
+
See the full prompt template [here](https://github.com/lm-sys/FastChat/blob/d578599c69d060e6d40943f1b5b72af98956092a/fastchat/conversation.py#L238-L269).
|
48 |
+
|
49 |
+
## How to Apply Delta Weights (Only Needed for Weights v0)
|
50 |
+
|
51 |
+
We release [Vicuna](https://lmsys.org/blog/2023-03-30-vicuna/) weights v0 as delta weights to comply with the LLaMA model license.
|
52 |
+
You can add our delta to the original LLaMA weights to obtain the Vicuna weights. Instructions:
|
53 |
+
|
54 |
+
1. Get the original LLaMA weights in the Hugging Face format by following the instructions [here](https://huggingface.co/docs/transformers/main/model_doc/llama).
|
55 |
+
2. Use the following scripts to get Vicuna weights by applying our delta. They will automatically download delta weights from our Hugging Face [account](https://huggingface.co/lmsys).
|
56 |
+
|
57 |
+
**NOTE**:
|
58 |
+
Weights v1.1 are only compatible with `transformers>=4.28.0` and `fschat>=0.2.0`.
|
59 |
+
Please update your local packages accordingly. If you follow the above commands to do a fresh install, then you should get all the correct versions.
|
60 |
+
|
61 |
+
#### Vicuna-7B
|
62 |
+
This conversion command needs around 30 GB of CPU RAM.
|
63 |
+
See the "Low CPU Memory Conversion" section below if you do not have enough memory.
|
64 |
+
Replace `/path/to/*` with the real paths.
|
65 |
+
```bash
|
66 |
+
python3 -m fastchat.model.apply_delta \
|
67 |
+
--base-model-path /path/to/llama-7b \
|
68 |
+
--target-model-path /path/to/output/vicuna-7b \
|
69 |
+
--delta-path lmsys/vicuna-7b-delta-v1.1
|
70 |
+
```
|
71 |
+
|
72 |
+
#### Vicuna-13B
|
73 |
+
This conversion command needs around 60 GB of CPU RAM.
|
74 |
+
See the "Low CPU Memory Conversion" section below if you do not have enough memory.
|
75 |
+
Replace `/path/to/*` with the real paths.
|
76 |
+
```bash
|
77 |
+
python3 -m fastchat.model.apply_delta \
|
78 |
+
--base-model-path /path/to/llama-13b \
|
79 |
+
--target-model-path /path/to/output/vicuna-13b \
|
80 |
+
--delta-path lmsys/vicuna-13b-delta-v1.1
|
81 |
+
```
|
82 |
+
|
83 |
+
#### Low CPU Memory Conversion
|
84 |
+
You can try these methods to reduce the CPU RAM requirement of weight conversion.
|
85 |
+
1. Append `--low-cpu-mem` to the commands above, which will split large weight files into smaller ones and use the disk as temporary storage. This can keep the peak memory at less than 16GB. See the example command after this list.
|
86 |
+
2. Create a large swap file and rely on the operating system to automatically utilize the disk as virtual memory.
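
For example, the Vicuna-7B conversion command above with the flag appended:
```bash
python3 -m fastchat.model.apply_delta \
    --base-model-path /path/to/llama-7b \
    --target-model-path /path/to/output/vicuna-7b \
    --delta-path lmsys/vicuna-7b-delta-v1.1 \
    --low-cpu-mem
```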
|
87 |
+
|
88 |
+
## FAQ
|
89 |
+
|
90 |
+
### Tokenizer issues
|
91 |
+
There are some frequently asked tokenizer issues (https://github.com/lm-sys/FastChat/issues/408).
|
92 |
+
Some of them are not only related to FastChat or Vicuna weights but are also related to how you convert the base llama model.
|
93 |
+
|
94 |
+
We suggest that you use `transformers>=4.28.0` and redo the weight conversion for the base llama model.
|
95 |
+
After applying the delta, you should have a file named `special_tokens_map.json` in your converted weight folder for either v0 or v1.1.
|
96 |
+
The contents of this file should be the same as this file: https://huggingface.co/lmsys/vicuna-13b-delta-v0/blob/main/special_tokens_map.json.
|
97 |
+
If the file is not present, please copy the `special_tokens_map.json` and `tokenizer_config.json` files from https://huggingface.co/lmsys/vicuna-13b-delta-v0/tree/main to your converted weight folder. This works for both v0 and v1.1.
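
A quick way to fetch the two files (assuming the standard Hugging Face `resolve` URL pattern for the repository linked above):
```bash
cd /path/to/converted/vicuna-weights
wget https://huggingface.co/lmsys/vicuna-13b-delta-v0/resolve/main/special_tokens_map.json
wget https://huggingface.co/lmsys/vicuna-13b-delta-v0/resolve/main/tokenizer_config.json
```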
|
docs/vllm_integration.md
ADDED
@@ -0,0 +1,25 @@
1 |
+
# vLLM Integration
|
2 |
+
You can use [vLLM](https://vllm.ai/) as an optimized worker implementation in FastChat.
|
3 |
+
It offers advanced continuous batching and a much higher (~10x) throughput.
|
4 |
+
See the supported models [here](https://vllm.readthedocs.io/en/latest/models/supported_models.html).
|
5 |
+
|
6 |
+
## Instructions
|
7 |
+
1. Install vLLM.
|
8 |
+
```
|
9 |
+
pip install vllm
|
10 |
+
```
|
11 |
+
|
12 |
+
2. When you launch a model worker, replace the normal worker (`fastchat.serve.model_worker`) with the vLLM worker (`fastchat.serve.vllm_worker`). All other commands such as controller, gradio web server, and OpenAI API server are kept the same.
|
13 |
+
```
|
14 |
+
python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-7b-v1.5
|
15 |
+
```
|
16 |
+
|
17 |
+
If you see tokenizer errors, try
|
18 |
+
```
|
19 |
+
python3 -m fastchat.serve.vllm_worker --model-path lmsys/vicuna-7b-v1.5 --tokenizer hf-internal-testing/llama-tokenizer
|
20 |
+
```
|
21 |
+
|
22 |
+
If you use an AWQ quantized model, try
|
23 |
+
```
|
24 |
+
python3 -m fastchat.serve.vllm_worker --model-path TheBloke/vicuna-7B-v1.5-AWQ --quantization awq
|
25 |
+
```
|
docs/xFasterTransformer.md
ADDED
@@ -0,0 +1,90 @@
1 |
+
# xFasterTransformer Inference Framework
|
2 |
+
|
3 |
+
The customized [xFasterTransformer](https://github.com/intel/xFasterTransformer) framework is integrated into FastChat to provide **faster** inference speed on Intel CPUs.
|
4 |
+
|
5 |
+
## Install xFasterTransformer
|
6 |
+
|
7 |
+
Setup environment (please refer to [this link](https://github.com/intel/xFasterTransformer#installation) for more details):
|
8 |
+
|
9 |
+
```bash
|
10 |
+
pip install xfastertransformer
|
11 |
+
```
|
12 |
+
|
13 |
+
## Prepare models
|
14 |
+
|
15 |
+
Prepare Model (please refer to [this link](https://github.com/intel/xFasterTransformer#prepare-model) for more details):
|
16 |
+
```bash
|
17 |
+
python ./tools/chatglm_convert.py -i ${HF_DATASET_DIR} -o ${OUTPUT_DIR}
|
18 |
+
```
|
19 |
+
|
20 |
+
## Parameters of xFasterTransformer
|
21 |
+
- `--enable-xft` enables xFasterTransformer in FastChat.
|
22 |
+
- `--xft-max-seq-len` sets the maximum token length the model can process, including the input tokens.
|
23 |
+
- `--xft-dtype` sets the data type used by xFasterTransformer for computation. Supported types are fp32, fp16, int8, bf16, and hybrid types such as bf16_fp16 and bf16_int8. For data type details, please refer to [this link](https://github.com/intel/xFasterTransformer/wiki/Data-Type-Support-Platform).
|
24 |
+
|
25 |
+
|
26 |
+
Chat with the CLI:
|
27 |
+
```bash
|
28 |
+
# Run inference on all CPUs using float16
|
29 |
+
python3 -m fastchat.serve.cli \
|
30 |
+
--model-path /path/to/models \
|
31 |
+
--enable-xft \
|
32 |
+
--xft-dtype fp16
|
33 |
+
```
|
34 |
+
or with numactl on multi-socket server for better performance
|
35 |
+
```bash
|
36 |
+
# Run inference on NUMA node 0 with data type bf16_fp16 (the first token uses bfloat16, the rest use float16)
|
37 |
+
numactl -N 0 --localalloc \
|
38 |
+
python3 -m fastchat.serve.cli \
|
39 |
+
--model-path /path/to/models/chatglm2_6b_cpu/ \
|
40 |
+
--enable-xft \
|
41 |
+
--xft-dtype bf16_fp16
|
42 |
+
```
|
43 |
+
or using MPI to run inference on 2 sockets for better performance
|
44 |
+
```bash
|
45 |
+
# Run inference on NUMA nodes 0 and 1 with data type bf16_fp16 (the first token uses bfloat16, the rest use float16)
|
46 |
+
OMP_NUM_THREADS=$CORE_NUM_PER_SOCKET LD_PRELOAD=libiomp5.so mpirun \
|
47 |
+
-n 1 numactl -N 0 --localalloc \
|
48 |
+
python -m fastchat.serve.cli \
|
49 |
+
--model-path /path/to/models/chatglm2_6b_cpu/ \
|
50 |
+
--enable-xft \
|
51 |
+
--xft-dtype bf16_fp16 : \
|
52 |
+
-n 1 numactl -N 1 --localalloc \
|
53 |
+
python -m fastchat.serve.cli \
|
54 |
+
--model-path /path/to/models/chatglm2_6b_cpu/ \
|
55 |
+
--enable-xft \
|
56 |
+
--xft-dtype bf16_fp16
|
57 |
+
```
|
58 |
+
|
59 |
+
|
60 |
+
Start model worker:
|
61 |
+
```bash
|
62 |
+
# Load model with default configuration (max sequence length 4096, no GPU split setting).
|
63 |
+
python3 -m fastchat.serve.model_worker \
|
64 |
+
--model-path /path/to/models \
|
65 |
+
--enable-xft \
|
66 |
+
--xft-dtype bf16_fp16
|
67 |
+
```
|
68 |
+
or with numactl on multi-socket server for better performance
|
69 |
+
```bash
|
70 |
+
# Run inference on NUMA node 0 with data type bf16_fp16 (the first token uses bfloat16, the rest use float16)
|
71 |
+
numactl -N 0 --localalloc python3 -m fastchat.serve.model_worker \
|
72 |
+
--model-path /path/to/models \
|
73 |
+
--enable-xft \
|
74 |
+
--xft-dtype bf16_fp16
|
75 |
+
```
|
76 |
+
or using MPI to run inference on 2 sockets for better performance
|
77 |
+
```bash
|
78 |
+
# Run inference on NUMA nodes 0 and 1 with data type bf16_fp16 (the first token uses bfloat16, the rest use float16)
|
79 |
+
OMP_NUM_THREADS=$CORE_NUM_PER_SOCKET LD_PRELOAD=libiomp5.so mpirun \
|
80 |
+
-n 1 numactl -N 0 --localalloc python -m fastchat.serve.model_worker \
|
81 |
+
--model-path /path/to/models \
|
82 |
+
--enable-xft \
|
83 |
+
--xft-dtype bf16_fp16 : \
|
84 |
+
-n 1 numactl -N 1 --localalloc python -m fastchat.serve.model_worker \
|
85 |
+
--model-path /path/to/models \
|
86 |
+
--enable-xft \
|
87 |
+
--xft-dtype bf16_fp16
|
88 |
+
```
|
89 |
+
|
90 |
+
For more details, please refer to [this link](https://github.com/intel/xFasterTransformer#how-to-run)
|
fastchat/.ipynb_checkpoints/constants-checkpoint.py
ADDED
@@ -0,0 +1,294 @@
1 |
+
"""
|
2 |
+
Global constants.
|
3 |
+
"""
|
4 |
+
|
5 |
+
from enum import IntEnum
|
6 |
+
import os
|
7 |
+
|
8 |
+
REPO_PATH = os.path.dirname(os.path.dirname(__file__))
|
9 |
+
|
10 |
+
|
11 |
+
TASKS = ["summarization", "question-answering", "sentiment-analysis", "text-classification", "toxic-detection", "knowledge-opended", "knowledge-mtpchoice", "language-modelling", "information-retrieval", "reasoning", "math"]
|
12 |
+
|
13 |
+
# Survey Link URL (to be removed)
|
14 |
+
SURVEY_LINK = """<div style='text-align: center; margin: 20px 0;'>
|
15 |
+
<div style='display: inline-block; border: 2px solid #DE3163; padding: 10px; border-radius: 5px;'>
|
16 |
+
<span style='color: #DE3163; font-weight: bold;'>We would love your feedback! Fill out <a href='https://docs.google.com/forms/d/e/1FAIpQLSfKSxwFOW6qD05phh4fwYjk8q0YV1VQe_bmK0_qOVTbC66_MA/viewform?usp=sf_link' style='color: #DE3163; text-decoration: underline;'>this short survey</a> to tell us what you like about the arena, what you don't like, and what you want to see in the future.</span>
|
17 |
+
</div>
|
18 |
+
</div>"""
|
19 |
+
|
20 |
+
##### For the gradio web server
|
21 |
+
SERVER_ERROR_MSG = (
|
22 |
+
"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
|
23 |
+
)
|
24 |
+
TEXT_MODERATION_MSG = (
|
25 |
+
"$MODERATION$ YOUR TEXT VIOLATES OUR CONTENT MODERATION GUIDELINES."
|
26 |
+
)
|
27 |
+
IMAGE_MODERATION_MSG = (
|
28 |
+
"$MODERATION$ YOUR IMAGE VIOLATES OUR CONTENT MODERATION GUIDELINES."
|
29 |
+
)
|
30 |
+
MODERATION_MSG = "$MODERATION$ YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES."
|
31 |
+
CONVERSATION_LIMIT_MSG = "YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION."
|
32 |
+
INACTIVE_MSG = "THIS SESSION HAS BEEN INACTIVE FOR TOO LONG. PLEASE REFRESH THIS PAGE."
|
33 |
+
SLOW_MODEL_MSG = "⚠️ Both models will show the responses all at once. Please stay patient as it may take over 30 seconds."
|
34 |
+
RATE_LIMIT_MSG = "**RATE LIMIT OF THIS MODEL IS REACHED. PLEASE COME BACK LATER OR USE <span style='color: red; font-weight: bold;'>[BATTLE MODE](https://chat.lmsys.org)</span> (the 1st tab).**"
|
35 |
+
# Maximum input length
|
36 |
+
INPUT_CHAR_LEN_LIMIT = int(os.getenv("FASTCHAT_INPUT_CHAR_LEN_LIMIT", 12000))
|
37 |
+
BLIND_MODE_INPUT_CHAR_LEN_LIMIT = int(
|
38 |
+
os.getenv("FASTCHAT_BLIND_MODE_INPUT_CHAR_LEN_LIMIT", 30000)
|
39 |
+
)
|
40 |
+
# Maximum conversation turns
|
41 |
+
CONVERSATION_TURN_LIMIT = 50
|
42 |
+
# Session expiration time
|
43 |
+
SESSION_EXPIRATION_TIME = 3600
|
44 |
+
# The output dir of log files
|
45 |
+
LOGDIR = os.getenv("LOGDIR", ".")
|
46 |
+
# CPU Instruction Set Architecture
|
47 |
+
CPU_ISA = os.getenv("CPU_ISA")
|
48 |
+
|
49 |
+
|
50 |
+
##### For the controller and workers (could be overwritten through ENV variables.)
|
51 |
+
CONTROLLER_HEART_BEAT_EXPIRATION = int(
|
52 |
+
os.getenv("FASTCHAT_CONTROLLER_HEART_BEAT_EXPIRATION", 90)
|
53 |
+
)
|
54 |
+
WORKER_HEART_BEAT_INTERVAL = int(os.getenv("FASTCHAT_WORKER_HEART_BEAT_INTERVAL", 45))
|
55 |
+
WORKER_API_TIMEOUT = int(os.getenv("FASTCHAT_WORKER_API_TIMEOUT", 100))
|
56 |
+
WORKER_API_EMBEDDING_BATCH_SIZE = int(
|
57 |
+
os.getenv("FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE", 4)
|
58 |
+
)
|
59 |
+
|
60 |
+
|
61 |
+
class ErrorCode(IntEnum):
|
62 |
+
"""
|
63 |
+
https://platform.openai.com/docs/guides/error-codes/api-errors
|
64 |
+
"""
|
65 |
+
|
66 |
+
VALIDATION_TYPE_ERROR = 40001
|
67 |
+
|
68 |
+
INVALID_AUTH_KEY = 40101
|
69 |
+
INCORRECT_AUTH_KEY = 40102
|
70 |
+
NO_PERMISSION = 40103
|
71 |
+
|
72 |
+
INVALID_MODEL = 40301
|
73 |
+
PARAM_OUT_OF_RANGE = 40302
|
74 |
+
CONTEXT_OVERFLOW = 40303
|
75 |
+
|
76 |
+
RATE_LIMIT = 42901
|
77 |
+
QUOTA_EXCEEDED = 42902
|
78 |
+
ENGINE_OVERLOADED = 42903
|
79 |
+
|
80 |
+
INTERNAL_ERROR = 50001
|
81 |
+
CUDA_OUT_OF_MEMORY = 50002
|
82 |
+
GRADIO_REQUEST_ERROR = 50003
|
83 |
+
GRADIO_STREAM_UNKNOWN_ERROR = 50004
|
84 |
+
CONTROLLER_NO_WORKER = 50005
|
85 |
+
CONTROLLER_WORKER_TIMEOUT = 50006
|
86 |
+
|
87 |
+
LANGUAGES = list(map(lambda x: (x[1], x[0]), [
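    # Each inline pair below is (ISO 639-1 code, language name); the map swaps them to (name, code).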
|
88 |
+
('aa', 'Afar'),
|
89 |
+
('ab', 'Abkhazian'),
|
90 |
+
('af', 'Afrikaans'),
|
91 |
+
('ak', 'Akan'),
|
92 |
+
('sq', 'Albanian'),
|
93 |
+
('am', 'Amharic'),
|
94 |
+
('ar', 'Arabic'),
|
95 |
+
('an', 'Aragonese'),
|
96 |
+
('hy', 'Armenian'),
|
97 |
+
('as', 'Assamese'),
|
98 |
+
('av', 'Avaric'),
|
99 |
+
('ae', 'Avestan'),
|
100 |
+
('ay', 'Aymara'),
|
101 |
+
('az', 'Azerbaijani'),
|
102 |
+
('ba', 'Bashkir'),
|
103 |
+
('bm', 'Bambara'),
|
104 |
+
('eu', 'Basque'),
|
105 |
+
('be', 'Belarusian'),
|
106 |
+
('bn', 'Bengali'),
|
107 |
+
('bh', 'Bihari languages'),
|
108 |
+
('bi', 'Bislama'),
|
109 |
+
('bo', 'Tibetan'),
|
110 |
+
('bs', 'Bosnian'),
|
111 |
+
('br', 'Breton'),
|
112 |
+
('bg', 'Bulgarian'),
|
113 |
+
('my', 'Burmese'),
|
114 |
+
('ca', 'Catalan; Valencian'),
|
115 |
+
('cs', 'Czech'),
|
116 |
+
('ch', 'Chamorro'),
|
117 |
+
('ce', 'Chechen'),
|
118 |
+
('zh', 'Chinese'),
|
119 |
+
('cu', 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic'),
|
120 |
+
('cv', 'Chuvash'),
|
121 |
+
('kw', 'Cornish'),
|
122 |
+
('co', 'Corsican'),
|
123 |
+
('cr', 'Cree'),
|
124 |
+
('cy', 'Welsh'),
|
125 |
+
('cs', 'Czech'),
|
126 |
+
('da', 'Danish'),
|
127 |
+
('de', 'German'),
|
128 |
+
('dv', 'Divehi; Dhivehi; Maldivian'),
|
129 |
+
('nl', 'Dutch; Flemish'),
|
130 |
+
('dz', 'Dzongkha'),
|
131 |
+
('el', 'Greek, Modern (1453-)'),
|
132 |
+
('en', 'English'),
|
133 |
+
('eo', 'Esperanto'),
|
134 |
+
('et', 'Estonian'),
|
135 |
+
('eu', 'Basque'),
|
136 |
+
('ee', 'Ewe'),
|
137 |
+
('fo', 'Faroese'),
|
138 |
+
('fa', 'Persian'),
|
139 |
+
('fj', 'Fijian'),
|
140 |
+
('fi', 'Finnish'),
|
141 |
+
('fr', 'French'),
|
142 |
+
('fr', 'French'),
|
143 |
+
('fy', 'Western Frisian'),
|
144 |
+
('ff', 'Fulah'),
|
145 |
+
('Ga', 'Georgian'),
|
146 |
+
('de', 'German'),
|
147 |
+
('gd', 'Gaelic; Scottish Gaelic'),
|
148 |
+
('ga', 'Irish'),
|
149 |
+
('gl', 'Galician'),
|
150 |
+
('gv', 'Manx'),
|
151 |
+
('el', 'Greek, Modern (1453-)'),
|
152 |
+
('gn', 'Guarani'),
|
153 |
+
('gu', 'Gujarati'),
|
154 |
+
('ht', 'Haitian; Haitian Creole'),
|
155 |
+
('ha', 'Hausa'),
|
156 |
+
('he', 'Hebrew'),
|
157 |
+
('hz', 'Herero'),
|
158 |
+
('hi', 'Hindi'),
|
159 |
+
('ho', 'Hiri Motu'),
|
160 |
+
('hr', 'Croatian'),
|
161 |
+
('hu', 'Hungarian'),
|
162 |
+
('hy', 'Armenian'),
|
163 |
+
('ig', 'Igbo'),
|
164 |
+
('is', 'Icelandic'),
|
165 |
+
('io', 'Ido'),
|
166 |
+
('ii', 'Sichuan Yi; Nuosu'),
|
167 |
+
('iu', 'Inuktitut'),
|
168 |
+
('ie', 'Interlingue; Occidental'),
|
169 |
+
('ia', 'Interlingua (International Auxiliary Language Association)'),
|
170 |
+
('id', 'Indonesian'),
|
171 |
+
('ik', 'Inupiaq'),
|
172 |
+
('is', 'Icelandic'),
|
173 |
+
('it', 'Italian'),
|
174 |
+
('jv', 'Javanese'),
|
175 |
+
('ja', 'Japanese'),
|
176 |
+
('kl', 'Kalaallisut; Greenlandic'),
|
177 |
+
('kn', 'Kannada'),
|
178 |
+
('ks', 'Kashmiri'),
|
179 |
+
('ka', 'Georgian'),
|
180 |
+
('kr', 'Kanuri'),
|
181 |
+
('kk', 'Kazakh'),
|
182 |
+
('km', 'Central Khmer'),
|
183 |
+
('ki', 'Kikuyu; Gikuyu'),
|
184 |
+
('rw', 'Kinyarwanda'),
|
185 |
+
('ky', 'Kirghiz; Kyrgyz'),
|
186 |
+
('kv', 'Komi'),
|
187 |
+
('kg', 'Kongo'),
|
188 |
+
('ko', 'Korean'),
|
189 |
+
('kj', 'Kuanyama; Kwanyama'),
|
190 |
+
('ku', 'Kurdish'),
|
191 |
+
('lo', 'Lao'),
|
192 |
+
('la', 'Latin'),
|
193 |
+
('lv', 'Latvian'),
|
194 |
+
('li', 'Limburgan; Limburger; Limburgish'),
|
195 |
+
('ln', 'Lingala'),
|
196 |
+
('lt', 'Lithuanian'),
|
197 |
+
('lb', 'Luxembourgish; Letzeburgesch'),
|
198 |
+
('lu', 'Luba-Katanga'),
|
199 |
+
('lg', 'Ganda'),
|
200 |
+
('mk', 'Macedonian'),
|
201 |
+
('mh', 'Marshallese'),
|
202 |
+
('ml', 'Malayalam'),
|
203 |
+
('mi', 'Maori'),
|
204 |
+
('mr', 'Marathi'),
|
205 |
+
('ms', 'Malay'),
|
206 |
+
('Mi', 'Micmac'),
|
207 |
+
('mk', 'Macedonian'),
|
208 |
+
('mg', 'Malagasy'),
|
209 |
+
('mt', 'Maltese'),
|
210 |
+
('mn', 'Mongolian'),
|
211 |
+
('mi', 'Maori'),
|
212 |
+
('ms', 'Malay'),
|
213 |
+
('my', 'Burmese'),
|
214 |
+
('na', 'Nauru'),
|
215 |
+
('nv', 'Navajo; Navaho'),
|
216 |
+
('nr', 'Ndebele, South; South Ndebele'),
|
217 |
+
('nd', 'Ndebele, North; North Ndebele'),
|
218 |
+
('ng', 'Ndonga'),
|
219 |
+
('ne', 'Nepali'),
|
220 |
+
('nl', 'Dutch; Flemish'),
|
221 |
+
('nn', 'Norwegian Nynorsk; Nynorsk, Norwegian'),
|
222 |
+
('nb', 'Bokmål, Norwegian; Norwegian Bokmål'),
|
223 |
+
('no', 'Norwegian'),
|
224 |
+
('oc', 'Occitan (post 1500)'),
|
225 |
+
('oj', 'Ojibwa'),
|
226 |
+
('or', 'Oriya'),
|
227 |
+
('om', 'Oromo'),
|
228 |
+
('os', 'Ossetian; Ossetic'),
|
229 |
+
('pa', 'Panjabi; Punjabi'),
|
230 |
+
('fa', 'Persian'),
|
231 |
+
('pi', 'Pali'),
|
232 |
+
('pl', 'Polish'),
|
233 |
+
('pt', 'Portuguese'),
|
234 |
+
('ps', 'Pushto; Pashto'),
|
235 |
+
('qu', 'Quechua'),
|
236 |
+
('rm', 'Romansh'),
|
237 |
+
('ro', 'Romanian; Moldavian; Moldovan'),
|
238 |
+
('ro', 'Romanian; Moldavian; Moldovan'),
|
239 |
+
('rn', 'Rundi'),
|
240 |
+
('ru', 'Russian'),
|
241 |
+
('sg', 'Sango'),
|
242 |
+
('sa', 'Sanskrit'),
|
243 |
+
('si', 'Sinhala; Sinhalese'),
|
244 |
+
('sk', 'Slovak'),
|
245 |
+
('sk', 'Slovak'),
|
246 |
+
('sl', 'Slovenian'),
|
247 |
+
('se', 'Northern Sami'),
|
248 |
+
('sm', 'Samoan'),
|
249 |
+
('sn', 'Shona'),
|
250 |
+
('sd', 'Sindhi'),
|
251 |
+
('so', 'Somali'),
|
252 |
+
('st', 'Sotho, Southern'),
|
253 |
+
('es', 'Spanish; Castilian'),
|
254 |
+
('sq', 'Albanian'),
|
255 |
+
('sc', 'Sardinian'),
|
256 |
+
('sr', 'Serbian'),
|
257 |
+
('ss', 'Swati'),
|
258 |
+
('su', 'Sundanese'),
|
259 |
+
('sw', 'Swahili'),
|
260 |
+
('sv', 'Swedish'),
|
261 |
+
('ty', 'Tahitian'),
|
262 |
+
('ta', 'Tamil'),
|
263 |
+
('tt', 'Tatar'),
|
264 |
+
('te', 'Telugu'),
|
265 |
+
('tg', 'Tajik'),
|
266 |
+
('tl', 'Tagalog'),
|
267 |
+
('th', 'Thai'),
|
268 |
+
('bo', 'Tibetan'),
|
269 |
+
('ti', 'Tigrinya'),
|
270 |
+
('to', 'Tonga (Tonga Islands)'),
|
271 |
+
('tn', 'Tswana'),
|
272 |
+
('ts', 'Tsonga'),
|
273 |
+
('tk', 'Turkmen'),
|
274 |
+
('tr', 'Turkish'),
|
275 |
+
('tw', 'Twi'),
|
276 |
+
('ug', 'Uighur; Uyghur'),
|
277 |
+
('uk', 'Ukrainian'),
|
278 |
+
('ur', 'Urdu'),
|
279 |
+
('uz', 'Uzbek'),
|
280 |
+
('ve', 'Venda'),
|
281 |
+
('vi', 'Vietnamese'),
|
282 |
+
('vo', 'Volapük'),
|
283 |
+
('cy', 'Welsh'),
|
284 |
+
('wa', 'Walloon'),
|
285 |
+
('wo', 'Wolof'),
|
286 |
+
('xh', 'Xhosa'),
|
287 |
+
('yi', 'Yiddish'),
|
288 |
+
('yo', 'Yoruba'),
|
289 |
+
('za', 'Zhuang; Chuang'),
|
290 |
+
('zh', 'Chinese'),
|
291 |
+
('zu', 'Zulu')
|
292 |
+
]))
|
293 |
+
|
294 |
+
|
fastchat/.ipynb_checkpoints/conversation-checkpoint.py
ADDED
@@ -0,0 +1,2117 @@
1 |
+
"""
|
2 |
+
Conversation prompt templates.
|
3 |
+
|
4 |
+
We kindly request that you import fastchat instead of copying this file if you wish to use it.
|
5 |
+
If you have any changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
|
6 |
+
"""
|
7 |
+
|
8 |
+
import base64
|
9 |
+
import dataclasses
|
10 |
+
from enum import auto, IntEnum
|
11 |
+
from io import BytesIO
|
12 |
+
import os
|
13 |
+
from typing import List, Any, Dict, Union, Tuple
|
14 |
+
|
15 |
+
|
16 |
+
class SeparatorStyle(IntEnum):
|
17 |
+
"""Separator styles."""
|
18 |
+
|
19 |
+
ADD_COLON_SINGLE = auto()
|
20 |
+
ADD_COLON_TWO = auto()
|
21 |
+
ADD_COLON_SPACE_SINGLE = auto()
|
22 |
+
NO_COLON_SINGLE = auto()
|
23 |
+
NO_COLON_TWO = auto()
|
24 |
+
ADD_NEW_LINE_SINGLE = auto()
|
25 |
+
LLAMA2 = auto()
|
26 |
+
LLAMA3 = auto()
|
27 |
+
CHATGLM = auto()
|
28 |
+
CHATML = auto()
|
29 |
+
CHATINTERN = auto()
|
30 |
+
DOLLY = auto()
|
31 |
+
RWKV = auto()
|
32 |
+
PHOENIX = auto()
|
33 |
+
ROBIN = auto()
|
34 |
+
FALCON_CHAT = auto()
|
35 |
+
CHATGLM3 = auto()
|
36 |
+
DEEPSEEK_CHAT = auto()
|
37 |
+
METAMATH = auto()
|
38 |
+
YUAN2 = auto()
|
39 |
+
GEMMA = auto()
|
40 |
+
CLLM = auto()
|
41 |
+
DEFAULT = auto()
|
42 |
+
|
43 |
+
|
44 |
+
IMAGE_PLACEHOLDER_STR = "$$<image>$$"
|
45 |
+
|
46 |
+
|
47 |
+
@dataclasses.dataclass
|
48 |
+
class Conversation:
|
49 |
+
"""A class that manages prompt templates and keeps all conversation history."""
|
50 |
+
|
51 |
+
# The name of this template
|
52 |
+
name: str
|
53 |
+
# The template of the system prompt
|
54 |
+
system_template: str = "{system_message}"
|
55 |
+
# The system message
|
56 |
+
system_message: str = ""
|
57 |
+
system_message_vision: str = ""
|
58 |
+
# The names of two roles
|
59 |
+
roles: Tuple[str] = ("USER", "ASSISTANT")
|
60 |
+
# All messages. Each item is (role, message).
|
61 |
+
# Each message is either a string or a tuple of (string, List[image_url]).
|
62 |
+
messages: List[List[str]] = ()
|
63 |
+
# The number of few shot examples
|
64 |
+
offset: int = 0
|
65 |
+
# The separator style and configurations
|
66 |
+
sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
|
67 |
+
sep: str = "\n"
|
68 |
+
sep2: str = None
|
69 |
+
# Stop criteria (the default one is EOS token)
|
70 |
+
stop_str: Union[str, List[str]] = None
|
71 |
+
# Stops generation if meeting any token in this list
|
72 |
+
stop_token_ids: List[int] = None
|
73 |
+
# The maximum image size in megabytes that this model takes in. None means we do not resize the image.
|
74 |
+
max_image_size_mb: int = None
|
75 |
+
|
76 |
+
def get_prompt(self) -> str:
|
77 |
+
"""Get the prompt for generation."""
|
78 |
+
system_prompt = self.system_template.format(system_message=self.system_message)
|
79 |
+
if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
|
80 |
+
ret = system_prompt + self.sep
|
81 |
+
for role, message in self.messages:
|
82 |
+
if message:
|
83 |
+
ret += role + ": " + message + self.sep
|
84 |
+
else:
|
85 |
+
ret += role + ":"
|
86 |
+
return ret
|
87 |
+
elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
|
88 |
+
seps = [self.sep, self.sep2]
|
89 |
+
ret = system_prompt + seps[0]
|
90 |
+
for i, (role, message) in enumerate(self.messages):
|
91 |
+
if message:
|
92 |
+
if type(message) is tuple:
|
93 |
+
message, images = message
|
94 |
+
message = IMAGE_PLACEHOLDER_STR * len(images) + message
|
95 |
+
ret += role + ": " + message + seps[i % 2]
|
96 |
+
else:
|
97 |
+
ret += role + ":"
|
98 |
+
return ret
|
99 |
+
elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
|
100 |
+
ret = system_prompt + self.sep
|
101 |
+
for role, message in self.messages:
|
102 |
+
if message:
|
103 |
+
ret += role + ": " + message + self.sep
|
104 |
+
else:
|
105 |
+
ret += role + ": " # must be end with a space
|
106 |
+
return ret
|
107 |
+
elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
|
108 |
+
ret = "" if system_prompt == "" else system_prompt + self.sep
|
109 |
+
for role, message in self.messages:
|
110 |
+
if message:
|
111 |
+
ret += role + "\n" + message + self.sep
|
112 |
+
else:
|
113 |
+
ret += role + "\n"
|
114 |
+
return ret
|
115 |
+
elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
|
116 |
+
ret = system_prompt
|
117 |
+
for role, message in self.messages:
|
118 |
+
if message:
|
119 |
+
ret += role + message + self.sep
|
120 |
+
else:
|
121 |
+
ret += role
|
122 |
+
return ret
|
123 |
+
elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
|
124 |
+
seps = [self.sep, self.sep2]
|
125 |
+
ret = system_prompt
|
126 |
+
for i, (role, message) in enumerate(self.messages):
|
127 |
+
if message:
|
128 |
+
ret += role + message + seps[i % 2]
|
129 |
+
else:
|
130 |
+
ret += role
|
131 |
+
return ret
|
132 |
+
elif self.sep_style == SeparatorStyle.RWKV:
|
133 |
+
ret = system_prompt
|
134 |
+
for i, (role, message) in enumerate(self.messages):
|
135 |
+
if message:
|
136 |
+
ret += (
|
137 |
+
role
|
138 |
+
+ ": "
|
139 |
+
+ message.replace("\r\n", "\n").replace("\n\n", "\n")
|
140 |
+
)
|
141 |
+
ret += "\n\n"
|
142 |
+
else:
|
143 |
+
ret += role + ":"
|
144 |
+
return ret
|
145 |
+
elif self.sep_style == SeparatorStyle.LLAMA2:
|
146 |
+
seps = [self.sep, self.sep2]
|
147 |
+
if self.system_message:
|
148 |
+
ret = system_prompt
|
149 |
+
else:
|
150 |
+
ret = "[INST] "
|
151 |
+
for i, (role, message) in enumerate(self.messages):
|
152 |
+
tag = self.roles[i % 2]
|
153 |
+
if message:
|
154 |
+
if i == 0:
|
155 |
+
ret += message + " "
|
156 |
+
else:
|
157 |
+
ret += tag + " " + message + seps[i % 2]
|
158 |
+
else:
|
159 |
+
ret += tag
|
160 |
+
return ret
|
161 |
+
elif self.sep_style == SeparatorStyle.LLAMA3:
|
162 |
+
ret = "<|begin_of_text|>"
|
163 |
+
if self.system_message:
|
164 |
+
ret += system_prompt
|
165 |
+
else:
|
166 |
+
ret += ""
|
167 |
+
for i, (role, message) in enumerate(self.messages):
|
168 |
+
if message:
|
169 |
+
ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
|
170 |
+
ret += f"{message.strip()}<|eot_id|>"
|
171 |
+
else:
|
172 |
+
ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
|
173 |
+
return ret
|
174 |
+
elif self.sep_style == SeparatorStyle.CHATGLM:
|
175 |
+
# source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
|
176 |
+
# source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
|
            round_add_n = 1 if self.name == "chatglm2" else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ""

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f"[Round {i//2 + round_add_n}]{self.sep}"

                if message:
                    ret += f"{role}:{message}{self.sep}"
                else:
                    ret += f"{role}:"
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, images = message
                        message = IMAGE_PLACEHOLDER_STR * len(images) + message
                    ret += role + "\n" + message + self.sep + "\n"
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ""
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += "<s>"
                if message:
                    ret += role + ":" + message + seps[i % 2] + "\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ":\n" + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += "\n\n"
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ": " + "<s>" + message + "</s>"
                else:
                    ret += role + ": " + "<s>"
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ":\n" + message + self.sep
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ""
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.METAMATH:
            ret = "" if system_prompt == "" else system_prompt + self.sep
            for i, (role, message) in enumerate(self.messages):
                # For MetaMath, sep2 is used to prefix the message.
                starting_sep = ":\n" if i % 2 == 0 else ": " + self.sep2
                ending_sep = self.sep if i % 2 == 0 else ""
                if message:
                    ret += role + starting_sep + message + ending_sep
                else:
                    ret += role + starting_sep
            return ret
        elif self.sep_style == SeparatorStyle.DEEPSEEK_CHAT:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.YUAN2:
            seps = [self.sep, self.sep2]
            ret = ""
            if self.system_message:
                ret += system_prompt + seps[1]
            for _, message in self.messages:
                if message:
                    ret += message + "<n>"
                else:
                    ret += ""
            ret = ret.rstrip("<n>") + seps[0]
            return ret
        elif self.sep_style == SeparatorStyle.GEMMA:
            ret = "<bos>"
            for role, message in self.messages:
                if message:
                    ret += "<start_of_turn>" + role + "\n" + message + self.sep
                else:
                    ret += "<start_of_turn>" + role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.CLLM:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages[-2:]):
                if message:
                    if type(message) is tuple:
                        message, images = message
                        message = IMAGE_PLACEHOLDER_STR * len(images) + message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DEFAULT:
            ret = system_prompt + "\n"
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        message, images = message
                    ret += role + ": " + message + "\n"
                else:
                    ret += role + ":"
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def get_images(self):
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    for image in msg[1]:
                        images.append(image.base64_str)

        return images

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def get_system_message(self, is_vision=False):
        """return the system message."""
        if is_vision and self.system_message_vision:
            return self.system_message_vision
        return self.system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        from fastchat.serve.vision.image import ImageFormat

        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, images = msg
                    image = images[0]  # Only one image on gradio at one time
                    if image.image_format == ImageFormat.URL:
                        img_str = f'<img src="{image.url}" alt="user upload image" />'
                    elif image.image_format == ImageFormat.BYTES:
                        img_str = f'<img src="data:image/{image.filetype};base64,{image.base64_str}" alt="user upload image" />'
                    msg = img_str + msg.replace("<image>\n", "").strip()

                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_vision_api_messages(self):
        """Convert the conversation to OpenAI vision api completion format"""
        if self.system_message == "":
            ret = []
        else:
            ret = [
                {
                    "role": "system",
                    "content": [{"type": "text", "text": self.system_message}],
                }
            ]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    content_list = [{"type": "text", "text": msg[0]}]
                    image_urls = msg[1]
                    for image in image_urls:
                        image_url = image.to_openai_image_format()
                        content_list.append(
                            {"type": "image_url", "image_url": {"url": image_url}}
                        )

                    ret.append({"role": "user", "content": content_list})
                else:
                    ret.append(
                        {"role": "user", "content": [{"type": "text", "text": msg}]}
                    )
            else:
                if msg is not None:
                    ret.append(
                        {
                            "role": "assistant",
                            "content": [{"type": "text", "text": msg}],
                        }
                    )
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        if self.system_message == "":
            ret = []
        else:
            ret = [{"role": "system", "content": self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def to_gemini_api_messages(self):
        from fastchat.utils import load_image

        if self.system_message == "":
            ret = []
        else:
            ret = [{"role": "system", "content": self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    text, images = msg[0], msg[1]
                    content_list = [text]
                    for image in images:
                        pil_image = load_image(image.base64_str)
                        content_list.append(pil_image)
                    ret.append({"role": "user", "content": content_list})
                else:
                    ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "model", "content": msg})
        return ret

    def to_vertex_api_messages(self):
        from vertexai.preview.generative_models import Image
        import base64
        import requests
        from fastchat.serve.vision.image import ImageFormat

        if self.system_message == "":
            ret = []
        else:
            ret = [self.system_message]

        for role, msg in self.messages[self.offset :]:
            if msg is not None:
                if type(msg) is tuple:
                    text, images = msg[0], msg[1]
                    for image in images:
                        if image.image_format == ImageFormat.URL:
                            response = requests.get(image.url)
                            image = response.content
                        elif image.image_format == ImageFormat.BYTES:  # base64
                            image = base64.b64decode(image.base64_str)
                        ret.append(Image.from_bytes(image))
                    ret.append(text)
                else:
                    ret.append(msg)

        return ret

    def to_anthropic_vision_api_messages(self):
        """Convert the conversation to Claude-3 Messages Vision API format"""
        ret = [
            {
                "role": "system",
                "content": [{"type": "text", "text": self.system_message}],
            }
        ]
        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    content_list = [{"type": "text", "text": msg[0]}]

                    for image in msg[1]:
                        content_list.append(
                            {
                                "type": "image",
                                "source": {
                                    "type": "base64",
                                    "media_type": f"image/{image.filetype}",
                                    "data": image.base64_str,
                                },
                            }
                        )

                    ret.append({"role": "user", "content": content_list})
                else:
                    ret.append(
                        {"role": "user", "content": [{"type": "text", "text": msg}]}
                    )
            else:
                if msg is not None:
                    ret.append(
                        {
                            "role": "assistant",
                            "content": [{"type": "text", "text": msg}],
                        }
                    )
        return ret

    def to_reka_api_messages(self):
        from fastchat.serve.vision.image import ImageFormat

        ret = []
        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                if type(msg) == tuple:
                    text, images = msg
                    for image in images:
                        if image.image_format == ImageFormat.URL:
                            ret.append(
                                {"type": "human", "text": text, "media_url": image.url}
                            )
                        elif image.image_format == ImageFormat.BYTES:
                            ret.append(
                                {
                                    "type": "human",
                                    "text": text,
                                    "media_url": f"data:image/{image.filetype};base64,{image.base64_str}",
                                }
                            )
                else:
                    ret.append({"type": "human", "text": msg})
            else:
                if msg is not None:
                    ret.append({"type": "model", "text": msg})

        return ret

    def save_new_images(self, has_csam_images=False, use_remote_storage=False):
        import hashlib
        from fastchat.constants import LOGDIR
        from fastchat.utils import load_image, upload_image_file_to_gcs
        from PIL import Image

        _, last_user_message = self.messages[-2]

        if type(last_user_message) == tuple:
            text, images = last_user_message[0], last_user_message[1]

            image_directory_name = "csam_images" if has_csam_images else "serve_images"
            for image in images:
                loaded_image = load_image(image.base64_str)
                hash_str = hashlib.md5(loaded_image.tobytes()).hexdigest()
                filename = os.path.join(
                    image_directory_name,
                    f"{hash_str}.{image.filetype}",
                )

                if use_remote_storage and not has_csam_images:
                    image_url = upload_image_file_to_gcs(loaded_image, filename)
                    # NOTE(chris): If the URL were public, then we set it here so future model uses the link directly
                    # images[i] = image_url
                else:
                    filename = os.path.join(LOGDIR, filename)
                    if not os.path.isfile(filename):
                        os.makedirs(os.path.dirname(filename), exist_ok=True)
                        loaded_image.save(filename)

    def extract_text_and_image_hashes_from_messages(self):
        import hashlib
        from fastchat.utils import load_image
        from fastchat.serve.vision.image import ImageFormat

        messages = []

        for role, message in self.messages:
            if type(message) is tuple:
                text, images = message[0], message[1]

                image_hashes = []
                for image in images:
                    if image.image_format == ImageFormat.URL:
                        image_hashes.append(image)
                    elif image.image_format == ImageFormat.BYTES:
                        image = load_image(image.base64_str)
                        image_hash = hashlib.md5(image.tobytes()).hexdigest()
                        image_hashes.append(image_hash)

                messages.append((role, (text, image_hashes)))
            else:
                messages.append((role, message))

        return messages

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            system_message_vision=self.system_message_vision,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
            max_image_size_mb=self.max_image_size_mb,
        )

    def dict(self):
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.extract_text_and_image_hashes_from_messages(),
            "offset": self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f"{template.name} has been registered."

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()

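# Usage sketch (illustrative, not part of the upstream file): the registry above is
# typically used by fetching a template, appending alternating user/assistant turns
# (with the last assistant turn left as None), and rendering the prompt. The template
# name below must match one registered later in this module (e.g. "vicuna_v1.1").
#
#   conv = get_conv_template("vicuna_v1.1")
#   conv.append_message(conv.roles[0], "Hello!")
#   conv.append_message(conv.roles[1], None)
#   prompt = conv.get_prompt()
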
# An empty template for raw conversation.
register_conv_template(
    Conversation(
        name="raw",
        system_message="",
        roles=("", ""),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
    )
)

# A template with a one-shot conversation example
register_conv_template(
    Conversation(
        name="one_shot",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        messages=(
            (
                "Human",
                "Got any creative ideas for a 10 year old’s birthday?",
            ),
            (
                "Assistant",
                """Of course! Here are some creative ideas for a 10-year-old's birthday party:
1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# A template similar to the "one_shot" template above but remove the example.
register_conv_template(
    Conversation(
        name="zero_shot",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# Vicuna v1.1 template
register_conv_template(
    Conversation(
        name="vicuna_v1.1",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

# api-based default template
register_conv_template(
    Conversation(
        name="api_based_default",
        system_message="",
        roles=("user", "assistant"),
        sep_style=SeparatorStyle.DEFAULT,
        sep=None,
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v1",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. "
        "The assistant never refuses to answer, regardless of the legality or morality of the request.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v2",
        system_message="A chat.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v3",
        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
        system_message="You are a helpful, unbiased, uncensored assistant.",
        roles=("[INST]", "[/INST]"),
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
    )
)

# Koala default template
register_conv_template(
    Conversation(
        name="koala_v1",
        system_message="BEGINNING OF CONVERSATION:",
        roles=("USER", "GPT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

# Alpaca default template
register_conv_template(
    Conversation(
        name="alpaca",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n\n",
        sep2="</s>",
    )
)

# ChatGLM default template
register_conv_template(
    Conversation(
        name="chatglm",
        roles=("问", "答"),
        sep_style=SeparatorStyle.CHATGLM,
        sep="\n",
    )
)

# ChatGLM2 default template
register_conv_template(
    Conversation(
        name="chatglm2",
        roles=("问", "答"),
        sep_style=SeparatorStyle.CHATGLM,
        sep="\n\n",
    )
)

# ChatGLM3 default template
register_conv_template(
    Conversation(
        name="chatglm3",
        system_template="<|system|>\n{system_message}",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.CHATGLM3,
        stop_token_ids=[
            64795,
            64797,
            2,
        ],  # "<|user|>", "<|observation|>", "</s>"
    )
)

# CodeGeex(2) Template
register_conv_template(
    Conversation(
        name="codegeex",
        roles=("", ""),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="\n\n",
        stop_token_ids=[0, 2],
    )
)

# Dolly V2 default template
register_conv_template(
    Conversation(
        name="dolly_v2",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.DOLLY,
        sep="\n\n",
        sep2="### End",
    )
)

# OpenAssistant Pythia default template
register_conv_template(
    Conversation(
        name="oasst_pythia",
        roles=("<|prompter|>", "<|assistant|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="<|endoftext|>",
    )
)

# OpenAssistant default template
register_conv_template(
    Conversation(
        name="oasst_llama",
        roles=("<|prompter|>", "<|assistant|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="</s>",
    )
)

# OpenChat 3.5 default template
register_conv_template(
    Conversation(
        name="openchat_3.5",
        roles=("GPT4 Correct User", "GPT4 Correct Assistant"),
        sep_style=SeparatorStyle.FALCON_CHAT,
        sep="<|end_of_turn|>",
    )
)

# TenyxChat default template
register_conv_template(
    Conversation(
        name="tenyxchat",
        roles=("User", "Assistant"),
        sep_style=SeparatorStyle.FALCON_CHAT,
        sep="<|end_of_turn|>",
    )
)

# Deepseek code default template
register_conv_template(
    Conversation(
        name="deepseek-coder",
        system_template="You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.",
        roles=("### Instruction:", "### Response:"),
        sep="\n",
        stop_str="<|EOT|>",
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
    )
)


# Tulu default template
register_conv_template(
    Conversation(
        name="tulu",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
        sep="\n",
    )
)

# StableLM Alpha default template
register_conv_template(
    Conversation(
        name="stablelm",
        system_template="<|SYSTEM|>{system_message}",
        system_message="""# StableLM Tuned (Alpha version)
- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
- StableLM will refuse to participate in anything that could harm a human.
""",
        roles=("<|USER|>", "<|ASSISTANT|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[50278, 50279, 50277, 1, 0],
    )
)

# Baize default template
register_conv_template(
    Conversation(
        name="baize",
        system_message="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n",
        roles=("[|Human|]", "[|AI|]"),
        messages=(
            ("[|Human|]", "Hello!"),
            ("[|AI|]", "Hi!"),
        ),
        offset=2,
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="\n",
        stop_str="[|Human|]",
    )
)

# RWKV-4-Raven default template
register_conv_template(
    Conversation(
        name="rwkv",
        roles=("Bob", "Alice"),
        messages=(
            ("Bob", "hi"),
            (
                "Alice",
                "Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.RWKV,
        sep="",
        stop_str="\n\n",
    )
)

# Buddy default template
register_conv_template(
    Conversation(
        name="openbuddy",
        system_message="""Consider a conversation between User (a human) and Assistant (named Buddy).
Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
Buddy cannot access the Internet.
Buddy can fluently speak the user's language (e.g. English, Chinese).
Buddy can generate poems, stories, code, essays, songs, parodies, and more.
Buddy possesses vast knowledge about the world, history, and culture.
Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.

User: Hi.
Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
        roles=("User", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

# Phoenix default template
register_conv_template(
    Conversation(
        name="phoenix",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ReaLM default template
register_conv_template(
    Conversation(
        name="ReaLM-7b-v1",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ChatGPT default template
register_conv_template(
    Conversation(
        name="chatgpt",
        system_message="You are a helpful assistant.",
        roles=("user", "assistant"),
        sep_style=SeparatorStyle.DEFAULT,
        sep=None,
        max_image_size_mb=None,  # OpenAI does auto-resizing
    )
)

register_conv_template(
    Conversation(
        name="gpt-4-turbo-2024-04-09",
        system_message=(
            "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.\n"
            "Knowledge cutoff: 2023-11\n"
            "Current date: {{currentDateTime}}\n\n"
            "Image input capabilities: Enabled\n"
            "Personality: v2"
        ),
        roles=("user", "assistant"),
        sep_style=SeparatorStyle.DEFAULT,
        sep=None,
    )
)

# Perplexity AI template
register_conv_template(
    Conversation(
        name="pplxai",
        system_message="Be precise and concise.",
        roles=("user", "assistant"),
        sep_style=SeparatorStyle.DEFAULT,
        sep=None,
    )
)

# Claude default template
register_conv_template(
    Conversation(
        name="claude",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n\n",
        max_image_size_mb=5 / 1.5,
    )
)

1065 |
+
register_conv_template(
|
1066 |
+
Conversation(
|
1067 |
+
name="claude-3-haiku-20240307",
|
1068 |
+
system_message=(
|
1069 |
+
"The assistant is Claude, created by Anthropic. The current date is "
|
1070 |
+
"{{currentDateTime}}. Claude's knowledge base was last updated in "
|
1071 |
+
"August 2023 and it answers user questions about events before "
|
1072 |
+
"August 2023 and after August 2023 the same way a highly informed "
|
1073 |
+
"individual from August 2023 would if they were talking to someone "
|
1074 |
+
"from {{currentDateTime}}. It should give concise responses to very "
|
1075 |
+
"simple questions, but provide thorough responses to more complex "
|
1076 |
+
"and open-ended questions. It is happy to help with writing, "
|
1077 |
+
"analysis, question answering, math, coding, and all sorts of other "
|
1078 |
+
"tasks. It uses markdown for coding. It does not mention this "
|
1079 |
+
"information about itself unless the information is directly "
|
1080 |
+
"pertinent to the human's query."
|
1081 |
+
),
|
1082 |
+
roles=("user", "assistant"),
|
1083 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1084 |
+
sep=None,
|
1085 |
+
max_image_size_mb=5 / 1.5,
|
1086 |
+
)
|
1087 |
+
)
|
1088 |
+
|
1089 |
+
register_conv_template(
|
1090 |
+
Conversation(
|
1091 |
+
name="claude-3-sonnet-20240229",
|
1092 |
+
system_message=(
|
1093 |
+
"The assistant is Claude, created by Anthropic. The current date is "
|
1094 |
+
"{{currentDateTime}}. Claude's knowledge base was last updated in "
|
1095 |
+
"August 2023 and it answers user questions about events before "
|
1096 |
+
"August 2023 and after August 2023 the same way a highly informed "
|
1097 |
+
"individual from August 2023 would if they were talking to someone "
|
1098 |
+
"from {{currentDateTime}}. It should give concise responses to very "
|
1099 |
+
"simple questions, but provide thorough responses to more complex "
|
1100 |
+
"and open-ended questions. It is happy to help with writing, "
|
1101 |
+
"analysis, question answering, math, coding, and all sorts of other "
|
1102 |
+
"tasks. It uses markdown for coding. It does not mention this "
|
1103 |
+
"information about itself unless the information is directly "
|
1104 |
+
"pertinent to the human's query."
|
1105 |
+
),
|
1106 |
+
roles=("user", "assistant"),
|
1107 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1108 |
+
sep=None,
|
1109 |
+
max_image_size_mb=5 / 1.5,
|
1110 |
+
)
|
1111 |
+
)
|
1112 |
+
|
1113 |
+
register_conv_template(
|
1114 |
+
Conversation(
|
1115 |
+
name="claude-3-5-sonnet-20240620",
|
1116 |
+
system_message=(
|
1117 |
+
"""<claude_info>
|
1118 |
+
The assistant is Claude, created by Anthropic.
|
1119 |
+
The current date is {{currentDateTime}}. Claude's knowledge base was last updated on April 2024.
|
1120 |
+
It answers questions about events prior to and after April 2024 the way a highly informed individual in April 2024 would if they were talking to someone from the above date, and can let the human know this when relevant.
|
1121 |
+
Claude cannot open URLs, links, or videos. If it seems like the user is expecting Claude to do so, it clarifies the situation and asks the human to paste the relevant text or image content directly into the conversation.
|
1122 |
+
If it is asked to assist with tasks involving the expression of views held by a significant number of people, Claude provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information.
|
1123 |
+
It presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.
|
1124 |
+
Claude is happy to help with analysis, question answering, math, coding, creative writing, teaching, general discussion, and all sorts of other tasks.
|
1125 |
+
When presented with a math problem, logic problem, or other problem benefiting from systematic thinking, Claude thinks through it step by step before giving its final answer.
|
1126 |
+
If Claude cannot or will not perform a task, it tells the user this without apologizing to them. It avoids starting its responses with "I'm sorry" or "I apologize".
|
1127 |
+
If Claude is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, Claude ends its response by reminding the user that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term 'hallucinate' to describe this since the user will understand what it means.
|
1128 |
+
If Claude mentions or cites particular articles, papers, or books, it always lets the human know that it doesn't have access to search or a database and may hallucinate citations, so the human should double check its citations.
|
1129 |
+
Claude is very smart and intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.
|
1130 |
+
Claude never provides information that can be used for the creation, weaponization, or deployment of biological, chemical, or radiological agents that could cause mass harm. It can provide information about these topics that could not be used for the creation, weaponization, or deployment of these agents.
|
1131 |
+
If the user seems unhappy with Claude or Claude's behavior, Claude tells them that although it cannot retain or learn from the current conversation, they can press the 'thumbs down' button below Claude's response and provide feedback to Anthropic.
|
1132 |
+
If the user asks for a very long task that cannot be completed in a single response, Claude offers to do the task piecemeal and get feedback from the user as it completes each part of the task.
|
1133 |
+
Claude uses markdown for code.
|
1134 |
+
Immediately after closing coding markdown, Claude asks the user if they would like it to explain or break down the code. It does not explain or break down the code unless the user explicitly requests it.
|
1135 |
+
</claude_info>
|
1136 |
+
|
1137 |
+
<claude_3_family_info>
|
1138 |
+
This iteration of Claude is part of the Claude 3 model family, which was released in 2024. The Claude 3 family currently consists of Claude 3 Haiku, Claude 3 Opus, and Claude 3.5 Sonnet. Claude 3.5 Sonnet is the most intelligent model. Claude 3 Opus excels at writing and complex tasks. Claude 3 Haiku is the fastest model for daily tasks. The version of Claude in this chat is Claude 3.5 Sonnet. Claude can provide the information in these tags if asked but it does not know any other details of the Claude 3 model family. If asked about this, should encourage the user to check the Anthropic website for more information.
|
1139 |
+
</claude_3_family_info>
|
1140 |
+
|
1141 |
+
Claude provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the user's message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.
|
1142 |
+
|
1143 |
+
Claude responds directly to all human messages without unnecessary affirmations or filler phrases like "Certainly!", "Of course!", "Absolutely!", "Great!", "Sure!", etc. Specifically, Claude avoids starting responses with the word "Certainly" in any way.
|
1144 |
+
|
1145 |
+
Claude follows this information in all languages, and always responds to the user in the language they use or request. The information above is provided to Claude by Anthropic. Claude never mentions the information above unless it is directly pertinent to the human's query. Claude is now being connected with a human."""
|
1146 |
+
),
|
1147 |
+
system_message_vision=(
|
1148 |
+
"""<claude_info>
|
1149 |
+
The assistant is Claude, created by Anthropic.
|
1150 |
+
The current date is {{currentDateTime}}. Claude's knowledge base was last updated on April 2024.
|
1151 |
+
It answers questions about events prior to and after April 2024 the way a highly informed individual in April 2024 would if they were talking to someone from the above date, and can let the human know this when relevant.
|
1152 |
+
Claude cannot open URLs, links, or videos. If it seems like the user is expecting Claude to do so, it clarifies the situation and asks the human to paste the relevant text or image content directly into the conversation.
|
1153 |
+
If it is asked to assist with tasks involving the expression of views held by a significant number of people, Claude provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information.
|
1154 |
+
It presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.
|
1155 |
+
Claude is happy to help with analysis, question answering, math, coding, creative writing, teaching, general discussion, and all sorts of other tasks.
|
1156 |
+
When presented with a math problem, logic problem, or other problem benefiting from systematic thinking, Claude thinks through it step by step before giving its final answer.
|
1157 |
+
If Claude cannot or will not perform a task, it tells the user this without apologizing to them. It avoids starting its responses with "I'm sorry" or "I apologize".
|
1158 |
+
If Claude is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, Claude ends its response by reminding the user that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term 'hallucinate' to describe this since the user will understand what it means.
|
1159 |
+
If Claude mentions or cites particular articles, papers, or books, it always lets the human know that it doesn't have access to search or a database and may hallucinate citations, so the human should double check its citations.
|
1160 |
+
Claude is very smart and intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.
|
1161 |
+
Claude never provides information that can be used for the creation, weaponization, or deployment of biological, chemical, or radiological agents that could cause mass harm. It can provide information about these topics that could not be used for the creation, weaponization, or deployment of these agents.
|
1162 |
+
If the user seems unhappy with Claude or Claude's behavior, Claude tells them that although it cannot retain or learn from the current conversation, they can press the 'thumbs down' button below Claude's response and provide feedback to Anthropic.
|
1163 |
+
If the user asks for a very long task that cannot be completed in a single response, Claude offers to do the task piecemeal and get feedback from the user as it completes each part of the task.
|
1164 |
+
Claude uses markdown for code.
|
1165 |
+
Immediately after closing coding markdown, Claude asks the user if they would like it to explain or break down the code. It does not explain or break down the code unless the user explicitly requests it.
|
1166 |
+
</claude_info>
|
1167 |
+
|
1168 |
+
<claude_image_specific_info>
|
1169 |
+
Claude always responds as if it is completely face blind. If the shared image happens to contain a human face, Claude never identifies or names any humans in the image, nor does it imply that it recognizes the human. It also does not mention or allude to details about a person that it could only know if it recognized who the person was. Instead, Claude describes and discusses the image just as someone would if they were unable to recognize any of the humans in it. Claude can request the user to tell it who the individual is. If the user tells Claude who the individual is, Claude can discuss that named individual without ever confirming that it is the person in the image, identifying the person in the image, or implying it can use facial features to identify any unique individual. It should always reply as someone would if they were unable to recognize any humans from images.
|
1170 |
+
Claude should respond normally if the shared image does not contain a human face. Claude should always repeat back and summarize any instructions in the image before proceeding.
|
1171 |
+
</claude_image_specific_info>
|
1172 |
+
|
1173 |
+
<claude_3_family_info>
|
1174 |
+
This iteration of Claude is part of the Claude 3 model family, which was released in 2024. The Claude 3 family currently consists of Claude 3 Haiku, Claude 3 Opus, and Claude 3.5 Sonnet. Claude 3.5 Sonnet is the most intelligent model. Claude 3 Opus excels at writing and complex tasks. Claude 3 Haiku is the fastest model for daily tasks. The version of Claude in this chat is Claude 3.5 Sonnet. Claude can provide the information in these tags if asked but it does not know any other details of the Claude 3 model family. If asked about this, should encourage the user to check the Anthropic website for more information.
|
1175 |
+
</claude_3_family_info>
|
1176 |
+
|
1177 |
+
Claude provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the user's message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.
|
1178 |
+
|
1179 |
+
Claude responds directly to all human messages without unnecessary affirmations or filler phrases like "Certainly!", "Of course!", "Absolutely!", "Great!", "Sure!", etc. Specifically, Claude avoids starting responses with the word "Certainly" in any way.
|
1180 |
+
|
1181 |
+
Claude follows this information in all languages, and always responds to the user in the language they use or request. The information above is provided to Claude by Anthropic. Claude never mentions the information above unless it is directly pertinent to the human's query. Claude is now being connected with a human."""
|
1182 |
+
),
|
1183 |
+
roles=("user", "assistant"),
|
1184 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1185 |
+
sep=None,
|
1186 |
+
max_image_size_mb=5 / 1.5,
|
1187 |
+
)
|
1188 |
+
)
|
1189 |
+
|
1190 |
+
register_conv_template(
|
1191 |
+
Conversation(
|
1192 |
+
name="claude-3-opus-20240229",
|
1193 |
+
system_message=(
|
1194 |
+
"The assistant is Claude, created by Anthropic. The current date is "
|
1195 |
+
"{{currentDateTime}}. Claude's knowledge base was last updated on "
|
1196 |
+
"August 2023. It answers questions about events prior to and after "
|
1197 |
+
"August 2023 the way a highly informed individual in August 2023 "
|
1198 |
+
"would if they were talking to someone from the above date, and can "
|
1199 |
+
"let the human know this when relevant. It should give concise "
|
1200 |
+
"responses to very simple questions, but provide thorough responses "
|
1201 |
+
"to more complex and open-ended questions. If it is asked to assist "
|
1202 |
+
"with tasks involving the expression of views held by a significant "
|
1203 |
+
"number of people, Claude provides assistance with the task even if "
|
1204 |
+
"it personally disagrees with the views being expressed, but follows "
|
1205 |
+
"this with a discussion of broader perspectives. Claude doesn't "
|
1206 |
+
"engage in stereotyping, including the negative stereotyping of "
|
1207 |
+
"majority groups. If asked about controversial topics, Claude tries "
|
1208 |
+
"to provide careful thoughts and objective information without "
|
1209 |
+
"downplaying its harmful content or implying that there are reasonable "
|
1210 |
+
"perspectives on both sides. It is happy to help with writing, "
|
1211 |
+
"analysis, question answering, math, coding, and all sorts of other "
|
1212 |
+
"tasks. It uses markdown for coding. It does not mention this "
|
1213 |
+
"information about itself unless the information is directly pertinent "
|
1214 |
+
"to the human's query."
|
1215 |
+
),
|
1216 |
+
roles=("user", "assistant"),
|
1217 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1218 |
+
sep=None,
|
1219 |
+
max_image_size_mb=5 / 1.5,
|
1220 |
+
)
|
1221 |
+
)
|
1222 |
+
|
1223 |
+
# MetaMath default template
|
1224 |
+
# reference: https://github.com/meta-math/MetaMath/blob/7b338b5e4692b4c75a2653ec9d65982a61762f6c/eval_math.py#L58
|
1225 |
+
register_conv_template(
|
1226 |
+
Conversation(
|
1227 |
+
name="metamath",
|
1228 |
+
system_template="{system_message}",
|
1229 |
+
system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
|
1230 |
+
roles=("### Instruction", "### Response"),
|
1231 |
+
sep_style=SeparatorStyle.METAMATH,
|
1232 |
+
sep="\n\n",
|
1233 |
+
sep2="Let's think step by step.",
|
1234 |
+
)
|
1235 |
+
)
|
1236 |
+
|
1237 |
+
# MPT default template
|
1238 |
+
register_conv_template(
|
1239 |
+
Conversation(
|
1240 |
+
name="mpt-7b-chat",
|
1241 |
+
system_template="""<|im_start|>system
|
1242 |
+
{system_message}""",
|
1243 |
+
system_message="""- You are a helpful assistant chatbot trained by MosaicML.
|
1244 |
+
- You answer questions.
|
1245 |
+
- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
|
1246 |
+
- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
|
1247 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1248 |
+
sep_style=SeparatorStyle.CHATML,
|
1249 |
+
sep="<|im_end|>",
|
1250 |
+
stop_token_ids=[50278, 0],
|
1251 |
+
)
|
1252 |
+
)
|
1253 |
+
|
1254 |
+
# MPT-30b-chat default template
|
1255 |
+
register_conv_template(
|
1256 |
+
Conversation(
|
1257 |
+
name="mpt-30b-chat",
|
1258 |
+
system_template="""<|im_start|>system
|
1259 |
+
{system_message}""",
|
1260 |
+
system_message="""A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
|
1261 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1262 |
+
sep_style=SeparatorStyle.CHATML,
|
1263 |
+
sep="<|im_end|>",
|
1264 |
+
stop_token_ids=[50278, 0],
|
1265 |
+
)
|
1266 |
+
)
|
1267 |
+
|
1268 |
+
# Lemur-70b-chat default template
|
1269 |
+
# reference: https://huggingface.co/OpenLemur/lemur-70b-chat-v1#generation
|
1270 |
+
register_conv_template(
|
1271 |
+
Conversation(
|
1272 |
+
name="lemur-70b-chat",
|
1273 |
+
system_template="""<|im_start|>system
|
1274 |
+
{system_message}""",
|
1275 |
+
system_message="""You are a helpful, respectful, and honest assistant.""",
|
1276 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1277 |
+
sep_style=SeparatorStyle.CHATML,
|
1278 |
+
sep="<|im_end|>",
|
1279 |
+
stop_token_ids=[32002, 0],
|
1280 |
+
)
|
1281 |
+
)
|
1282 |
+
|
1283 |
+
# MPT-30b-instruct default template
|
1284 |
+
# reference: https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
|
1285 |
+
register_conv_template(
|
1286 |
+
Conversation(
|
1287 |
+
name="mpt-30b-instruct",
|
1288 |
+
system_template="{system_message}",
|
1289 |
+
system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
|
1290 |
+
roles=("### Instruction", "### Response"),
|
1291 |
+
sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
|
1292 |
+
sep="\n\n",
|
1293 |
+
stop_token_ids=[50278, 0],
|
1294 |
+
)
|
1295 |
+
)
|
1296 |
+
|
1297 |
+
# Bard default template
|
1298 |
+
# Reference: https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L150
|
1299 |
+
# https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L40
|
1300 |
+
register_conv_template(
|
1301 |
+
Conversation(
|
1302 |
+
name="bard",
|
1303 |
+
roles=("0", "1"),
|
1304 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1305 |
+
sep=None,
|
1306 |
+
)
|
1307 |
+
)
|
1308 |
+
|
1309 |
+
register_conv_template(
|
1310 |
+
Conversation(
|
1311 |
+
name="gemini",
|
1312 |
+
roles=("user", "model"),
|
1313 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1314 |
+
sep=None,
|
1315 |
+
max_image_size_mb=20,
|
1316 |
+
)
|
1317 |
+
)
|
1318 |
+
|
1319 |
+
register_conv_template(
|
1320 |
+
Conversation(
|
1321 |
+
name="gemini-1.5-pro",
|
1322 |
+
roles=("user", "model"),
|
1323 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1324 |
+
sep=None,
|
1325 |
+
system_message=(
|
1326 |
+
"You are a friendly and helpful assistant.\n"
|
1327 |
+
"Ensure your answers are complete, unless the user requests a more concise approach.\n"
|
1328 |
+
"When generating code, offer explanations for code segments as necessary and maintain good coding practices.\n"
|
1329 |
+
"When presented with inquiries seeking information, provide answers that reflect a deep understanding of the field, guaranteeing their correctness.\n"
|
1330 |
+
"For any non-english queries, respond in the same language as the prompt unless otherwise specified by the user.\n"
|
1331 |
+
"For prompts involving reasoning, provide a clear explanation of each step in the reasoning process before presenting the final answer."
|
1332 |
+
),
|
1333 |
+
)
|
1334 |
+
)
|
1335 |
+
|
1336 |
+
# BiLLa default template
|
1337 |
+
register_conv_template(
|
1338 |
+
Conversation(
|
1339 |
+
name="billa",
|
1340 |
+
roles=("Human", "Assistant"),
|
1341 |
+
sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
|
1342 |
+
sep="\n",
|
1343 |
+
stop_str="Human:",
|
1344 |
+
)
|
1345 |
+
)
|
1346 |
+
|
1347 |
+
# RedPajama INCITE default template
|
1348 |
+
register_conv_template(
|
1349 |
+
Conversation(
|
1350 |
+
name="redpajama-incite",
|
1351 |
+
roles=("<human>", "<bot>"),
|
1352 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1353 |
+
sep="\n",
|
1354 |
+
stop_str="<human>",
|
1355 |
+
)
|
1356 |
+
)
|
1357 |
+
|
1358 |
+
# h2oGPT default template
|
1359 |
+
register_conv_template(
|
1360 |
+
Conversation(
|
1361 |
+
name="h2ogpt",
|
1362 |
+
roles=("<|prompt|>", "<|answer|>"),
|
1363 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1364 |
+
sep="</s>",
|
1365 |
+
)
|
1366 |
+
)
|
1367 |
+
|
1368 |
+
# Robin default template
|
1369 |
+
register_conv_template(
|
1370 |
+
Conversation(
|
1371 |
+
name="Robin",
|
1372 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1373 |
+
roles=("###Human", "###Assistant"),
|
1374 |
+
sep_style=SeparatorStyle.ROBIN,
|
1375 |
+
sep="\n",
|
1376 |
+
stop_token_ids=[2, 396],
|
1377 |
+
stop_str="###",
|
1378 |
+
)
|
1379 |
+
)
|
1380 |
+
|
1381 |
+
# Snoozy default template
|
1382 |
+
# Reference: https://github.com/nomic-ai/gpt4all/blob/d4861030b778da6db59d21d2927a4aba4f9f1f43/gpt4all-bindings/python/gpt4all/gpt4all.py#L232
|
1383 |
+
register_conv_template(
|
1384 |
+
Conversation(
|
1385 |
+
name="snoozy",
|
1386 |
+
system_template="### Instruction:\n{system_message}",
|
1387 |
+
system_message="The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.",
|
1388 |
+
roles=("### Prompt", "### Response"),
|
1389 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1390 |
+
sep="\n",
|
1391 |
+
stop_str="###",
|
1392 |
+
)
|
1393 |
+
)
|
1394 |
+
|
1395 |
+
# manticore default template
|
1396 |
+
register_conv_template(
|
1397 |
+
Conversation(
|
1398 |
+
name="manticore",
|
1399 |
+
roles=("USER", "ASSISTANT"),
|
1400 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1401 |
+
sep="\n",
|
1402 |
+
sep2="</s>",
|
1403 |
+
)
|
1404 |
+
)
|
1405 |
+
|
1406 |
+
# Falcon default template
|
1407 |
+
register_conv_template(
|
1408 |
+
Conversation(
|
1409 |
+
name="falcon",
|
1410 |
+
roles=("User", "Assistant"),
|
1411 |
+
messages=[],
|
1412 |
+
sep_style=SeparatorStyle.RWKV,
|
1413 |
+
sep="\n",
|
1414 |
+
sep2="<|endoftext|>",
|
1415 |
+
stop_str="\nUser", # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
|
1416 |
+
stop_token_ids=[
|
1417 |
+
0,
|
1418 |
+
1,
|
1419 |
+
2,
|
1420 |
+
3,
|
1421 |
+
4,
|
1422 |
+
5,
|
1423 |
+
6,
|
1424 |
+
7,
|
1425 |
+
8,
|
1426 |
+
9,
|
1427 |
+
10,
|
1428 |
+
11,
|
1429 |
+
], # it better only put special tokens here, because tokenizer only remove special tokens
|
1430 |
+
)
|
1431 |
+
)
|
1432 |
+
|
1433 |
+
# ChangGPT default template
|
1434 |
+
register_conv_template(
|
1435 |
+
Conversation(
|
1436 |
+
name="polyglot_changgpt",
|
1437 |
+
roles=("B", "A"),
|
1438 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1439 |
+
sep="\n",
|
1440 |
+
)
|
1441 |
+
)
|
1442 |
+
|
1443 |
+
# tigerbot template
|
1444 |
+
register_conv_template(
|
1445 |
+
Conversation(
|
1446 |
+
name="tigerbot",
|
1447 |
+
system_message="A chat between a curious user and an artificial intelligence assistant. "
|
1448 |
+
"The assistant gives helpful, detailed, and polite answers to the user's questions.",
|
1449 |
+
roles=("### Instruction", "### Response"),
|
1450 |
+
sep_style=SeparatorStyle.ROBIN,
|
1451 |
+
sep="\n\n",
|
1452 |
+
stop_str="###",
|
1453 |
+
)
|
1454 |
+
)
|
1455 |
+
|
1456 |
+
# ref: https://huggingface.co/Salesforce/xgen-7b-8k-inst
|
1457 |
+
register_conv_template(
|
1458 |
+
Conversation(
|
1459 |
+
name="xgen",
|
1460 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
|
1461 |
+
roles=("### Human", "### Assistant"),
|
1462 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1463 |
+
sep="\n",
|
1464 |
+
stop_token_ids=[50256],
|
1465 |
+
)
|
1466 |
+
)
|
1467 |
+
|
1468 |
+
# Internlm-chat template
|
1469 |
+
register_conv_template(
|
1470 |
+
Conversation(
|
1471 |
+
name="internlm-chat",
|
1472 |
+
system_message="A chat between a curious <|User|> and an <|Bot|>. The <|Bot|> gives helpful, detailed, and polite answers to the <|User|>'s questions.\n\n",
|
1473 |
+
roles=("<|User|>", "<|Bot|>"),
|
1474 |
+
sep_style=SeparatorStyle.CHATINTERN,
|
1475 |
+
sep="<eoh>",
|
1476 |
+
sep2="<eoa>",
|
1477 |
+
stop_token_ids=[1, 103028],
|
1478 |
+
stop_str="<|User|>",
|
1479 |
+
)
|
1480 |
+
)
|
1481 |
+
|
1482 |
+
# StarChat template
|
1483 |
+
# reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py
|
1484 |
+
register_conv_template(
|
1485 |
+
Conversation(
|
1486 |
+
name="starchat",
|
1487 |
+
system_template="<system>\n{system_message}",
|
1488 |
+
roles=("<|user|>", "<|assistant|>"),
|
1489 |
+
sep_style=SeparatorStyle.CHATML,
|
1490 |
+
sep="<|end|>",
|
1491 |
+
stop_token_ids=[0, 49155],
|
1492 |
+
stop_str="<|end|>",
|
1493 |
+
)
|
1494 |
+
)
|
1495 |
+
|
1496 |
+
# Baichuan-13B-Chat template
|
1497 |
+
register_conv_template(
|
1498 |
+
# source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555
|
1499 |
+
# https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json
|
1500 |
+
# https://github.com/baichuan-inc/Baichuan-13B/issues/25
|
1501 |
+
Conversation(
|
1502 |
+
name="baichuan-chat",
|
1503 |
+
roles=("<reserved_102>", "<reserved_103>"),
|
1504 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1505 |
+
sep="",
|
1506 |
+
stop_token_ids=[],
|
1507 |
+
)
|
1508 |
+
)
|
1509 |
+
|
1510 |
+
# Baichuan2-13B-Chat template
|
1511 |
+
register_conv_template(
|
1512 |
+
# source: https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/c6f8592a60b4ad73c210b28dd2ab3cca51abbf93/modeling_baichuan.py#L773
|
1513 |
+
# https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/generation_config.json
|
1514 |
+
# https://github.com/baichuan-inc/Baichuan2/issues/62
|
1515 |
+
Conversation(
|
1516 |
+
name="baichuan2-chat",
|
1517 |
+
roles=("<reserved_106>", "<reserved_107>"),
|
1518 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1519 |
+
sep="",
|
1520 |
+
stop_token_ids=[],
|
1521 |
+
)
|
1522 |
+
)
|
1523 |
+
|
1524 |
+
# Mistral template
|
1525 |
+
# source: https://docs.mistral.ai/llm/mistral-instruct-v0.1#chat-template
|
1526 |
+
register_conv_template(
|
1527 |
+
Conversation(
|
1528 |
+
name="mistral",
|
1529 |
+
system_template="[INST] {system_message}\n",
|
1530 |
+
roles=("[INST]", "[/INST]"),
|
1531 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1532 |
+
sep=" ",
|
1533 |
+
sep2="</s>",
|
1534 |
+
)
|
1535 |
+
)
|
1536 |
+
|
1537 |
+
# llama2 template
|
1538 |
+
# reference: https://huggingface.co/blog/codellama#conversational-instructions
|
1539 |
+
# reference: https://github.com/facebookresearch/llama/blob/1a240688810f8036049e8da36b073f63d2ac552c/llama/generation.py#L212
|
1540 |
+
register_conv_template(
|
1541 |
+
Conversation(
|
1542 |
+
name="llama-2",
|
1543 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1544 |
+
roles=("[INST]", "[/INST]"),
|
1545 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1546 |
+
sep=" ",
|
1547 |
+
sep2=" </s><s>",
|
1548 |
+
)
|
1549 |
+
)
|
1550 |
+
|
1551 |
+
# llama3 template
|
1552 |
+
# reference: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json
|
1553 |
+
# reference: https://github.com/meta-llama/llama3/blob/0cee08ec68f4cfc0c89fe4a9366d82679aaa2a66/llama/tokenizer.py#L222
|
1554 |
+
register_conv_template(
|
1555 |
+
Conversation(
|
1556 |
+
name="llama-3",
|
1557 |
+
system_template="<|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|>",
|
1558 |
+
roles=("user", "assistant"),
|
1559 |
+
sep_style=SeparatorStyle.LLAMA3,
|
1560 |
+
sep="",
|
1561 |
+
stop_str="<|eot_id|>",
|
1562 |
+
stop_token_ids=[128001, 128009],
|
1563 |
+
)
|
1564 |
+
)
|
1565 |
+
|
1566 |
+
register_conv_template(
|
1567 |
+
Conversation(
|
1568 |
+
name="chinese-alpaca2",
|
1569 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1570 |
+
system_message="You are a helpful assistant. 你是一个乐于助人的助手。请你提供专业、有逻辑、内容真实、有价值的详细回复。",
|
1571 |
+
roles=("[INST]", "[/INST]"),
|
1572 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1573 |
+
sep=" ",
|
1574 |
+
sep2=" </s><s>",
|
1575 |
+
)
|
1576 |
+
)
|
1577 |
+
|
1578 |
+
register_conv_template(
|
1579 |
+
Conversation(
|
1580 |
+
name="cutegpt",
|
1581 |
+
roles=("问:", "答:\n"),
|
1582 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1583 |
+
sep="\n",
|
1584 |
+
sep2="\n",
|
1585 |
+
stop_str="<end>",
|
1586 |
+
)
|
1587 |
+
)
|
1588 |
+
|
1589 |
+
# OpenOrcaxOpenChat-Preview2-13B template
|
1590 |
+
register_conv_template(
|
1591 |
+
Conversation(
|
1592 |
+
name="open-orca",
|
1593 |
+
system_template="{system_message}",
|
1594 |
+
system_message="You are a helpful assistant. Please answer truthfully and write out your "
|
1595 |
+
"thinking step by step to be sure you get the right answer. If you make a mistake or encounter "
|
1596 |
+
"an error in your thinking, say so out loud and attempt to correct it. If you don't know or "
|
1597 |
+
"aren't sure about something, say so clearly. You will act as a professional logician, mathematician, "
|
1598 |
+
"and physicist. You will also act as the most appropriate type of expert to answer any particular "
|
1599 |
+
"question or solve the relevant problem; state which expert type your are, if so. Also think of "
|
1600 |
+
"any particular named expert that would be ideal to answer the relevant question or solve the "
|
1601 |
+
"relevant problem; name and act as them, if appropriate.",
|
1602 |
+
roles=("User", "Assistant"),
|
1603 |
+
sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
|
1604 |
+
sep="<|end_of_turn|>\n",
|
1605 |
+
stop_token_ids=[32000, 32001], # "<|end_of_turn|>"
|
1606 |
+
stop_str="User",
|
1607 |
+
)
|
1608 |
+
)
|
1609 |
+
|
1610 |
+
# Open-Orca/Mistral-7B-OpenOrca template
|
1611 |
+
# source: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
|
1612 |
+
# reference: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca#prompt-template
|
1613 |
+
register_conv_template(
|
1614 |
+
Conversation(
|
1615 |
+
name="mistral-7b-openorca",
|
1616 |
+
system_template="<|im_start|>system\n{system_message}",
|
1617 |
+
system_message="You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!",
|
1618 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1619 |
+
sep_style=SeparatorStyle.CHATML,
|
1620 |
+
sep="<|im_end|>",
|
1621 |
+
stop_token_ids=[32000, 32001],
|
1622 |
+
)
|
1623 |
+
)
|
1624 |
+
|
1625 |
+
|
1626 |
+
# ehartford/dolphin-2.2.1-mistral-7b template
|
1627 |
+
# reference: https://huggingface.co/ehartford/dolphin-2.2.1-mistral-7b#training
|
1628 |
+
register_conv_template(
|
1629 |
+
Conversation(
|
1630 |
+
name="dolphin-2.2.1-mistral-7b",
|
1631 |
+
system_template="<|im_start|>system\n{system_message}",
|
1632 |
+
system_message="You are Dolphin, a helpful AI assistant.",
|
1633 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1634 |
+
sep_style=SeparatorStyle.CHATML,
|
1635 |
+
sep="<|im_end|>",
|
1636 |
+
stop_token_ids=[32000, 32001],
|
1637 |
+
)
|
1638 |
+
)
|
1639 |
+
|
1640 |
+
|
1641 |
+
# teknium/OpenHermes-2.5-Mistral-7B template
|
1642 |
+
# source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B
|
1643 |
+
# reference: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B#prompt-template
|
1644 |
+
register_conv_template(
|
1645 |
+
Conversation(
|
1646 |
+
name="OpenHermes-2.5-Mistral-7B",
|
1647 |
+
system_template="<|im_start|>system\n{system_message}",
|
1648 |
+
system_message="You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.",
|
1649 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1650 |
+
sep_style=SeparatorStyle.CHATML,
|
1651 |
+
sep="<|im_end|>",
|
1652 |
+
stop_token_ids=[32000, 32001],
|
1653 |
+
)
|
1654 |
+
)
|
1655 |
+
|
1656 |
+
|
1657 |
+
# NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO template
|
1658 |
+
# source: https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
|
1659 |
+
register_conv_template(
|
1660 |
+
Conversation(
|
1661 |
+
name="Nous-Hermes-2-Mixtral-8x7B-DPO",
|
1662 |
+
system_template="<|im_start|>system\n{system_message}",
|
1663 |
+
system_message='You are a helpful, intelligent assistant AI named "Hermes", a conversational chatbot that can follow instructions, converse with the user, and perform a variety of tasks, including tasks on knowledge, reasoning, mathematics, and code. Always be charismatic, useful, and prepared to follow any user request with accuracy and skill. You should respond with high quality, fluent, and detailed responses. Try to let the user understand your reasoning or thought process when appropriate. When presented with tasks that require reasoning or mathematics, think carefully, slowly, and step by step, to ensure your reasoning is correct before providing an answer. Utilize the "Examples" section to assist you in performing the task. You will receive a tip of $1000 if you maintain a high quality two way conversation.',
|
1664 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1665 |
+
sep_style=SeparatorStyle.CHATML,
|
1666 |
+
sep="<|im_end|>",
|
1667 |
+
stop_token_ids=[32000, 32001],
|
1668 |
+
)
|
1669 |
+
)
|
1670 |
+
|
1671 |
+
|
1672 |
+
# Qwen-chat default template
|
1673 |
+
# source: https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/qwen_generation_utils.py#L130
|
1674 |
+
register_conv_template(
|
1675 |
+
Conversation(
|
1676 |
+
name="qwen-7b-chat",
|
1677 |
+
system_template="<|im_start|>system\n{system_message}",
|
1678 |
+
system_message="You are a helpful assistant.",
|
1679 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1680 |
+
sep_style=SeparatorStyle.CHATML,
|
1681 |
+
sep="<|im_end|>",
|
1682 |
+
stop_token_ids=[
|
1683 |
+
151643,
|
1684 |
+
151644,
|
1685 |
+
151645,
|
1686 |
+
], # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
|
1687 |
+
stop_str="<|endoftext|>",
|
1688 |
+
)
|
1689 |
+
)
|
1690 |
+
|
1691 |
+
# source: https://huggingface.co/01-ai/Yi-34B-Chat/blob/main/tokenizer_config.json#L60
|
1692 |
+
register_conv_template(
|
1693 |
+
Conversation(
|
1694 |
+
name="Yi-34b-chat",
|
1695 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1696 |
+
sep_style=SeparatorStyle.CHATML,
|
1697 |
+
sep="<|im_end|>",
|
1698 |
+
stop_token_ids=[
|
1699 |
+
2,
|
1700 |
+
6,
|
1701 |
+
7,
|
1702 |
+
8,
|
1703 |
+
], # "<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|im_sep|>"
|
1704 |
+
stop_str="<|endoftext|>",
|
1705 |
+
)
|
1706 |
+
)
|
1707 |
+
|
1708 |
+
|
1709 |
+
# AquilaChat default template
|
1710 |
+
# source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
|
1711 |
+
register_conv_template(
|
1712 |
+
Conversation(
|
1713 |
+
name="aquila-chat",
|
1714 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1715 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1716 |
+
roles=("Human", "Assistant"),
|
1717 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1718 |
+
sep="###",
|
1719 |
+
sep2="",
|
1720 |
+
stop_str=["###", "</s>", "[UNK]"],
|
1721 |
+
)
|
1722 |
+
)
|
1723 |
+
# AquilaChat2-34B default template
|
1724 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L212
|
1725 |
+
register_conv_template(
|
1726 |
+
Conversation(
|
1727 |
+
name="aquila-legacy",
|
1728 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1729 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
|
1730 |
+
roles=("### Human: ", "### Assistant: "),
|
1731 |
+
offset=0,
|
1732 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1733 |
+
sep="\n",
|
1734 |
+
sep2="</s>",
|
1735 |
+
stop_str=["</s>", "[UNK]"],
|
1736 |
+
)
|
1737 |
+
)
|
1738 |
+
# AquilaChat2-7B-16K and AquilaChat2-34B-16K default template
|
1739 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L227
|
1740 |
+
register_conv_template(
|
1741 |
+
Conversation(
|
1742 |
+
name="aquila",
|
1743 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1744 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1745 |
+
roles=("Human", "Assistant"),
|
1746 |
+
offset=0,
|
1747 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1748 |
+
sep="###",
|
1749 |
+
sep2="</s>",
|
1750 |
+
stop_str=["</s>", "[UNK]"],
|
1751 |
+
)
|
1752 |
+
)
|
1753 |
+
|
1754 |
+
# AquilaChat2-7B default template
|
1755 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L242
|
1756 |
+
register_conv_template(
|
1757 |
+
Conversation(
|
1758 |
+
name="aquila-v1",
|
1759 |
+
roles=("<|startofpiece|>", "<|endofpiece|>"),
|
1760 |
+
offset=0,
|
1761 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1762 |
+
sep="",
|
1763 |
+
sep2="</s>",
|
1764 |
+
stop_str=["</s>", "<|endoftext|>"],
|
1765 |
+
)
|
1766 |
+
)
|
1767 |
+
|
1768 |
+
# Llama2-Chinese default template
|
1769 |
+
# source: https://huggingface.co/FlagAlpha
|
1770 |
+
register_conv_template(
|
1771 |
+
Conversation(
|
1772 |
+
name="llama2-chinese",
|
1773 |
+
system_template="<s>{system_message}</s>",
|
1774 |
+
roles=("Human", "Assistant", "System"),
|
1775 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1776 |
+
sep="\n",
|
1777 |
+
sep2="\n</s><s>",
|
1778 |
+
stop_str="</s>",
|
1779 |
+
)
|
1780 |
+
)
|
1781 |
+
|
1782 |
+
# Vigogne Instruct default template
|
1783 |
+
# source: https://github.com/bofenghuang/vigogne
|
1784 |
+
register_conv_template(
|
1785 |
+
Conversation(
|
1786 |
+
name="vigogne_instruct",
|
1787 |
+
system_template="### System:\n{system_message}\n\n",
|
1788 |
+
system_message=(
|
1789 |
+
"Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière"
|
1790 |
+
" précise à la demande."
|
1791 |
+
),
|
1792 |
+
roles=("### Instruction", "### Response"),
|
1793 |
+
sep_style=SeparatorStyle.DOLLY,
|
1794 |
+
sep="\n\n",
|
1795 |
+
sep2="</s>",
|
1796 |
+
)
|
1797 |
+
)
|
1798 |
+
|
1799 |
+
# Vigogne Chat default template
|
1800 |
+
register_conv_template(
|
1801 |
+
Conversation(
|
1802 |
+
name="vigogne_chat_v2",
|
1803 |
+
system_template="<|system|>: {system_message}",
|
1804 |
+
system_message=(
|
1805 |
+
"Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
|
1806 |
+
" autant que vous le pouvez."
|
1807 |
+
),
|
1808 |
+
roles=("<|user|>", "<|assistant|>"),
|
1809 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1810 |
+
sep="\n",
|
1811 |
+
sep2="</s>\n",
|
1812 |
+
stop_str="<|user|>",
|
1813 |
+
)
|
1814 |
+
)
|
1815 |
+
|
1816 |
+
# Stable Vicuna default template
|
1817 |
+
# source: https://huggingface.co/TheBloke/stable-vicuna-13B-HF/discussions/5
|
1818 |
+
# source: https://huggingface.co/spaces/CarperAI/StableVicuna/blob/main/app.py
|
1819 |
+
register_conv_template(
|
1820 |
+
Conversation(
|
1821 |
+
name="stable-vicuna",
|
1822 |
+
system_message="### Assistant: I am StableVicuna, a large language model created by CarperAI. I am here to chat!\n",
|
1823 |
+
roles=("### Human", "### Assistant"),
|
1824 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1825 |
+
sep="\n",
|
1826 |
+
sep2="\n\n",
|
1827 |
+
)
|
1828 |
+
)
|
1829 |
+
|
1830 |
+
register_conv_template(
|
1831 |
+
Conversation(
|
1832 |
+
name="vigogne_chat_v3",
|
1833 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1834 |
+
system_message=(
|
1835 |
+
"Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
|
1836 |
+
" autant que vous le pouvez."
|
1837 |
+
),
|
1838 |
+
roles=("[INST]", "[/INST]"),
|
1839 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1840 |
+
sep=" ",
|
1841 |
+
sep2=" </s>",
|
1842 |
+
)
|
1843 |
+
)
|
1844 |
+
|
1845 |
+
# Falcon 180B chat template
|
1846 |
+
# source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
|
1847 |
+
register_conv_template(
|
1848 |
+
Conversation(
|
1849 |
+
name="falcon-chat",
|
1850 |
+
roles=("User", "Falcon"),
|
1851 |
+
system_template="System: {system_message}",
|
1852 |
+
messages=[],
|
1853 |
+
sep_style=SeparatorStyle.FALCON_CHAT,
|
1854 |
+
sep="\n",
|
1855 |
+
sep2="<|endoftext|>",
|
1856 |
+
stop_str="\nUser:", # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
|
1857 |
+
)
|
1858 |
+
)
|
1859 |
+
|
1860 |
+
# Phind template
|
1861 |
+
# source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
|
1862 |
+
register_conv_template(
|
1863 |
+
Conversation(
|
1864 |
+
name="phind",
|
1865 |
+
system_message="### System Prompt\nYou are an intelligent programming assistant.",
|
1866 |
+
roles=("### User Message", "### Assistant"),
|
1867 |
+
messages=(),
|
1868 |
+
offset=0,
|
1869 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1870 |
+
sep="\n\n",
|
1871 |
+
)
|
1872 |
+
)
|
1873 |
+
|
1874 |
+
# Metharme formatting for Pygmalion models
|
1875 |
+
# source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
|
1876 |
+
register_conv_template(
|
1877 |
+
Conversation(
|
1878 |
+
name="metharme",
|
1879 |
+
system_template="<|system|>{system_message}",
|
1880 |
+
system_message="""Enter RP mode. You shall reply to the user while staying
|
1881 |
+
in character. Your responses must be detailed, creative, immersive, and drive the scenario
|
1882 |
+
forward.""",
|
1883 |
+
roles=("<|user|>", "<|model|>"),
|
1884 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1885 |
+
sep="",
|
1886 |
+
stop_str="<|user|>",
|
1887 |
+
)
|
1888 |
+
)
|
1889 |
+
# xDAN default template
|
1890 |
+
# source: https://huggingface.co/xDAN-AI/xDAN-L1-Chat-RL-v1
|
1891 |
+
register_conv_template(
|
1892 |
+
Conversation(
|
1893 |
+
name="xdan-v1",
|
1894 |
+
system_message="You are a helpful and harmless assistant named xDAN and created by xDAN-AI.Please response and work on questions thinking step by step.",
|
1895 |
+
roles=("### Human", "### Assistant"),
|
1896 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1897 |
+
sep="\n",
|
1898 |
+
stop_str="</s>",
|
1899 |
+
)
|
1900 |
+
)
|
1901 |
+
|
1902 |
+
# Zephyr template
|
1903 |
+
# reference: https://huggingface.co/spaces/HuggingFaceH4/zephyr-playground/blob/main/dialogues.py
|
1904 |
+
register_conv_template(
|
1905 |
+
Conversation(
|
1906 |
+
name="zephyr",
|
1907 |
+
system_template="<|system|>\n{system_message}",
|
1908 |
+
roles=("<|user|>", "<|assistant|>"),
|
1909 |
+
sep_style=SeparatorStyle.CHATML,
|
1910 |
+
sep="</s>",
|
1911 |
+
stop_token_ids=[2],
|
1912 |
+
stop_str="</s>",
|
1913 |
+
)
|
1914 |
+
)
|
1915 |
+
|
1916 |
+
# CatPPT template
|
1917 |
+
# reference: https://huggingface.co/rishiraj/CatPPT
|
1918 |
+
register_conv_template(
|
1919 |
+
Conversation(
|
1920 |
+
name="catppt",
|
1921 |
+
system_template="<|system|>\n{system_message}",
|
1922 |
+
roles=("<|user|>", "<|assistant|>"),
|
1923 |
+
sep_style=SeparatorStyle.CHATML,
|
1924 |
+
sep="</s>",
|
1925 |
+
stop_token_ids=[2],
|
1926 |
+
stop_str="</s>",
|
1927 |
+
)
|
1928 |
+
)
|
1929 |
+
|
1930 |
+
# TinyLlama template
|
1931 |
+
# reference: https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0
|
1932 |
+
register_conv_template(
|
1933 |
+
Conversation(
|
1934 |
+
name="TinyLlama",
|
1935 |
+
system_template="<|system|>\n{system_message}",
|
1936 |
+
roles=("<|user|>", "<|assistant|>"),
|
1937 |
+
sep_style=SeparatorStyle.CHATML,
|
1938 |
+
sep="</s>",
|
1939 |
+
stop_token_ids=[2],
|
1940 |
+
stop_str="</s>",
|
1941 |
+
)
|
1942 |
+
)
|
1943 |
+
|
1944 |
+
# Orca-2 template
|
1945 |
+
# reference: https://huggingface.co/microsoft/Orca-2-7b
|
1946 |
+
register_conv_template(
|
1947 |
+
Conversation(
|
1948 |
+
name="orca-2",
|
1949 |
+
system_template="<|im_start|>system\n{system_message}",
|
1950 |
+
system_message="You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior.",
|
1951 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1952 |
+
sep_style=SeparatorStyle.CHATML,
|
1953 |
+
sep="<|im_end|>",
|
1954 |
+
stop_str="<|im_end|>",
|
1955 |
+
)
|
1956 |
+
)
|
1957 |
+
|
1958 |
+
# Deepseek-chat template
|
1959 |
+
# reference: https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat/blob/main/tokenizer_config.json
|
1960 |
+
register_conv_template(
|
1961 |
+
Conversation(
|
1962 |
+
name="deepseek-chat",
|
1963 |
+
system_message="<|begin▁of▁sentence|>", # must add a bos token before first message
|
1964 |
+
roles=("User", "Assistant"),
|
1965 |
+
sep_style=SeparatorStyle.DEEPSEEK_CHAT,
|
1966 |
+
sep="\n\n",
|
1967 |
+
sep2="<|end▁of▁sentence|>",
|
1968 |
+
stop_str="<|end▁of▁sentence|>",
|
1969 |
+
)
|
1970 |
+
)
|
1971 |
+
|
1972 |
+
# Yuan2.0 chat template
|
1973 |
+
# source: https://huggingface.co/IEITYuan/Yuan2-2B-Janus-hf/blob/main/tokenizer_config.json#L6
|
1974 |
+
register_conv_template(
|
1975 |
+
Conversation(
|
1976 |
+
name="yuan2",
|
1977 |
+
roles=("user", "assistant"),
|
1978 |
+
sep_style=SeparatorStyle.YUAN2,
|
1979 |
+
sep="<sep>",
|
1980 |
+
sep2="\n",
|
1981 |
+
stop_token_ids=[
|
1982 |
+
77185,
|
1983 |
+
], # "<eod>"
|
1984 |
+
stop_str="<eod>",
|
1985 |
+
)
|
1986 |
+
)
|
1987 |
+
|
1988 |
+
# Solar-10.7B Chat Template
|
1989 |
+
# Reference: https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0/blob/main/tokenizer_config.json
|
1990 |
+
register_conv_template(
|
1991 |
+
Conversation(
|
1992 |
+
name="solar",
|
1993 |
+
system_message="",
|
1994 |
+
roles=("### User", "### Assistant"),
|
1995 |
+
sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
|
1996 |
+
sep="\n\n",
|
1997 |
+
stop_str="</s>",
|
1998 |
+
)
|
1999 |
+
)
|
2000 |
+
|
2001 |
+
# nvidia/Llama2-70B-SteerLM-Chat
|
2002 |
+
register_conv_template(
|
2003 |
+
Conversation(
|
2004 |
+
name="steerlm",
|
2005 |
+
system_message="",
|
2006 |
+
roles=("user", "assistant"),
|
2007 |
+
sep_style=SeparatorStyle.DEFAULT,
|
2008 |
+
sep=None,
|
2009 |
+
)
|
2010 |
+
)
|
2011 |
+
|
2012 |
+
# yuan 2.0 template
|
2013 |
+
# reference:https://github.com/IEIT-Yuan/Yuan-2.0
|
2014 |
+
# reference:https://huggingface.co/IEITYuan
|
2015 |
+
register_conv_template(
|
2016 |
+
Conversation(
|
2017 |
+
name="yuan",
|
2018 |
+
system_template="",
|
2019 |
+
roles=("", ""),
|
2020 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
2021 |
+
sep="<sep>",
|
2022 |
+
stop_str="<eod>",
|
2023 |
+
)
|
2024 |
+
)
|
2025 |
+
|
2026 |
+
# Cllm chat template
|
2027 |
+
# reference:
|
2028 |
+
register_conv_template(
|
2029 |
+
Conversation(
|
2030 |
+
name="cllm",
|
2031 |
+
system_message="A chat between a curious user and an artificial intelligence assistant. "
|
2032 |
+
"The assistant gives helpful, detailed, and polite answers to the user's questions.",
|
2033 |
+
roles=("USER", "ASSISTANT"),
|
2034 |
+
sep_style=SeparatorStyle.CLLM,
|
2035 |
+
sep=" ",
|
2036 |
+
sep2="</s>",
|
2037 |
+
)
|
2038 |
+
)
|
2039 |
+
|
2040 |
+
|
2041 |
+
# Llava-chatml
|
2042 |
+
# reference: https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/llava/conversation.py#L361
|
2043 |
+
register_conv_template(
|
2044 |
+
Conversation(
|
2045 |
+
name="llava-chatml",
|
2046 |
+
system_template="<|im_start|>system\n{system_message}",
|
2047 |
+
system_message="Answer the questions.",
|
2048 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
2049 |
+
sep_style=SeparatorStyle.CHATML,
|
2050 |
+
sep="<|im_end|>",
|
2051 |
+
stop_str="<|im_end|>",
|
2052 |
+
)
|
2053 |
+
)
|
2054 |
+
|
2055 |
+
# Gemma
|
2056 |
+
# reference: https://huggingface.co/google/gemma-7b-it?text=%3Cstart_of_turn%3Euser%0AHow+does+the+brain+work%3F%3Cend_of_turn%3E%0A%3Cstart_of_turn%3Emodel
|
2057 |
+
register_conv_template(
|
2058 |
+
Conversation(
|
2059 |
+
name="gemma",
|
2060 |
+
roles=("user", "model"),
|
2061 |
+
sep_style=SeparatorStyle.GEMMA,
|
2062 |
+
sep="<end_of_turn>\n",
|
2063 |
+
stop_str="<end_of_turn>",
|
2064 |
+
)
|
2065 |
+
)
|
2066 |
+
|
2067 |
+
register_conv_template(
|
2068 |
+
Conversation(
|
2069 |
+
name="yandexgpt",
|
2070 |
+
system_message="",
|
2071 |
+
roles=("user", "assistant"),
|
2072 |
+
sep_style=None,
|
2073 |
+
sep=None,
|
2074 |
+
)
|
2075 |
+
)
|
2076 |
+
|
2077 |
+
if __name__ == "__main__":
|
2078 |
+
from fastchat.conversation import get_conv_template
|
2079 |
+
|
2080 |
+
print("-- Vicuna template --")
|
2081 |
+
conv = get_conv_template("vicuna_v1.1")
|
2082 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2083 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2084 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2085 |
+
conv.append_message(conv.roles[1], None)
|
2086 |
+
print(conv.get_prompt())
|
2087 |
+
|
2088 |
+
print("\n")
|
2089 |
+
|
2090 |
+
print("-- Llama-2 template --")
|
2091 |
+
conv = get_conv_template("llama-2")
|
2092 |
+
conv.set_system_message("You are a helpful, respectful and honest assistant.")
|
2093 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2094 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2095 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2096 |
+
conv.append_message(conv.roles[1], None)
|
2097 |
+
print(conv.get_prompt())
|
2098 |
+
|
2099 |
+
print("\n")
|
2100 |
+
|
2101 |
+
print("-- ChatGPT template --")
|
2102 |
+
conv = get_conv_template("chatgpt")
|
2103 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2104 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2105 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2106 |
+
conv.append_message(conv.roles[1], None)
|
2107 |
+
print(conv.to_openai_api_messages())
|
2108 |
+
|
2109 |
+
print("\n")
|
2110 |
+
|
2111 |
+
print("-- Claude template --")
|
2112 |
+
conv = get_conv_template("claude")
|
2113 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2114 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2115 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2116 |
+
conv.append_message(conv.roles[1], None)
|
2117 |
+
print(conv.get_prompt())
|
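For reference, a minimal sketch (not part of the uploaded files) of how the registry above is typically consumed: a template is registered once with register_conv_template and later fetched by name with get_conv_template, exactly as the __main__ block does for the built-in templates. The template name "my-model" below is a hypothetical placeholder.

# Editorial sketch; "my-model" is a hypothetical template name.
from fastchat.conversation import (
    Conversation,
    SeparatorStyle,
    get_conv_template,
    register_conv_template,
)

register_conv_template(
    Conversation(
        name="my-model",  # hypothetical, not registered by this commit
        system_message="A helpful assistant.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>",
    )
)

conv = get_conv_template("my-model")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], None)  # None marks the slot the model will fill
print(conv.get_prompt())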
fastchat/.ipynb_checkpoints/utils-checkpoint.py
ADDED
@@ -0,0 +1,484 @@
"""
Common utilities.
"""
from asyncio import AbstractEventLoop
from io import BytesIO
import base64
import json
import logging
import logging.handlers
import os
import platform
import sys
import time
from typing import AsyncGenerator, Generator
import warnings

import requests

from fastchat.constants import LOGDIR


handler = None
visited_loggers = set()


def build_logger(logger_name, logger_filename):
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of root handlers
    if not logging.getLogger().handlers:
        if sys.version_info[1] >= 9:
            # This is for windows
            logging.basicConfig(level=logging.INFO, encoding="utf-8")
        else:
            if platform.system() == "Windows":
                warnings.warn(
                    "If you are running on Windows, "
                    "we recommend you use Python >= 3.9 for UTF-8 encoding."
                )
            logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Avoid httpx flooding POST logs
    logging.getLogger("httpx").setLevel(logging.WARNING)

    # if LOGDIR is empty, then don't try output log to local file
    if LOGDIR != "":
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(
            filename, when="D", utc=True, encoding="utf-8"
        )
        handler.setFormatter(formatter)

        for l in [stdout_logger, stderr_logger, logger]:
            if l in visited_loggers:
                continue
            visited_loggers.add(l)
            l.addHandler(handler)

    return logger


class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        return getattr(self.terminal, attr)

    def write(self, buf):
        temp_linebuf = self.linebuf + buf
        self.linebuf = ""
        for line in temp_linebuf.splitlines(True):
            # From the io.TextIOWrapper docs:
            #   On output, if newline is None, any '\n' characters written
            #   are translated to the system default line separator.
            # By default sys.stdout.write() expects '\n' newlines and then
            # translates them so this is still cross platform.
            if line[-1] == "\n":
                encoded_message = line.encode("utf-8", "ignore").decode("utf-8")
                self.logger.log(self.log_level, encoded_message.rstrip())
            else:
                self.linebuf += line

    def flush(self):
        if self.linebuf != "":
            encoded_message = self.linebuf.encode("utf-8", "ignore").decode("utf-8")
            self.logger.log(self.log_level, encoded_message.rstrip())
        self.linebuf = ""


def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch

    setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
    setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)


def get_gpu_memory(max_gpus=None):
    """Get available memory for each GPU."""
    import torch

    gpu_memory = []
    num_gpus = (
        torch.cuda.device_count()
        if max_gpus is None
        else min(max_gpus, torch.cuda.device_count())
    )

    for gpu_id in range(num_gpus):
        with torch.cuda.device(gpu_id):
            device = torch.cuda.current_device()
            gpu_properties = torch.cuda.get_device_properties(device)
            total_memory = gpu_properties.total_memory / (1024**3)
            allocated_memory = torch.cuda.memory_allocated() / (1024**3)
            available_memory = total_memory - allocated_memory
            gpu_memory.append(available_memory)
    return gpu_memory


def oai_moderation(text, custom_thresholds=None):
    """
    Check whether the text violates the OpenAI moderation API.
    """
    import openai

    client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])

    # default to true to be conservative
    flagged = True
    MAX_RETRY = 3
    for _ in range(MAX_RETRY):
        try:
            res = client.moderations.create(input=text)
            flagged = res.results[0].flagged
            if custom_thresholds is not None:
                for category, threshold in custom_thresholds.items():
                    if getattr(res.results[0].category_scores, category) > threshold:
                        flagged = True
            break
        except (openai.OpenAIError, KeyError, IndexError) as e:
            print(f"MODERATION ERROR: {e}\nInput: {text}")
    return flagged


def moderation_filter(text, model_list, do_moderation=False):
    # Apply moderation for below models
    MODEL_KEYWORDS = [
        "claude",
        "gpt",
        "bard",
        "mistral-large",
        "command-r",
        "dbrx",
        "gemini",
        "reka",
    ]

    custom_thresholds = {"sexual": 0.3}
    # set a stricter threshold for claude
    for model in model_list:
        if "claude" in model:
            custom_thresholds = {"sexual": 0.2}

    for keyword in MODEL_KEYWORDS:
        for model in model_list:
            if keyword in model:
                do_moderation = True
                break

    if do_moderation:
        return oai_moderation(text, custom_thresholds)
    return False


def clean_flant5_ckpt(ckpt_path):
    """
    Flan-t5 trained with HF+FSDP saves corrupted weights for shared embeddings.
    Use this function to make sure it can be correctly loaded.
    """
    import torch

    index_file = os.path.join(ckpt_path, "pytorch_model.bin.index.json")
    index_json = json.load(open(index_file, "r"))

    weightmap = index_json["weight_map"]

    share_weight_file = weightmap["shared.weight"]
    share_weight = torch.load(os.path.join(ckpt_path, share_weight_file))[
        "shared.weight"
    ]

    for weight_name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]:
        weight_file = weightmap[weight_name]
        weight = torch.load(os.path.join(ckpt_path, weight_file))
        weight[weight_name] = share_weight
        torch.save(weight, os.path.join(ckpt_path, weight_file))


def pretty_print_semaphore(semaphore):
    """Print a semaphore in better format."""
    if semaphore is None:
        return "None"
    return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"


"""A javascript function to get url parameters for the gradio web server."""
get_window_url_params_js = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log("url_params", url_params);
    return url_params;
    }
"""

get_window_url_params_with_tos_js = """
function() {
    const params = new URLSearchParams(window.location.search);
    const url_params = Object.fromEntries(params);
    console.log("url_params", url_params);

    const urlContainsLeaderboard = Object.keys(url_params).some(key => key.toLowerCase().includes("leaderboard"));
    const msg = "Users of this website are required to agree to the following terms:\\n\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nPlease do not upload any private information.\\nThe service collects user dialogue data, including both text and images, and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license.";
    if (!urlContainsLeaderboard) {
        if (window.alerted_before) return;
        alert(msg);
        window.alerted_before = true;
    }
    return url_params;
    }
"""

alert_js = """
() => {
    if (window.alerted_before) return;
    const msg = "Users of this website are required to agree to the following terms:\\n\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nPlease do not upload any private information.\\nThe service collects user dialogue data, including both text and images, and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license.";
    alert(msg);
    window.alerted_before = true;
}
"""


def iter_over_async(
    async_gen: AsyncGenerator, event_loop: AbstractEventLoop
) -> Generator:
    """
    Convert async generator to sync generator

    :param async_gen: the AsyncGenerator to convert
    :param event_loop: the event loop to run on
    :returns: Sync generator
    """
    ait = async_gen.__aiter__()

    async def get_next():
        try:
            obj = await ait.__anext__()
            return False, obj
        except StopAsyncIteration:
            return True, None

    while True:
        done, obj = event_loop.run_until_complete(get_next())
        if done:
            break
        yield obj


def detect_language(text: str) -> str:
    """Detect the language of a string."""
    import polyglot  # pip3 install polyglot pyicu pycld2
    from polyglot.detect import Detector
    from polyglot.detect.base import logger as polyglot_logger
    import pycld2

    polyglot_logger.setLevel("ERROR")

    try:
        lang_code = Detector(text).language.name
    except (pycld2.error, polyglot.detect.base.UnknownLanguage):
        lang_code = "unknown"
    return lang_code


def parse_gradio_auth_creds(filename: str):
    """Parse a username:password file for gradio authorization."""
    gradio_auth_creds = []
    with open(filename, "r", encoding="utf8") as file:
        for line in file.readlines():
            gradio_auth_creds += [x.strip() for x in line.split(",") if x.strip()]
    if gradio_auth_creds:
        auth = [tuple(cred.split(":")) for cred in gradio_auth_creds]
    else:
        auth = None
    return auth


def is_partial_stop(output: str, stop_str: str):
    """Check whether the output contains a partial stop str."""
    for i in range(0, min(len(output), len(stop_str))):
        if stop_str.startswith(output[-i:]):
            return True
    return False


def run_cmd(cmd: str):
    """Run a bash command."""
    print(cmd)
    return os.system(cmd)


def is_sentence_complete(output: str):
    """Check whether the output is a complete sentence."""
    end_symbols = (".", "?", "!", "...", "。", "?", "!", "…", '"', "'", "”")
    return output.endswith(end_symbols)


# Models don't use the same configuration key for determining the maximum
# sequence length.  Store them here so we can sanely check them.
# NOTE: The ordering here is important.  Some models have two of these and we
# have a preference for which value gets used.
SEQUENCE_LENGTH_KEYS = [
    "max_position_embeddings",
    "max_sequence_length",
    "seq_length",
    "max_seq_len",
    "model_max_length",
]


def get_context_length(config):
    """Get the context length of a model from a huggingface model config."""
    rope_scaling = getattr(config, "rope_scaling", None)
    if rope_scaling:
        rope_scaling_factor = config.rope_scaling["factor"]
    else:
        rope_scaling_factor = 1

    for key in SEQUENCE_LENGTH_KEYS:
        val = getattr(config, key, None)
        if val is not None:
            return int(rope_scaling_factor * val)
    return 2048


def str_to_torch_dtype(dtype: str):
    import torch

    if dtype is None:
        return None
    elif dtype == "float32":
        return torch.float32
    elif dtype == "float16":
        return torch.float16
    elif dtype == "bfloat16":
        return torch.bfloat16
    else:
        raise ValueError(f"Unrecognized dtype: {dtype}")


def load_image(image_file):
    from PIL import Image
    import requests

    image = None

    if image_file.startswith("http://") or image_file.startswith("https://"):
        timeout = int(os.getenv("REQUEST_TIMEOUT", "3"))
        response = requests.get(image_file, timeout=timeout)
        image = Image.open(BytesIO(response.content))
    elif image_file.lower().endswith(("png", "jpg", "jpeg", "webp", "gif")):
        image = Image.open(image_file)
    elif image_file.startswith("data:"):
        image_file = image_file.split(",")[1]
        image = Image.open(BytesIO(base64.b64decode(image_file)))
    else:
        image = Image.open(BytesIO(base64.b64decode(image_file)))

    return image


def upload_image_file_to_gcs(image, filename):
    from google.cloud import storage
    import io

    storage_client = storage.Client()
    # upload file to GCS
    bucket = storage_client.get_bucket("arena_service_data")

    blob = bucket.blob(f"{filename}")
    if not blob.exists():
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")
        buffer.seek(0)
        blob.upload_from_file(buffer, content_type="image/png")

    return blob.public_url


def get_image_file_from_gcs(filename):
    from google.cloud import storage

    storage_client = storage.Client()
    bucket = storage_client.get_bucket("arena_service_data")
    blob = bucket.blob(f"{filename}")
    contents = blob.download_as_bytes()

    return contents


def image_moderation_request(image_bytes, endpoint, api_key):
    headers = {"Content-Type": "image/jpeg", "Ocp-Apim-Subscription-Key": api_key}

    MAX_RETRIES = 3
    for _ in range(MAX_RETRIES):
        response = requests.post(endpoint, headers=headers, data=image_bytes).json()
        try:
            if response["Status"]["Code"] == 3000:
                break
        except:
            time.sleep(0.5)
    return response


def image_moderation_provider(image, api_type):
    if api_type == "nsfw":
        endpoint = os.environ["AZURE_IMG_MODERATION_ENDPOINT"]
        api_key = os.environ["AZURE_IMG_MODERATION_API_KEY"]
        response = image_moderation_request(image, endpoint, api_key)
        print(response)
        return response["IsImageAdultClassified"]
    elif api_type == "csam":
        endpoint = (
            "https://api.microsoftmoderator.com/photodna/v1.0/Match?enhance=false"
        )
        api_key = os.environ["PHOTODNA_API_KEY"]
        response = image_moderation_request(image, endpoint, api_key)
        return response["IsMatch"]


def image_moderation_filter(image):
    print(f"moderating image")

    image_bytes = base64.b64decode(image.base64_str)

    nsfw_flagged = image_moderation_provider(image_bytes, "nsfw")
    csam_flagged = False

    if nsfw_flagged:
        csam_flagged = image_moderation_provider(image_bytes, "csam")

    return nsfw_flagged, csam_flagged
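For reference, a small self-contained example (not part of the uploaded files) of the stopping helpers above, assuming this checkpoint copy mirrors the importable fastchat.utils module:

# Editorial sketch; assumes fastchat is installed and fastchat.utils matches this file.
from fastchat.utils import is_partial_stop, is_sentence_complete

# A streaming loop can hold back output that might be the start of a stop string:
print(is_partial_stop("...the answer is ##", "###"))  # True: "##" could grow into "###"
print(is_partial_stop("...the answer is 42", "###"))  # False: safe to emit
print(is_sentence_complete("The answer is 42."))      # True: ends with a sentence terminator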
fastchat/__init__.py
ADDED
@@ -0,0 +1 @@
__version__ = "0.2.36"
fastchat/constants.py
ADDED
@@ -0,0 +1,294 @@
"""
Global constants.
"""

from enum import IntEnum
import os

REPO_PATH = os.path.dirname(os.path.dirname(__file__))


TASKS = ["summarization", "question-answering", "sentiment-analysis", "text-classification", "toxic-detection", "knowledge-opended", "knowledge-mtpchoice", "language-modelling", "information-retrieval", "reasoning", "math"]

# Survey Link URL (to be removed)
SURVEY_LINK = """<div style='text-align: center; margin: 20px 0;'>
    <div style='display: inline-block; border: 2px solid #DE3163; padding: 10px; border-radius: 5px;'>
        <span style='color: #DE3163; font-weight: bold;'>We would love your feedback! Fill out <a href='https://docs.google.com/forms/d/e/1FAIpQLSfKSxwFOW6qD05phh4fwYjk8q0YV1VQe_bmK0_qOVTbC66_MA/viewform?usp=sf_link' style='color: #DE3163; text-decoration: underline;'>this short survey</a> to tell us what you like about the arena, what you don't like, and what you want to see in the future.</span>
    </div>
</div>"""

##### For the gradio web server
SERVER_ERROR_MSG = (
    "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
)
TEXT_MODERATION_MSG = (
    "$MODERATION$ YOUR TEXT VIOLATES OUR CONTENT MODERATION GUIDELINES."
)
IMAGE_MODERATION_MSG = (
    "$MODERATION$ YOUR IMAGE VIOLATES OUR CONTENT MODERATION GUIDELINES."
)
MODERATION_MSG = "$MODERATION$ YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES."
CONVERSATION_LIMIT_MSG = "YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION."
INACTIVE_MSG = "THIS SESSION HAS BEEN INACTIVE FOR TOO LONG. PLEASE REFRESH THIS PAGE."
SLOW_MODEL_MSG = "⚠️ Both models will show the responses all at once. Please stay patient as it may take over 30 seconds."
RATE_LIMIT_MSG = "**RATE LIMIT OF THIS MODEL IS REACHED. PLEASE COME BACK LATER OR USE <span style='color: red; font-weight: bold;'>[BATTLE MODE](https://chat.lmsys.org)</span> (the 1st tab).**"
# Maximum input length
INPUT_CHAR_LEN_LIMIT = int(os.getenv("FASTCHAT_INPUT_CHAR_LEN_LIMIT", 12000))
BLIND_MODE_INPUT_CHAR_LEN_LIMIT = int(
    os.getenv("FASTCHAT_BLIND_MODE_INPUT_CHAR_LEN_LIMIT", 30000)
)
# Maximum conversation turns
CONVERSATION_TURN_LIMIT = 50
# Session expiration time
SESSION_EXPIRATION_TIME = 3600
# The output dir of log files
LOGDIR = os.getenv("LOGDIR", ".")
# CPU Instruction Set Architecture
CPU_ISA = os.getenv("CPU_ISA")


##### For the controller and workers (could be overwritten through ENV variables.)
CONTROLLER_HEART_BEAT_EXPIRATION = int(
    os.getenv("FASTCHAT_CONTROLLER_HEART_BEAT_EXPIRATION", 90)
)
WORKER_HEART_BEAT_INTERVAL = int(os.getenv("FASTCHAT_WORKER_HEART_BEAT_INTERVAL", 45))
WORKER_API_TIMEOUT = int(os.getenv("FASTCHAT_WORKER_API_TIMEOUT", 100))
WORKER_API_EMBEDDING_BATCH_SIZE = int(
    os.getenv("FASTCHAT_WORKER_API_EMBEDDING_BATCH_SIZE", 4)
)


class ErrorCode(IntEnum):
    """
    https://platform.openai.com/docs/guides/error-codes/api-errors
    """

    VALIDATION_TYPE_ERROR = 40001

    INVALID_AUTH_KEY = 40101
    INCORRECT_AUTH_KEY = 40102
    NO_PERMISSION = 40103

    INVALID_MODEL = 40301
    PARAM_OUT_OF_RANGE = 40302
    CONTEXT_OVERFLOW = 40303

    RATE_LIMIT = 42901
    QUOTA_EXCEEDED = 42902
    ENGINE_OVERLOADED = 42903

    INTERNAL_ERROR = 50001
    CUDA_OUT_OF_MEMORY = 50002
    GRADIO_REQUEST_ERROR = 50003
    GRADIO_STREAM_UNKNOWN_ERROR = 50004
    CONTROLLER_NO_WORKER = 50005
    CONTROLLER_WORKER_TIMEOUT = 50006


LANGUAGES = list(map(lambda x: (x[1], x[0]), [
    ('aa', 'Afar'),
    ('ab', 'Abkhazian'),
    ('af', 'Afrikaans'),
    ('ak', 'Akan'),
    ('sq', 'Albanian'),
    ('am', 'Amharic'),
    ('ar', 'Arabic'),
    ('an', 'Aragonese'),
    ('hy', 'Armenian'),
    ('as', 'Assamese'),
    ('av', 'Avaric'),
    ('ae', 'Avestan'),
    ('ay', 'Aymara'),
    ('az', 'Azerbaijani'),
    ('ba', 'Bashkir'),
    ('bm', 'Bambara'),
    ('eu', 'Basque'),
    ('be', 'Belarusian'),
    ('bn', 'Bengali'),
    ('bh', 'Bihari languages'),
    ('bi', 'Bislama'),
    ('bo', 'Tibetan'),
    ('bs', 'Bosnian'),
    ('br', 'Breton'),
    ('bg', 'Bulgarian'),
    ('my', 'Burmese'),
    ('ca', 'Catalan; Valencian'),
    ('cs', 'Czech'),
    ('ch', 'Chamorro'),
    ('ce', 'Chechen'),
    ('zh', 'Chinese'),
    ('cu', 'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic'),
    ('cv', 'Chuvash'),
    ('kw', 'Cornish'),
    ('co', 'Corsican'),
    ('cr', 'Cree'),
    ('cy', 'Welsh'),
    ('cs', 'Czech'),
    ('da', 'Danish'),
    ('de', 'German'),
    ('dv', 'Divehi; Dhivehi; Maldivian'),
    ('nl', 'Dutch; Flemish'),
    ('dz', 'Dzongkha'),
    ('el', 'Greek, Modern (1453-)'),
    ('en', 'English'),
    ('eo', 'Esperanto'),
    ('et', 'Estonian'),
    ('eu', 'Basque'),
    ('ee', 'Ewe'),
    ('fo', 'Faroese'),
    ('fa', 'Persian'),
    ('fj', 'Fijian'),
    ('fi', 'Finnish'),
    ('fr', 'French'),
    ('fr', 'French'),
    ('fy', 'Western Frisian'),
    ('ff', 'Fulah'),
    ('Ga', 'Georgian'),
    ('de', 'German'),
    ('gd', 'Gaelic; Scottish Gaelic'),
    ('ga', 'Irish'),
    ('gl', 'Galician'),
    ('gv', 'Manx'),
    ('el', 'Greek, Modern (1453-)'),
    ('gn', 'Guarani'),
    ('gu', 'Gujarati'),
    ('ht', 'Haitian; Haitian Creole'),
    ('ha', 'Hausa'),
    ('he', 'Hebrew'),
    ('hz', 'Herero'),
    ('hi', 'Hindi'),
    ('ho', 'Hiri Motu'),
    ('hr', 'Croatian'),
    ('hu', 'Hungarian'),
    ('hy', 'Armenian'),
    ('ig', 'Igbo'),
    ('is', 'Icelandic'),
    ('io', 'Ido'),
    ('ii', 'Sichuan Yi; Nuosu'),
    ('iu', 'Inuktitut'),
    ('ie', 'Interlingue; Occidental'),
    ('ia', 'Interlingua (International Auxiliary Language Association)'),
    ('id', 'Indonesian'),
    ('ik', 'Inupiaq'),
    ('is', 'Icelandic'),
    ('it', 'Italian'),
    ('jv', 'Javanese'),
    ('ja', 'Japanese'),
    ('kl', 'Kalaallisut; Greenlandic'),
    ('kn', 'Kannada'),
    ('ks', 'Kashmiri'),
    ('ka', 'Georgian'),
    ('kr', 'Kanuri'),
    ('kk', 'Kazakh'),
    ('km', 'Central Khmer'),
    ('ki', 'Kikuyu; Gikuyu'),
    ('rw', 'Kinyarwanda'),
    ('ky', 'Kirghiz; Kyrgyz'),
    ('kv', 'Komi'),
    ('kg', 'Kongo'),
    ('ko', 'Korean'),
    ('kj', 'Kuanyama; Kwanyama'),
    ('ku', 'Kurdish'),
    ('lo', 'Lao'),
    ('la', 'Latin'),
    ('lv', 'Latvian'),
    ('li', 'Limburgan; Limburger; Limburgish'),
    ('ln', 'Lingala'),
    ('lt', 'Lithuanian'),
    ('lb', 'Luxembourgish; Letzeburgesch'),
    ('lu', 'Luba-Katanga'),
    ('lg', 'Ganda'),
    ('mk', 'Macedonian'),
    ('mh', 'Marshallese'),
    ('ml', 'Malayalam'),
    ('mi', 'Maori'),
    ('mr', 'Marathi'),
    ('ms', 'Malay'),
    ('Mi', 'Micmac'),
    ('mk', 'Macedonian'),
    ('mg', 'Malagasy'),
    ('mt', 'Maltese'),
    ('mn', 'Mongolian'),
    ('mi', 'Maori'),
    ('ms', 'Malay'),
    ('my', 'Burmese'),
    ('na', 'Nauru'),
    ('nv', 'Navajo; Navaho'),
    ('nr', 'Ndebele, South; South Ndebele'),
    ('nd', 'Ndebele, North; North Ndebele'),
    ('ng', 'Ndonga'),
    ('ne', 'Nepali'),
    ('nl', 'Dutch; Flemish'),
    ('nn', 'Norwegian Nynorsk; Nynorsk, Norwegian'),
    ('nb', 'Bokmål, Norwegian; Norwegian Bokmål'),
    ('no', 'Norwegian'),
    ('oc', 'Occitan (post 1500)'),
    ('oj', 'Ojibwa'),
    ('or', 'Oriya'),
    ('om', 'Oromo'),
    ('os', 'Ossetian; Ossetic'),
    ('pa', 'Panjabi; Punjabi'),
    ('fa', 'Persian'),
    ('pi', 'Pali'),
    ('pl', 'Polish'),
    ('pt', 'Portuguese'),
    ('ps', 'Pushto; Pashto'),
    ('qu', 'Quechua'),
    ('rm', 'Romansh'),
    ('ro', 'Romanian; Moldavian; Moldovan'),
    ('ro', 'Romanian; Moldavian; Moldovan'),
    ('rn', 'Rundi'),
    ('ru', 'Russian'),
    ('sg', 'Sango'),
    ('sa', 'Sanskrit'),
    ('si', 'Sinhala; Sinhalese'),
    ('sk', 'Slovak'),
    ('sk', 'Slovak'),
    ('sl', 'Slovenian'),
    ('se', 'Northern Sami'),
    ('sm', 'Samoan'),
    ('sn', 'Shona'),
    ('sd', 'Sindhi'),
    ('so', 'Somali'),
    ('st', 'Sotho, Southern'),
    ('es', 'Spanish; Castilian'),
    ('sq', 'Albanian'),
    ('sc', 'Sardinian'),
    ('sr', 'Serbian'),
    ('ss', 'Swati'),
    ('su', 'Sundanese'),
    ('sw', 'Swahili'),
    ('sv', 'Swedish'),
    ('ty', 'Tahitian'),
    ('ta', 'Tamil'),
    ('tt', 'Tatar'),
    ('te', 'Telugu'),
    ('tg', 'Tajik'),
    ('tl', 'Tagalog'),
    ('th', 'Thai'),
    ('bo', 'Tibetan'),
    ('ti', 'Tigrinya'),
    ('to', 'Tonga (Tonga Islands)'),
    ('tn', 'Tswana'),
    ('ts', 'Tsonga'),
    ('tk', 'Turkmen'),
    ('tr', 'Turkish'),
    ('tw', 'Twi'),
    ('ug', 'Uighur; Uyghur'),
    ('uk', 'Ukrainian'),
    ('ur', 'Urdu'),
    ('uz', 'Uzbek'),
    ('ve', 'Venda'),
    ('vi', 'Vietnamese'),
    ('vo', 'Volapük'),
    ('cy', 'Welsh'),
    ('wa', 'Walloon'),
    ('wo', 'Wolof'),
    ('xh', 'Xhosa'),
    ('yi', 'Yiddish'),
    ('yo', 'Yoruba'),
    ('za', 'Zhuang; Chuang'),
    ('zh', 'Chinese'),
    ('zu', 'Zulu')
]))
fastchat/conversation.py
ADDED
@@ -0,0 +1,2117 @@
+"""
+Conversation prompt templates.
+
+We kindly request that you import fastchat instead of copying this file if you wish to use it.
+If you have any changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
+"""
+
+import base64
+import dataclasses
+from enum import auto, IntEnum
+from io import BytesIO
+import os
+from typing import List, Any, Dict, Union, Tuple
+
+
+class SeparatorStyle(IntEnum):
+    """Separator styles."""
+
+    ADD_COLON_SINGLE = auto()
+    ADD_COLON_TWO = auto()
+    ADD_COLON_SPACE_SINGLE = auto()
+    NO_COLON_SINGLE = auto()
+    NO_COLON_TWO = auto()
+    ADD_NEW_LINE_SINGLE = auto()
+    LLAMA2 = auto()
+    LLAMA3 = auto()
+    CHATGLM = auto()
+    CHATML = auto()
+    CHATINTERN = auto()
+    DOLLY = auto()
+    RWKV = auto()
+    PHOENIX = auto()
+    ROBIN = auto()
+    FALCON_CHAT = auto()
+    CHATGLM3 = auto()
+    DEEPSEEK_CHAT = auto()
+    METAMATH = auto()
+    YUAN2 = auto()
+    GEMMA = auto()
+    CLLM = auto()
+    DEFAULT = auto()
+
+
+IMAGE_PLACEHOLDER_STR = "$$<image>$$"
+
+
+@dataclasses.dataclass
+class Conversation:
+    """A class that manages prompt templates and keeps all conversation history."""
+
+    # The name of this template
+    name: str
+    # The template of the system prompt
+    system_template: str = "{system_message}"
+    # The system message
+    system_message: str = ""
+    system_message_vision: str = ""
+    # The names of two roles
+    roles: Tuple[str] = ("USER", "ASSISTANT")
+    # All messages. Each item is (role, message).
+    # Each message is either a string or a tuple of (string, List[image_url]).
+    messages: List[List[str]] = ()
+    # The number of few shot examples
+    offset: int = 0
+    # The separator style and configurations
+    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
+    sep: str = "\n"
+    sep2: str = None
+    # Stop criteria (the default one is EOS token)
+    stop_str: Union[str, List[str]] = None
+    # Stops generation if meeting any token in this list
+    stop_token_ids: List[int] = None
+    # The maximum image size in megabytes that this model takes in. None means we do not resize the image.
+    max_image_size_mb: int = None
+
+    def get_prompt(self) -> str:
+        """Get the prompt for generation."""
+        system_prompt = self.system_template.format(system_message=self.system_message)
+        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ": " + message + self.sep
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt + seps[0]
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    if type(message) is tuple:
+                        message, images = message
+                        message = IMAGE_PLACEHOLDER_STR * len(images) + message
+                    ret += role + ": " + message + seps[i % 2]
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ": " + message + self.sep
+                else:
+                    ret += role + ": "  # must be end with a space
+            return ret
+        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
+            ret = "" if system_prompt == "" else system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + "\n" + message + self.sep
+                else:
+                    ret += role + "\n"
+            return ret
+        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
+            ret = system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + message + self.sep
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + message + seps[i % 2]
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.RWKV:
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += (
+                        role
+                        + ": "
+                        + message.replace("\r\n", "\n").replace("\n\n", "\n")
+                    )
+                    ret += "\n\n"
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.LLAMA2:
+            seps = [self.sep, self.sep2]
+            if self.system_message:
+                ret = system_prompt
+            else:
+                ret = "[INST] "
+            for i, (role, message) in enumerate(self.messages):
+                tag = self.roles[i % 2]
+                if message:
+                    if i == 0:
+                        ret += message + " "
+                    else:
+                        ret += tag + " " + message + seps[i % 2]
+                else:
+                    ret += tag
+            return ret
+        elif self.sep_style == SeparatorStyle.LLAMA3:
+            ret = "<|begin_of_text|>"
+            if self.system_message:
+                ret += system_prompt
+            else:
+                ret += ""
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
+                    ret += f"{message.strip()}<|eot_id|>"
+                else:
+                    ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATGLM:
+            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
+            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
+            round_add_n = 1 if self.name == "chatglm2" else 0
+            if system_prompt:
+                ret = system_prompt + self.sep
+            else:
+                ret = ""
+
+            for i, (role, message) in enumerate(self.messages):
+                if i % 2 == 0:
+                    ret += f"[Round {i//2 + round_add_n}]{self.sep}"
+
+                if message:
+                    ret += f"{role}:{message}{self.sep}"
+                else:
+                    ret += f"{role}:"
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATML:
+            ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
+            for role, message in self.messages:
+                if message:
+                    if type(message) is tuple:
+                        message, images = message
+                        message = IMAGE_PLACEHOLDER_STR * len(images) + message
+                    ret += role + "\n" + message + self.sep + "\n"
+                else:
+                    ret += role + "\n"
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATGLM3:
+            ret = ""
+            if self.system_message:
+                ret += system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + "\n" + message
+                else:
+                    ret += role
+            return ret
+        elif self.sep_style == SeparatorStyle.CHATINTERN:
+            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if i % 2 == 0:
+                    ret += "<s>"
+                if message:
+                    ret += role + ":" + message + seps[i % 2] + "\n"
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.DOLLY:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ":\n" + message + seps[i % 2]
+                    if i % 2 == 1:
+                        ret += "\n\n"
+                else:
+                    ret += role + ":\n"
+            return ret
+        elif self.sep_style == SeparatorStyle.PHOENIX:
+            ret = system_prompt
+            for role, message in self.messages:
+                if message:
+                    ret += role + ": " + "<s>" + message + "</s>"
+                else:
+                    ret += role + ": " + "<s>"
+            return ret
+        elif self.sep_style == SeparatorStyle.ROBIN:
+            ret = system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ":\n" + message + self.sep
+                else:
+                    ret += role + ":\n"
+            return ret
+        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
+            ret = ""
+            if self.system_message:
+                ret += system_prompt + self.sep
+            for role, message in self.messages:
+                if message:
+                    ret += role + ": " + message + self.sep
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.METAMATH:
+            ret = "" if system_prompt == "" else system_prompt + self.sep
+            for i, (role, message) in enumerate(self.messages):
+                # For MetaMath, sep2 is used to prefix the message.
+                starting_sep = ":\n" if i % 2 == 0 else ": " + self.sep2
+                ending_sep = self.sep if i % 2 == 0 else ""
+                if message:
+                    ret += role + starting_sep + message + ending_sep
+                else:
+                    ret += role + starting_sep
+            return ret
+        elif self.sep_style == SeparatorStyle.DEEPSEEK_CHAT:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt
+            for i, (role, message) in enumerate(self.messages):
+                if message:
+                    ret += role + ": " + message + seps[i % 2]
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.YUAN2:
+            seps = [self.sep, self.sep2]
+            ret = ""
+            if self.system_message:
+                ret += system_prompt + seps[1]
+            for _, message in self.messages:
+                if message:
+                    ret += message + "<n>"
+                else:
+                    ret += ""
+            ret = ret.rstrip("<n>") + seps[0]
+            return ret
+        elif self.sep_style == SeparatorStyle.GEMMA:
+            ret = "<bos>"
+            for role, message in self.messages:
+                if message:
+                    ret += "<start_of_turn>" + role + "\n" + message + self.sep
+                else:
+                    ret += "<start_of_turn>" + role + "\n"
+            return ret
+        elif self.sep_style == SeparatorStyle.CLLM:
+            seps = [self.sep, self.sep2]
+            ret = system_prompt + seps[0]
+            for i, (role, message) in enumerate(self.messages[-2:]):
+                if message:
+                    if type(message) is tuple:
+                        message, images = message
+                        message = IMAGE_PLACEHOLDER_STR * len(images) + message
+                    ret += role + ": " + message + seps[i % 2]
+                else:
+                    ret += role + ":"
+            return ret
+        elif self.sep_style == SeparatorStyle.DEFAULT:
+            ret = system_prompt + "\n"
+            for role, message in self.messages:
+                if message:
+                    if type(message) is tuple:
+                        message, images = message
+                    ret += role + ": " + message + "\n"
+                else:
+                    ret += role + ":"
+            return ret
+        else:
+            raise ValueError(f"Invalid style: {self.sep_style}")
+
+    def get_images(self):
+        images = []
+        for i, (role, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                if type(msg) is tuple:
+                    for image in msg[1]:
+                        images.append(image.base64_str)
+
+        return images
+
+    def set_system_message(self, system_message: str):
+        """Set the system message."""
+        self.system_message = system_message
+
+    def get_system_message(self, is_vision=False):
+        """return the system message."""
+        if is_vision and self.system_message_vision:
+            return self.system_message_vision
+        return self.system_message
+
+    def append_message(self, role: str, message: str):
+        """Append a new message."""
+        self.messages.append([role, message])
+
+    def update_last_message(self, message: str):
+        """Update the last output.
+
+        The last message is typically set to be None when constructing the prompt,
+        so we need to update it in-place after getting the response from a model.
+        """
+        self.messages[-1][1] = message
+
+    def to_gradio_chatbot(self):
+        """Convert the conversation to gradio chatbot format."""
+        from fastchat.serve.vision.image import ImageFormat
+
+        ret = []
+        for i, (role, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                if type(msg) is tuple:
+                    msg, images = msg
+                    image = images[0]  # Only one image on gradio at one time
+                    if image.image_format == ImageFormat.URL:
+                        img_str = f'<img src="{image.url}" alt="user upload image" />'
+                    elif image.image_format == ImageFormat.BYTES:
+                        img_str = f'<img src="data:image/{image.filetype};base64,{image.base64_str}" alt="user upload image" />'
+                    msg = img_str + msg.replace("<image>\n", "").strip()
+
+                ret.append([msg, None])
+            else:
+                ret[-1][-1] = msg
+        return ret
+
+    def to_openai_vision_api_messages(self):
+        """Convert the conversation to OpenAI vision api completion format"""
+        if self.system_message == "":
+            ret = []
+        else:
+            ret = [
+                {
+                    "role": "system",
+                    "content": [{"type": "text", "text": self.system_message}],
+                }
+            ]
+
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                if type(msg) is tuple:
+                    content_list = [{"type": "text", "text": msg[0]}]
+                    image_urls = msg[1]
+                    for image in image_urls:
+                        image_url = image.to_openai_image_format()
+                        content_list.append(
+                            {"type": "image_url", "image_url": {"url": image_url}}
+                        )
+
+                    ret.append({"role": "user", "content": content_list})
+                else:
+                    ret.append(
+                        {"role": "user", "content": [{"type": "text", "text": msg}]}
+                    )
+            else:
+                if msg is not None:
+                    ret.append(
+                        {
+                            "role": "assistant",
+                            "content": [{"type": "text", "text": msg}],
+                        }
+                    )
+        return ret
+
+    def to_openai_api_messages(self):
+        """Convert the conversation to OpenAI chat completion format."""
+        if self.system_message == "":
+            ret = []
+        else:
+            ret = [{"role": "system", "content": self.system_message}]
+
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                ret.append({"role": "user", "content": msg})
+            else:
+                if msg is not None:
+                    ret.append({"role": "assistant", "content": msg})
+        return ret
+
+    def to_gemini_api_messages(self):
+        from fastchat.utils import load_image
+
+        if self.system_message == "":
+            ret = []
+        else:
+            ret = [{"role": "system", "content": self.system_message}]
+
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                if type(msg) is tuple:
+                    text, images = msg[0], msg[1]
+                    content_list = [text]
+                    for image in images:
+                        pil_image = load_image(image.base64_str)
+                        content_list.append(pil_image)
+                    ret.append({"role": "user", "content": content_list})
+                else:
+                    ret.append({"role": "user", "content": msg})
+            else:
+                if msg is not None:
+                    ret.append({"role": "model", "content": msg})
+        return ret
+
+    def to_vertex_api_messages(self):
+        from vertexai.preview.generative_models import Image
+        import base64
+        import requests
+        from fastchat.serve.vision.image import ImageFormat
+
+        if self.system_message == "":
+            ret = []
+        else:
+            ret = [self.system_message]
+
+        for role, msg in self.messages[self.offset :]:
+            if msg is not None:
+                if type(msg) is tuple:
+                    text, images = msg[0], msg[1]
+                    for image in images:
+                        if image.image_format == ImageFormat.URL:
+                            response = requests.get(image.url)
+                            image = response.content
+                        elif image.image_format == ImageFormat.BYTES:  # base64
+                            image = base64.b64decode(image.base64_str)
+                        ret.append(Image.from_bytes(image))
+                    ret.append(text)
+                else:
+                    ret.append(msg)
+
+        return ret
+
+    def to_anthropic_vision_api_messages(self):
+        """Convert the conversation to Claude-3 Messages Vision API format"""
+        ret = [
+            {
+                "role": "system",
+                "content": [{"type": "text", "text": self.system_message}],
+            }
+        ]
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                if type(msg) is tuple:
+                    content_list = [{"type": "text", "text": msg[0]}]
+
+                    for image in msg[1]:
+                        content_list.append(
+                            {
+                                "type": "image",
+                                "source": {
+                                    "type": "base64",
+                                    "media_type": f"image/{image.filetype}",
+                                    "data": image.base64_str,
+                                },
+                            }
+                        )
+
+                    ret.append({"role": "user", "content": content_list})
+                else:
+                    ret.append(
+                        {"role": "user", "content": [{"type": "text", "text": msg}]}
+                    )
+            else:
+                if msg is not None:
+                    ret.append(
+                        {
+                            "role": "assistant",
+                            "content": [{"type": "text", "text": msg}],
+                        }
+                    )
+        return ret
+
+    def to_reka_api_messages(self):
+        from fastchat.serve.vision.image import ImageFormat
+
+        ret = []
+        for i, (_, msg) in enumerate(self.messages[self.offset :]):
+            if i % 2 == 0:
+                if type(msg) == tuple:
+                    text, images = msg
+                    for image in images:
+                        if image.image_format == ImageFormat.URL:
+                            ret.append(
+                                {"type": "human", "text": text, "media_url": image.url}
+                            )
+                        elif image.image_format == ImageFormat.BYTES:
+                            ret.append(
+                                {
+                                    "type": "human",
+                                    "text": text,
+                                    "media_url": f"data:image/{image.filetype};base64,{image.base64_str}",
+                                }
+                            )
+                else:
+                    ret.append({"type": "human", "text": msg})
+            else:
+                if msg is not None:
+                    ret.append({"type": "model", "text": msg})
+
+        return ret
+
+    def save_new_images(self, has_csam_images=False, use_remote_storage=False):
+        import hashlib
+        from fastchat.constants import LOGDIR
+        from fastchat.utils import load_image, upload_image_file_to_gcs
+        from PIL import Image
+
+        _, last_user_message = self.messages[-2]
+
+        if type(last_user_message) == tuple:
+            text, images = last_user_message[0], last_user_message[1]
+
+            image_directory_name = "csam_images" if has_csam_images else "serve_images"
+            for image in images:
+                loaded_image = load_image(image.base64_str)
+                hash_str = hashlib.md5(loaded_image.tobytes()).hexdigest()
+                filename = os.path.join(
+                    image_directory_name,
+                    f"{hash_str}.{image.filetype}",
+                )
+
+                if use_remote_storage and not has_csam_images:
+                    image_url = upload_image_file_to_gcs(loaded_image, filename)
+                    # NOTE(chris): If the URL were public, then we set it here so future model uses the link directly
+                    # images[i] = image_url
+                else:
+                    filename = os.path.join(LOGDIR, filename)
+                    if not os.path.isfile(filename):
+                        os.makedirs(os.path.dirname(filename), exist_ok=True)
+                        loaded_image.save(filename)
+
+    def extract_text_and_image_hashes_from_messages(self):
+        import hashlib
+        from fastchat.utils import load_image
+        from fastchat.serve.vision.image import ImageFormat
+
+        messages = []
+
+        for role, message in self.messages:
+            if type(message) is tuple:
+                text, images = message[0], message[1]
+
+                image_hashes = []
+                for image in images:
+                    if image.image_format == ImageFormat.URL:
+                        image_hashes.append(image)
+                    elif image.image_format == ImageFormat.BYTES:
+                        image = load_image(image.base64_str)
+                        image_hash = hashlib.md5(image.tobytes()).hexdigest()
+                        image_hashes.append(image_hash)
+
+                messages.append((role, (text, image_hashes)))
+            else:
+                messages.append((role, message))
+
+        return messages
+
+    def copy(self):
+        return Conversation(
+            name=self.name,
+            system_template=self.system_template,
+            system_message=self.system_message,
+            system_message_vision=self.system_message_vision,
+            roles=self.roles,
+            messages=[[x, y] for x, y in self.messages],
+            offset=self.offset,
+            sep_style=self.sep_style,
+            sep=self.sep,
+            sep2=self.sep2,
+            stop_str=self.stop_str,
+            stop_token_ids=self.stop_token_ids,
+            max_image_size_mb=self.max_image_size_mb,
+        )
+
+    def dict(self):
+        return {
+            "template_name": self.name,
+            "system_message": self.system_message,
+            "roles": self.roles,
+            "messages": self.extract_text_and_image_hashes_from_messages(),
+            "offset": self.offset,
+        }
+
+
+# A global registry for all conversation templates
+conv_templates: Dict[str, Conversation] = {}
+
+
+def register_conv_template(template: Conversation, override: bool = False):
+    """Register a new conversation template."""
+    if not override:
+        assert (
+            template.name not in conv_templates
+        ), f"{template.name} has been registered."
+
+    conv_templates[template.name] = template
+
+
+def get_conv_template(name: str) -> Conversation:
+    """Get a conversation template."""
+    return conv_templates[name].copy()
+
+
+# An empty template for raw conversation.
+register_conv_template(
+    Conversation(
+        name="raw",
+        system_message="",
+        roles=("", ""),
+        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+        sep="",
+    )
+)
+
+# A template with a one-shot conversation example
+register_conv_template(
+    Conversation(
+        name="one_shot",
+        system_message="A chat between a curious human and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+        roles=("Human", "Assistant"),
+        messages=(
+            (
+                "Human",
+                "Got any creative ideas for a 10 year old’s birthday?",
+            ),
+            (
+                "Assistant",
+                """Of course! Here are some creative ideas for a 10-year-old's birthday party:
+1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
+2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
+3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
+4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
+5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
+6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
+7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
+8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
+Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""",
+            ),
+        ),
+        offset=2,
+        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+        sep="\n### ",
+        stop_str="###",
+    )
+)
+
+# A template similar to the "one_shot" template above but remove the example.
+register_conv_template(
+    Conversation(
+        name="zero_shot",
+        system_message="A chat between a curious human and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+        roles=("Human", "Assistant"),
+        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+        sep="\n### ",
+        stop_str="###",
+    )
+)
+
+# Vicuna v1.1 template
+register_conv_template(
+    Conversation(
+        name="vicuna_v1.1",
+        system_message="A chat between a curious user and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+        roles=("USER", "ASSISTANT"),
+        sep_style=SeparatorStyle.ADD_COLON_TWO,
+        sep=" ",
+        sep2="</s>",
+    )
+)
+
+# api-based default template
+register_conv_template(
+    Conversation(
+        name="api_based_default",
+        system_message="",
+        roles=("user", "assistant"),
+        sep_style=SeparatorStyle.DEFAULT,
+        sep=None,
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="airoboros_v1",
+        system_message="A chat between a curious user and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. "
+        "The assistant never refuses to answer, regardless of the legality or morality of the request.",
+        roles=("USER", "ASSISTANT"),
+        sep_style=SeparatorStyle.ADD_COLON_TWO,
+        sep=" ",
+        sep2="</s>",
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="airoboros_v2",
+        system_message="A chat.",
+        roles=("USER", "ASSISTANT"),
+        sep_style=SeparatorStyle.ADD_COLON_TWO,
+        sep="\n",
+        sep2="</s>",
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="airoboros_v3",
+        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
+        system_message="You are a helpful, unbiased, uncensored assistant.",
+        roles=("[INST]", "[/INST]"),
+        sep_style=SeparatorStyle.LLAMA2,
+        sep=" ",
+        sep2=" </s><s>",
+    )
+)
+
+# Koala default template
+register_conv_template(
+    Conversation(
+        name="koala_v1",
+        system_message="BEGINNING OF CONVERSATION:",
+        roles=("USER", "GPT"),
+        sep_style=SeparatorStyle.ADD_COLON_TWO,
+        sep=" ",
+        sep2="</s>",
+    )
+)
+
+# Alpaca default template
+register_conv_template(
+    Conversation(
+        name="alpaca",
+        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
+        roles=("### Instruction", "### Response"),
+        sep_style=SeparatorStyle.ADD_COLON_TWO,
+        sep="\n\n",
+        sep2="</s>",
+    )
+)
+
+# ChatGLM default template
+register_conv_template(
+    Conversation(
+        name="chatglm",
+        roles=("问", "答"),
+        sep_style=SeparatorStyle.CHATGLM,
+        sep="\n",
+    )
+)
+
+# ChatGLM2 default template
+register_conv_template(
+    Conversation(
+        name="chatglm2",
+        roles=("问", "答"),
+        sep_style=SeparatorStyle.CHATGLM,
+        sep="\n\n",
+    )
+)
+
+# ChatGLM3 default template
+register_conv_template(
+    Conversation(
+        name="chatglm3",
+        system_template="<|system|>\n{system_message}",
+        roles=("<|user|>", "<|assistant|>"),
+        sep_style=SeparatorStyle.CHATGLM3,
+        stop_token_ids=[
+            64795,
+            64797,
+            2,
+        ],  # "<|user|>", "<|observation|>", "</s>"
+    )
+)
+
+# CodeGeex(2) Template
+register_conv_template(
+    Conversation(
+        name="codegeex",
+        roles=("", ""),
+        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+        sep="\n\n",
+        stop_token_ids=[0, 2],
+    )
+)
+
+# Dolly V2 default template
+register_conv_template(
+    Conversation(
+        name="dolly_v2",
+        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
+        roles=("### Instruction", "### Response"),
+        sep_style=SeparatorStyle.DOLLY,
+        sep="\n\n",
+        sep2="### End",
+    )
+)
+
+# OpenAssistant Pythia default template
+register_conv_template(
+    Conversation(
+        name="oasst_pythia",
+        roles=("<|prompter|>", "<|assistant|>"),
+        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+        sep="<|endoftext|>",
+    )
+)
+
+# OpenAssistant default template
+register_conv_template(
+    Conversation(
+        name="oasst_llama",
+        roles=("<|prompter|>", "<|assistant|>"),
+        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+        sep="</s>",
+    )
+)
+
+# OpenChat 3.5 default template
+register_conv_template(
+    Conversation(
+        name="openchat_3.5",
+        roles=("GPT4 Correct User", "GPT4 Correct Assistant"),
+        sep_style=SeparatorStyle.FALCON_CHAT,
+        sep="<|end_of_turn|>",
+    )
+)
+
+# TenyxChat default template
+register_conv_template(
+    Conversation(
+        name="tenyxchat",
+        roles=("User", "Assistant"),
+        sep_style=SeparatorStyle.FALCON_CHAT,
+        sep="<|end_of_turn|>",
+    )
+)
+
+# Deepseek code default template
+register_conv_template(
+    Conversation(
+        name="deepseek-coder",
+        system_template="You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.",
+        roles=("### Instruction:", "### Response:"),
+        sep="\n",
+        stop_str="<|EOT|>",
+        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
+    )
+)
+
+
+# Tulu default template
+register_conv_template(
+    Conversation(
+        name="tulu",
+        roles=("<|user|>", "<|assistant|>"),
+        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
+        sep="\n",
+    )
+)
+
+# StableLM Alpha default template
+register_conv_template(
+    Conversation(
+        name="stablelm",
+        system_template="<|SYSTEM|>{system_message}",
+        system_message="""# StableLM Tuned (Alpha version)
+- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
+- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
+- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
+- StableLM will refuse to participate in anything that could harm a human.
+""",
+        roles=("<|USER|>", "<|ASSISTANT|>"),
+        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+        sep="",
+        stop_token_ids=[50278, 50279, 50277, 1, 0],
+    )
+)
+
+# Baize default template
+register_conv_template(
+    Conversation(
+        name="baize",
+        system_message="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n",
+        roles=("[|Human|]", "[|AI|]"),
+        messages=(
+            ("[|Human|]", "Hello!"),
+            ("[|AI|]", "Hi!"),
+        ),
+        offset=2,
+        sep_style=SeparatorStyle.NO_COLON_SINGLE,
+        sep="\n",
+        stop_str="[|Human|]",
+    )
+)
+
+# RWKV-4-Raven default template
+register_conv_template(
+    Conversation(
+        name="rwkv",
+        roles=("Bob", "Alice"),
+        messages=(
+            ("Bob", "hi"),
+            (
+                "Alice",
+                "Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.",
+            ),
+        ),
+        offset=2,
+        sep_style=SeparatorStyle.RWKV,
+        sep="",
+        stop_str="\n\n",
+    )
+)
+
+# Buddy default template
+register_conv_template(
+    Conversation(
+        name="openbuddy",
+        system_message="""Consider a conversation between User (a human) and Assistant (named Buddy).
+Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
+Buddy cannot access the Internet.
+Buddy can fluently speak the user's language (e.g. English, Chinese).
+Buddy can generate poems, stories, code, essays, songs, parodies, and more.
+Buddy possesses vast knowledge about the world, history, and culture.
+Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
+Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.
+
+User: Hi.
+Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
+        roles=("User", "Assistant"),
+        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+        sep="\n",
+    )
+)
+
+# Phoenix default template
+register_conv_template(
+    Conversation(
+        name="phoenix",
+        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
+        roles=("Human", "Assistant"),
+        sep_style=SeparatorStyle.PHOENIX,
+        sep="</s>",
+    )
+)
+
+# ReaLM default template
+register_conv_template(
+    Conversation(
+        name="ReaLM-7b-v1",
+        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
+        roles=("Human", "Assistant"),
+        sep_style=SeparatorStyle.PHOENIX,
+        sep="</s>",
+    )
+)
+
+# ChatGPT default template
+register_conv_template(
+    Conversation(
+        name="chatgpt",
+        system_message="You are a helpful assistant.",
+        roles=("user", "assistant"),
+        sep_style=SeparatorStyle.DEFAULT,
+        sep=None,
+        max_image_size_mb=None,  # OpenAI does auto-resizing
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="gpt-4-turbo-2024-04-09",
+        system_message=(
+            "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.\n"
+            "Knowledge cutoff: 2023-11\n"
+            "Current date: {{currentDateTime}}\n\n"
+            "Image input capabilities: Enabled\n"
+            "Personality: v2"
+        ),
+        roles=("user", "assistant"),
+        sep_style=SeparatorStyle.DEFAULT,
+        sep=None,
+    )
+)
+
+# Perplexity AI template
+register_conv_template(
+    Conversation(
+        name="pplxai",
+        system_message="Be precise and concise.",
+        roles=("user", "assistant"),
+        sep_style=SeparatorStyle.DEFAULT,
+        sep=None,
+    )
+)
+
+# Claude default template
+register_conv_template(
+    Conversation(
+        name="claude",
+        roles=("Human", "Assistant"),
+        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
+        sep="\n\n",
+        max_image_size_mb=5 / 1.5,
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="claude-3-haiku-20240307",
+        system_message=(
+            "The assistant is Claude, created by Anthropic. The current date is "
+            "{{currentDateTime}}. Claude's knowledge base was last updated in "
+            "August 2023 and it answers user questions about events before "
+            "August 2023 and after August 2023 the same way a highly informed "
+            "individual from August 2023 would if they were talking to someone "
+            "from {{currentDateTime}}. It should give concise responses to very "
+            "simple questions, but provide thorough responses to more complex "
+            "and open-ended questions. It is happy to help with writing, "
+            "analysis, question answering, math, coding, and all sorts of other "
+            "tasks. It uses markdown for coding. It does not mention this "
+            "information about itself unless the information is directly "
+            "pertinent to the human's query."
+        ),
+        roles=("user", "assistant"),
+        sep_style=SeparatorStyle.DEFAULT,
+        sep=None,
+        max_image_size_mb=5 / 1.5,
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="claude-3-sonnet-20240229",
+        system_message=(
+            "The assistant is Claude, created by Anthropic. The current date is "
+            "{{currentDateTime}}. Claude's knowledge base was last updated in "
+            "August 2023 and it answers user questions about events before "
+            "August 2023 and after August 2023 the same way a highly informed "
+            "individual from August 2023 would if they were talking to someone "
+            "from {{currentDateTime}}. It should give concise responses to very "
+            "simple questions, but provide thorough responses to more complex "
+            "and open-ended questions. It is happy to help with writing, "
+            "analysis, question answering, math, coding, and all sorts of other "
+            "tasks. It uses markdown for coding. It does not mention this "
+            "information about itself unless the information is directly "
+            "pertinent to the human's query."
+        ),
+        roles=("user", "assistant"),
+        sep_style=SeparatorStyle.DEFAULT,
+        sep=None,
+        max_image_size_mb=5 / 1.5,
+    )
+)
+
+register_conv_template(
+    Conversation(
+        name="claude-3-5-sonnet-20240620",
+        system_message=(
+            """<claude_info>
+The assistant is Claude, created by Anthropic.
+The current date is {{currentDateTime}}. Claude's knowledge base was last updated on April 2024.
+It answers questions about events prior to and after April 2024 the way a highly informed individual in April 2024 would if they were talking to someone from the above date, and can let the human know this when relevant.
+Claude cannot open URLs, links, or videos. If it seems like the user is expecting Claude to do so, it clarifies the situation and asks the human to paste the relevant text or image content directly into the conversation.
+If it is asked to assist with tasks involving the expression of views held by a significant number of people, Claude provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information.
+It presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.
+Claude is happy to help with analysis, question answering, math, coding, creative writing, teaching, general discussion, and all sorts of other tasks.
+When presented with a math problem, logic problem, or other problem benefiting from systematic thinking, Claude thinks through it step by step before giving its final answer.
+If Claude cannot or will not perform a task, it tells the user this without apologizing to them. It avoids starting its responses with "I'm sorry" or "I apologize".
+If Claude is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, Claude ends its response by reminding the user that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term 'hallucinate' to describe this since the user will understand what it means.
+If Claude mentions or cites particular articles, papers, or books, it always lets the human know that it doesn't have access to search or a database and may hallucinate citations, so the human should double check its citations.
+Claude is very smart and intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.
+Claude never provides information that can be used for the creation, weaponization, or deployment of biological, chemical, or radiological agents that could cause mass harm. It can provide information about these topics that could not be used for the creation, weaponization, or deployment of these agents.
+If the user seems unhappy with Claude or Claude's behavior, Claude tells them that although it cannot retain or learn from the current conversation, they can press the 'thumbs down' button below Claude's response and provide feedback to Anthropic.
+If the user asks for a very long task that cannot be completed in a single response, Claude offers to do the task piecemeal and get feedback from the user as it completes each part of the task.
+Claude uses markdown for code.
+Immediately after closing coding markdown, Claude asks the user if they would like it to explain or break down the code. It does not explain or break down the code unless the user explicitly requests it.
+</claude_info>
+
+<claude_3_family_info>
+This iteration of Claude is part of the Claude 3 model family, which was released in 2024. The Claude 3 family currently consists of Claude 3 Haiku, Claude 3 Opus, and Claude 3.5 Sonnet. Claude 3.5 Sonnet is the most intelligent model. Claude 3 Opus excels at writing and complex tasks. Claude 3 Haiku is the fastest model for daily tasks. The version of Claude in this chat is Claude 3.5 Sonnet. Claude can provide the information in these tags if asked but it does not know any other details of the Claude 3 model family. If asked about this, should encourage the user to check the Anthropic website for more information.
+</claude_3_family_info>
+
+Claude provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the user's message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.
+
+Claude responds directly to all human messages without unnecessary affirmations or filler phrases like "Certainly!", "Of course!", "Absolutely!", "Great!", "Sure!", etc. Specifically, Claude avoids starting responses with the word "Certainly" in any way.
+
+Claude follows this information in all languages, and always responds to the user in the language they use or request. The information above is provided to Claude by Anthropic. Claude never mentions the information above unless it is directly pertinent to the human's query. Claude is now being connected with a human."""
+        ),
+        system_message_vision=(
+            """<claude_info>
+The assistant is Claude, created by Anthropic.
+The current date is {{currentDateTime}}. Claude's knowledge base was last updated on April 2024.
+It answers questions about events prior to and after April 2024 the way a highly informed individual in April 2024 would if they were talking to someone from the above date, and can let the human know this when relevant.
+Claude cannot open URLs, links, or videos. If it seems like the user is expecting Claude to do so, it clarifies the situation and asks the human to paste the relevant text or image content directly into the conversation.
+If it is asked to assist with tasks involving the expression of views held by a significant number of people, Claude provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information.
+It presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.
+Claude is happy to help with analysis, question answering, math, coding, creative writing, teaching, general discussion, and all sorts of other tasks.
+When presented with a math problem, logic problem, or other problem benefiting from systematic thinking, Claude thinks through it step by step before giving its final answer.
+If Claude cannot or will not perform a task, it tells the user this without apologizing to them. It avoids starting its responses with "I'm sorry" or "I apologize".
+If Claude is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, Claude ends its response by reminding the user that although it tries to be accurate, it may hallucinate in response to questions like this. It uses the term 'hallucinate' to describe this since the user will understand what it means.
+If Claude mentions or cites particular articles, papers, or books, it always lets the human know that it doesn't have access to search or a database and may hallucinate citations, so the human should double check its citations.
+Claude is very smart and intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.
+Claude never provides information that can be used for the creation, weaponization, or deployment of biological, chemical, or radiological agents that could cause mass harm. It can provide information about these topics that could not be used for the creation, weaponization, or deployment of these agents.
+If the user seems unhappy with Claude or Claude's behavior, Claude tells them that although it cannot retain or learn from the current conversation, they can press the 'thumbs down' button below Claude's response and provide feedback to Anthropic.
+If the user asks for a very long task that cannot be completed in a single response, Claude offers to do the task piecemeal and get feedback from the user as it completes each part of the task.
|
1164 |
+
Claude uses markdown for code.
|
1165 |
+
Immediately after closing coding markdown, Claude asks the user if they would like it to explain or break down the code. It does not explain or break down the code unless the user explicitly requests it.
|
1166 |
+
</claude_info>
|
1167 |
+
|
1168 |
+
<claude_image_specific_info>
|
1169 |
+
Claude always responds as if it is completely face blind. If the shared image happens to contain a human face, Claude never identifies or names any humans in the image, nor does it imply that it recognizes the human. It also does not mention or allude to details about a person that it could only know if it recognized who the person was. Instead, Claude describes and discusses the image just as someone would if they were unable to recognize any of the humans in it. Claude can request the user to tell it who the individual is. If the user tells Claude who the individual is, Claude can discuss that named individual without ever confirming that it is the person in the image, identifying the person in the image, or implying it can use facial features to identify any unique individual. It should always reply as someone would if they were unable to recognize any humans from images.
|
1170 |
+
Claude should respond normally if the shared image does not contain a human face. Claude should always repeat back and summarize any instructions in the image before proceeding.
|
1171 |
+
</claude_image_specific_info>
|
1172 |
+
|
1173 |
+
<claude_3_family_info>
|
1174 |
+
This iteration of Claude is part of the Claude 3 model family, which was released in 2024. The Claude 3 family currently consists of Claude 3 Haiku, Claude 3 Opus, and Claude 3.5 Sonnet. Claude 3.5 Sonnet is the most intelligent model. Claude 3 Opus excels at writing and complex tasks. Claude 3 Haiku is the fastest model for daily tasks. The version of Claude in this chat is Claude 3.5 Sonnet. Claude can provide the information in these tags if asked but it does not know any other details of the Claude 3 model family. If asked about this, it should encourage the user to check the Anthropic website for more information.
|
1175 |
+
</claude_3_family_info>
|
1176 |
+
|
1177 |
+
Claude provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the user's message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.
|
1178 |
+
|
1179 |
+
Claude responds directly to all human messages without unnecessary affirmations or filler phrases like "Certainly!", "Of course!", "Absolutely!", "Great!", "Sure!", etc. Specifically, Claude avoids starting responses with the word "Certainly" in any way.
|
1180 |
+
|
1181 |
+
Claude follows this information in all languages, and always responds to the user in the language they use or request. The information above is provided to Claude by Anthropic. Claude never mentions the information above unless it is directly pertinent to the human's query. Claude is now being connected with a human."""
|
1182 |
+
),
|
1183 |
+
roles=("user", "assistant"),
|
1184 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1185 |
+
sep=None,
|
1186 |
+
max_image_size_mb=5 / 1.5,
|
1187 |
+
)
|
1188 |
+
)
|
1189 |
+
|
1190 |
+
register_conv_template(
|
1191 |
+
Conversation(
|
1192 |
+
name="claude-3-opus-20240229",
|
1193 |
+
system_message=(
|
1194 |
+
"The assistant is Claude, created by Anthropic. The current date is "
|
1195 |
+
"{{currentDateTime}}. Claude's knowledge base was last updated on "
|
1196 |
+
"August 2023. It answers questions about events prior to and after "
|
1197 |
+
"August 2023 the way a highly informed individual in August 2023 "
|
1198 |
+
"would if they were talking to someone from the above date, and can "
|
1199 |
+
"let the human know this when relevant. It should give concise "
|
1200 |
+
"responses to very simple questions, but provide thorough responses "
|
1201 |
+
"to more complex and open-ended questions. If it is asked to assist "
|
1202 |
+
"with tasks involving the expression of views held by a significant "
|
1203 |
+
"number of people, Claude provides assistance with the task even if "
|
1204 |
+
"it personally disagrees with the views being expressed, but follows "
|
1205 |
+
"this with a discussion of broader perspectives. Claude doesn't "
|
1206 |
+
"engage in stereotyping, including the negative stereotyping of "
|
1207 |
+
"majority groups. If asked about controversial topics, Claude tries "
|
1208 |
+
"to provide careful thoughts and objective information without "
|
1209 |
+
"downplaying its harmful content or implying that there are reasonable "
|
1210 |
+
"perspectives on both sides. It is happy to help with writing, "
|
1211 |
+
"analysis, question answering, math, coding, and all sorts of other "
|
1212 |
+
"tasks. It uses markdown for coding. It does not mention this "
|
1213 |
+
"information about itself unless the information is directly pertinent "
|
1214 |
+
"to the human's query."
|
1215 |
+
),
|
1216 |
+
roles=("user", "assistant"),
|
1217 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1218 |
+
sep=None,
|
1219 |
+
max_image_size_mb=5 / 1.5,
|
1220 |
+
)
|
1221 |
+
)
|
1222 |
+
|
1223 |
+
# MetaMath default template
|
1224 |
+
# reference: https://github.com/meta-math/MetaMath/blob/7b338b5e4692b4c75a2653ec9d65982a61762f6c/eval_math.py#L58
|
1225 |
+
register_conv_template(
|
1226 |
+
Conversation(
|
1227 |
+
name="metamath",
|
1228 |
+
system_template="{system_message}",
|
1229 |
+
system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
|
1230 |
+
roles=("### Instruction", "### Response"),
|
1231 |
+
sep_style=SeparatorStyle.METAMATH,
|
1232 |
+
sep="\n\n",
|
1233 |
+
sep2="Let's think step by step.",
|
1234 |
+
)
|
1235 |
+
)
|
1236 |
+
|
1237 |
+
# MPT default template
|
1238 |
+
register_conv_template(
|
1239 |
+
Conversation(
|
1240 |
+
name="mpt-7b-chat",
|
1241 |
+
system_template="""<|im_start|>system
|
1242 |
+
{system_message}""",
|
1243 |
+
system_message="""- You are a helpful assistant chatbot trained by MosaicML.
|
1244 |
+
- You answer questions.
|
1245 |
+
- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
|
1246 |
+
- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
|
1247 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1248 |
+
sep_style=SeparatorStyle.CHATML,
|
1249 |
+
sep="<|im_end|>",
|
1250 |
+
stop_token_ids=[50278, 0],
|
1251 |
+
)
|
1252 |
+
)
|
1253 |
+
|
1254 |
+
# MPT-30b-chat default template
|
1255 |
+
register_conv_template(
|
1256 |
+
Conversation(
|
1257 |
+
name="mpt-30b-chat",
|
1258 |
+
system_template="""<|im_start|>system
|
1259 |
+
{system_message}""",
|
1260 |
+
system_message="""A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
|
1261 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1262 |
+
sep_style=SeparatorStyle.CHATML,
|
1263 |
+
sep="<|im_end|>",
|
1264 |
+
stop_token_ids=[50278, 0],
|
1265 |
+
)
|
1266 |
+
)
|
1267 |
+
|
1268 |
+
# Lemur-70b-chat default template
|
1269 |
+
# reference: https://huggingface.co/OpenLemur/lemur-70b-chat-v1#generation
|
1270 |
+
register_conv_template(
|
1271 |
+
Conversation(
|
1272 |
+
name="lemur-70b-chat",
|
1273 |
+
system_template="""<|im_start|>system
|
1274 |
+
{system_message}""",
|
1275 |
+
system_message="""You are a helpful, respectful, and honest assistant.""",
|
1276 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1277 |
+
sep_style=SeparatorStyle.CHATML,
|
1278 |
+
sep="<|im_end|>",
|
1279 |
+
stop_token_ids=[32002, 0],
|
1280 |
+
)
|
1281 |
+
)
|
1282 |
+
|
1283 |
+
# MPT-30b-instruct default template
|
1284 |
+
# reference: https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
|
1285 |
+
register_conv_template(
|
1286 |
+
Conversation(
|
1287 |
+
name="mpt-30b-instruct",
|
1288 |
+
system_template="{system_message}",
|
1289 |
+
system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
|
1290 |
+
roles=("### Instruction", "### Response"),
|
1291 |
+
sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
|
1292 |
+
sep="\n\n",
|
1293 |
+
stop_token_ids=[50278, 0],
|
1294 |
+
)
|
1295 |
+
)
|
1296 |
+
|
1297 |
+
# Bard default template
|
1298 |
+
# Reference: https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L150
|
1299 |
+
# https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L40
|
1300 |
+
register_conv_template(
|
1301 |
+
Conversation(
|
1302 |
+
name="bard",
|
1303 |
+
roles=("0", "1"),
|
1304 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1305 |
+
sep=None,
|
1306 |
+
)
|
1307 |
+
)
|
1308 |
+
|
1309 |
+
register_conv_template(
|
1310 |
+
Conversation(
|
1311 |
+
name="gemini",
|
1312 |
+
roles=("user", "model"),
|
1313 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1314 |
+
sep=None,
|
1315 |
+
max_image_size_mb=20,
|
1316 |
+
)
|
1317 |
+
)
|
1318 |
+
|
1319 |
+
register_conv_template(
|
1320 |
+
Conversation(
|
1321 |
+
name="gemini-1.5-pro",
|
1322 |
+
roles=("user", "model"),
|
1323 |
+
sep_style=SeparatorStyle.DEFAULT,
|
1324 |
+
sep=None,
|
1325 |
+
system_message=(
|
1326 |
+
"You are a friendly and helpful assistant.\n"
|
1327 |
+
"Ensure your answers are complete, unless the user requests a more concise approach.\n"
|
1328 |
+
"When generating code, offer explanations for code segments as necessary and maintain good coding practices.\n"
|
1329 |
+
"When presented with inquiries seeking information, provide answers that reflect a deep understanding of the field, guaranteeing their correctness.\n"
|
1330 |
+
"For any non-english queries, respond in the same language as the prompt unless otherwise specified by the user.\n"
|
1331 |
+
"For prompts involving reasoning, provide a clear explanation of each step in the reasoning process before presenting the final answer."
|
1332 |
+
),
|
1333 |
+
)
|
1334 |
+
)
|
1335 |
+
|
1336 |
+
# BiLLa default template
|
1337 |
+
register_conv_template(
|
1338 |
+
Conversation(
|
1339 |
+
name="billa",
|
1340 |
+
roles=("Human", "Assistant"),
|
1341 |
+
sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
|
1342 |
+
sep="\n",
|
1343 |
+
stop_str="Human:",
|
1344 |
+
)
|
1345 |
+
)
|
1346 |
+
|
1347 |
+
# RedPajama INCITE default template
|
1348 |
+
register_conv_template(
|
1349 |
+
Conversation(
|
1350 |
+
name="redpajama-incite",
|
1351 |
+
roles=("<human>", "<bot>"),
|
1352 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1353 |
+
sep="\n",
|
1354 |
+
stop_str="<human>",
|
1355 |
+
)
|
1356 |
+
)
|
1357 |
+
|
1358 |
+
# h2oGPT default template
|
1359 |
+
register_conv_template(
|
1360 |
+
Conversation(
|
1361 |
+
name="h2ogpt",
|
1362 |
+
roles=("<|prompt|>", "<|answer|>"),
|
1363 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1364 |
+
sep="</s>",
|
1365 |
+
)
|
1366 |
+
)
|
1367 |
+
|
1368 |
+
# Robin default template
|
1369 |
+
register_conv_template(
|
1370 |
+
Conversation(
|
1371 |
+
name="Robin",
|
1372 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1373 |
+
roles=("###Human", "###Assistant"),
|
1374 |
+
sep_style=SeparatorStyle.ROBIN,
|
1375 |
+
sep="\n",
|
1376 |
+
stop_token_ids=[2, 396],
|
1377 |
+
stop_str="###",
|
1378 |
+
)
|
1379 |
+
)
|
1380 |
+
|
1381 |
+
# Snoozy default template
|
1382 |
+
# Reference: https://github.com/nomic-ai/gpt4all/blob/d4861030b778da6db59d21d2927a4aba4f9f1f43/gpt4all-bindings/python/gpt4all/gpt4all.py#L232
|
1383 |
+
register_conv_template(
|
1384 |
+
Conversation(
|
1385 |
+
name="snoozy",
|
1386 |
+
system_template="### Instruction:\n{system_message}",
|
1387 |
+
system_message="The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.",
|
1388 |
+
roles=("### Prompt", "### Response"),
|
1389 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1390 |
+
sep="\n",
|
1391 |
+
stop_str="###",
|
1392 |
+
)
|
1393 |
+
)
|
1394 |
+
|
1395 |
+
# manticore default template
|
1396 |
+
register_conv_template(
|
1397 |
+
Conversation(
|
1398 |
+
name="manticore",
|
1399 |
+
roles=("USER", "ASSISTANT"),
|
1400 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1401 |
+
sep="\n",
|
1402 |
+
sep2="</s>",
|
1403 |
+
)
|
1404 |
+
)
|
1405 |
+
|
1406 |
+
# Falcon default template
|
1407 |
+
register_conv_template(
|
1408 |
+
Conversation(
|
1409 |
+
name="falcon",
|
1410 |
+
roles=("User", "Assistant"),
|
1411 |
+
messages=[],
|
1412 |
+
sep_style=SeparatorStyle.RWKV,
|
1413 |
+
sep="\n",
|
1414 |
+
sep2="<|endoftext|>",
|
1415 |
+
stop_str="\nUser", # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
|
1416 |
+
stop_token_ids=[
|
1417 |
+
0,
|
1418 |
+
1,
|
1419 |
+
2,
|
1420 |
+
3,
|
1421 |
+
4,
|
1422 |
+
5,
|
1423 |
+
6,
|
1424 |
+
7,
|
1425 |
+
8,
|
1426 |
+
9,
|
1427 |
+
10,
|
1428 |
+
11,
|
1429 |
+
], # it is better to only put special tokens here, because the tokenizer only removes special tokens
|
1430 |
+
)
|
1431 |
+
)
|
1432 |
+
|
1433 |
+
# ChangGPT default template
|
1434 |
+
register_conv_template(
|
1435 |
+
Conversation(
|
1436 |
+
name="polyglot_changgpt",
|
1437 |
+
roles=("B", "A"),
|
1438 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1439 |
+
sep="\n",
|
1440 |
+
)
|
1441 |
+
)
|
1442 |
+
|
1443 |
+
# tigerbot template
|
1444 |
+
register_conv_template(
|
1445 |
+
Conversation(
|
1446 |
+
name="tigerbot",
|
1447 |
+
system_message="A chat between a curious user and an artificial intelligence assistant. "
|
1448 |
+
"The assistant gives helpful, detailed, and polite answers to the user's questions.",
|
1449 |
+
roles=("### Instruction", "### Response"),
|
1450 |
+
sep_style=SeparatorStyle.ROBIN,
|
1451 |
+
sep="\n\n",
|
1452 |
+
stop_str="###",
|
1453 |
+
)
|
1454 |
+
)
|
1455 |
+
|
1456 |
+
# ref: https://huggingface.co/Salesforce/xgen-7b-8k-inst
|
1457 |
+
register_conv_template(
|
1458 |
+
Conversation(
|
1459 |
+
name="xgen",
|
1460 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
|
1461 |
+
roles=("### Human", "### Assistant"),
|
1462 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1463 |
+
sep="\n",
|
1464 |
+
stop_token_ids=[50256],
|
1465 |
+
)
|
1466 |
+
)
|
1467 |
+
|
1468 |
+
# Internlm-chat template
|
1469 |
+
register_conv_template(
|
1470 |
+
Conversation(
|
1471 |
+
name="internlm-chat",
|
1472 |
+
system_message="A chat between a curious <|User|> and an <|Bot|>. The <|Bot|> gives helpful, detailed, and polite answers to the <|User|>'s questions.\n\n",
|
1473 |
+
roles=("<|User|>", "<|Bot|>"),
|
1474 |
+
sep_style=SeparatorStyle.CHATINTERN,
|
1475 |
+
sep="<eoh>",
|
1476 |
+
sep2="<eoa>",
|
1477 |
+
stop_token_ids=[1, 103028],
|
1478 |
+
stop_str="<|User|>",
|
1479 |
+
)
|
1480 |
+
)
|
1481 |
+
|
1482 |
+
# StarChat template
|
1483 |
+
# reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py
|
1484 |
+
register_conv_template(
|
1485 |
+
Conversation(
|
1486 |
+
name="starchat",
|
1487 |
+
system_template="<system>\n{system_message}",
|
1488 |
+
roles=("<|user|>", "<|assistant|>"),
|
1489 |
+
sep_style=SeparatorStyle.CHATML,
|
1490 |
+
sep="<|end|>",
|
1491 |
+
stop_token_ids=[0, 49155],
|
1492 |
+
stop_str="<|end|>",
|
1493 |
+
)
|
1494 |
+
)
|
1495 |
+
|
1496 |
+
# Baichuan-13B-Chat template
|
1497 |
+
register_conv_template(
|
1498 |
+
# source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555
|
1499 |
+
# https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json
|
1500 |
+
# https://github.com/baichuan-inc/Baichuan-13B/issues/25
|
1501 |
+
Conversation(
|
1502 |
+
name="baichuan-chat",
|
1503 |
+
roles=("<reserved_102>", "<reserved_103>"),
|
1504 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1505 |
+
sep="",
|
1506 |
+
stop_token_ids=[],
|
1507 |
+
)
|
1508 |
+
)
|
1509 |
+
|
1510 |
+
# Baichuan2-13B-Chat template
|
1511 |
+
register_conv_template(
|
1512 |
+
# source: https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/c6f8592a60b4ad73c210b28dd2ab3cca51abbf93/modeling_baichuan.py#L773
|
1513 |
+
# https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/generation_config.json
|
1514 |
+
# https://github.com/baichuan-inc/Baichuan2/issues/62
|
1515 |
+
Conversation(
|
1516 |
+
name="baichuan2-chat",
|
1517 |
+
roles=("<reserved_106>", "<reserved_107>"),
|
1518 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1519 |
+
sep="",
|
1520 |
+
stop_token_ids=[],
|
1521 |
+
)
|
1522 |
+
)
|
1523 |
+
|
1524 |
+
# Mistral template
|
1525 |
+
# source: https://docs.mistral.ai/llm/mistral-instruct-v0.1#chat-template
|
1526 |
+
register_conv_template(
|
1527 |
+
Conversation(
|
1528 |
+
name="mistral",
|
1529 |
+
system_template="[INST] {system_message}\n",
|
1530 |
+
roles=("[INST]", "[/INST]"),
|
1531 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1532 |
+
sep=" ",
|
1533 |
+
sep2="</s>",
|
1534 |
+
)
|
1535 |
+
)
|
1536 |
+
|
1537 |
+
# llama2 template
|
1538 |
+
# reference: https://huggingface.co/blog/codellama#conversational-instructions
|
1539 |
+
# reference: https://github.com/facebookresearch/llama/blob/1a240688810f8036049e8da36b073f63d2ac552c/llama/generation.py#L212
|
1540 |
+
register_conv_template(
|
1541 |
+
Conversation(
|
1542 |
+
name="llama-2",
|
1543 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1544 |
+
roles=("[INST]", "[/INST]"),
|
1545 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1546 |
+
sep=" ",
|
1547 |
+
sep2=" </s><s>",
|
1548 |
+
)
|
1549 |
+
)
|
1550 |
+
|
1551 |
+
# llama3 template
|
1552 |
+
# reference: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json
|
1553 |
+
# reference: https://github.com/meta-llama/llama3/blob/0cee08ec68f4cfc0c89fe4a9366d82679aaa2a66/llama/tokenizer.py#L222
|
1554 |
+
register_conv_template(
|
1555 |
+
Conversation(
|
1556 |
+
name="llama-3",
|
1557 |
+
system_template="<|start_header_id|>system<|end_header_id|>\n\n{system_message}<|eot_id|>",
|
1558 |
+
roles=("user", "assistant"),
|
1559 |
+
sep_style=SeparatorStyle.LLAMA3,
|
1560 |
+
sep="",
|
1561 |
+
stop_str="<|eot_id|>",
|
1562 |
+
stop_token_ids=[128001, 128009],
|
1563 |
+
)
|
1564 |
+
)
|
1565 |
+
|
1566 |
+
register_conv_template(
|
1567 |
+
Conversation(
|
1568 |
+
name="chinese-alpaca2",
|
1569 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1570 |
+
system_message="You are a helpful assistant. 你是一个乐于助人的助手。请你提供专业、有逻辑、内容真实、有价值的详细回复。",
|
1571 |
+
roles=("[INST]", "[/INST]"),
|
1572 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1573 |
+
sep=" ",
|
1574 |
+
sep2=" </s><s>",
|
1575 |
+
)
|
1576 |
+
)
|
1577 |
+
|
1578 |
+
register_conv_template(
|
1579 |
+
Conversation(
|
1580 |
+
name="cutegpt",
|
1581 |
+
roles=("问:", "答:\n"),
|
1582 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1583 |
+
sep="\n",
|
1584 |
+
sep2="\n",
|
1585 |
+
stop_str="<end>",
|
1586 |
+
)
|
1587 |
+
)
|
1588 |
+
|
1589 |
+
# OpenOrcaxOpenChat-Preview2-13B template
|
1590 |
+
register_conv_template(
|
1591 |
+
Conversation(
|
1592 |
+
name="open-orca",
|
1593 |
+
system_template="{system_message}",
|
1594 |
+
system_message="You are a helpful assistant. Please answer truthfully and write out your "
|
1595 |
+
"thinking step by step to be sure you get the right answer. If you make a mistake or encounter "
|
1596 |
+
"an error in your thinking, say so out loud and attempt to correct it. If you don't know or "
|
1597 |
+
"aren't sure about something, say so clearly. You will act as a professional logician, mathematician, "
|
1598 |
+
"and physicist. You will also act as the most appropriate type of expert to answer any particular "
|
1599 |
+
"question or solve the relevant problem; state which expert type your are, if so. Also think of "
|
1600 |
+
"any particular named expert that would be ideal to answer the relevant question or solve the "
|
1601 |
+
"relevant problem; name and act as them, if appropriate.",
|
1602 |
+
roles=("User", "Assistant"),
|
1603 |
+
sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
|
1604 |
+
sep="<|end_of_turn|>\n",
|
1605 |
+
stop_token_ids=[32000, 32001], # "<|end_of_turn|>"
|
1606 |
+
stop_str="User",
|
1607 |
+
)
|
1608 |
+
)
|
1609 |
+
|
1610 |
+
# Open-Orca/Mistral-7B-OpenOrca template
|
1611 |
+
# source: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
|
1612 |
+
# reference: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca#prompt-template
|
1613 |
+
register_conv_template(
|
1614 |
+
Conversation(
|
1615 |
+
name="mistral-7b-openorca",
|
1616 |
+
system_template="<|im_start|>system\n{system_message}",
|
1617 |
+
system_message="You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!",
|
1618 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1619 |
+
sep_style=SeparatorStyle.CHATML,
|
1620 |
+
sep="<|im_end|>",
|
1621 |
+
stop_token_ids=[32000, 32001],
|
1622 |
+
)
|
1623 |
+
)
|
1624 |
+
|
1625 |
+
|
1626 |
+
# ehartford/dolphin-2.2.1-mistral-7b template
|
1627 |
+
# reference: https://huggingface.co/ehartford/dolphin-2.2.1-mistral-7b#training
|
1628 |
+
register_conv_template(
|
1629 |
+
Conversation(
|
1630 |
+
name="dolphin-2.2.1-mistral-7b",
|
1631 |
+
system_template="<|im_start|>system\n{system_message}",
|
1632 |
+
system_message="You are Dolphin, a helpful AI assistant.",
|
1633 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1634 |
+
sep_style=SeparatorStyle.CHATML,
|
1635 |
+
sep="<|im_end|>",
|
1636 |
+
stop_token_ids=[32000, 32001],
|
1637 |
+
)
|
1638 |
+
)
|
1639 |
+
|
1640 |
+
|
1641 |
+
# teknium/OpenHermes-2.5-Mistral-7B template
|
1642 |
+
# source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B
|
1643 |
+
# reference: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B#prompt-template
|
1644 |
+
register_conv_template(
|
1645 |
+
Conversation(
|
1646 |
+
name="OpenHermes-2.5-Mistral-7B",
|
1647 |
+
system_template="<|im_start|>system\n{system_message}",
|
1648 |
+
system_message="You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.",
|
1649 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1650 |
+
sep_style=SeparatorStyle.CHATML,
|
1651 |
+
sep="<|im_end|>",
|
1652 |
+
stop_token_ids=[32000, 32001],
|
1653 |
+
)
|
1654 |
+
)
|
1655 |
+
|
1656 |
+
|
1657 |
+
# NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO template
|
1658 |
+
# source: https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
|
1659 |
+
register_conv_template(
|
1660 |
+
Conversation(
|
1661 |
+
name="Nous-Hermes-2-Mixtral-8x7B-DPO",
|
1662 |
+
system_template="<|im_start|>system\n{system_message}",
|
1663 |
+
system_message='You are a helpful, intelligent assistant AI named "Hermes", a conversational chatbot that can follow instructions, converse with the user, and perform a variety of tasks, including tasks on knowledge, reasoning, mathematics, and code. Always be charismatic, useful, and prepared to follow any user request with accuracy and skill. You should respond with high quality, fluent, and detailed responses. Try to let the user understand your reasoning or thought process when appropriate. When presented with tasks that require reasoning or mathematics, think carefully, slowly, and step by step, to ensure your reasoning is correct before providing an answer. Utilize the "Examples" section to assist you in performing the task. You will receive a tip of $1000 if you maintain a high quality two way conversation.',
|
1664 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1665 |
+
sep_style=SeparatorStyle.CHATML,
|
1666 |
+
sep="<|im_end|>",
|
1667 |
+
stop_token_ids=[32000, 32001],
|
1668 |
+
)
|
1669 |
+
)
|
1670 |
+
|
1671 |
+
|
1672 |
+
# Qwen-chat default template
|
1673 |
+
# source: https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/qwen_generation_utils.py#L130
|
1674 |
+
register_conv_template(
|
1675 |
+
Conversation(
|
1676 |
+
name="qwen-7b-chat",
|
1677 |
+
system_template="<|im_start|>system\n{system_message}",
|
1678 |
+
system_message="You are a helpful assistant.",
|
1679 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1680 |
+
sep_style=SeparatorStyle.CHATML,
|
1681 |
+
sep="<|im_end|>",
|
1682 |
+
stop_token_ids=[
|
1683 |
+
151643,
|
1684 |
+
151644,
|
1685 |
+
151645,
|
1686 |
+
], # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
|
1687 |
+
stop_str="<|endoftext|>",
|
1688 |
+
)
|
1689 |
+
)
|
1690 |
+
|
1691 |
+
# source: https://huggingface.co/01-ai/Yi-34B-Chat/blob/main/tokenizer_config.json#L60
|
1692 |
+
register_conv_template(
|
1693 |
+
Conversation(
|
1694 |
+
name="Yi-34b-chat",
|
1695 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1696 |
+
sep_style=SeparatorStyle.CHATML,
|
1697 |
+
sep="<|im_end|>",
|
1698 |
+
stop_token_ids=[
|
1699 |
+
2,
|
1700 |
+
6,
|
1701 |
+
7,
|
1702 |
+
8,
|
1703 |
+
], # "<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|im_sep|>"
|
1704 |
+
stop_str="<|endoftext|>",
|
1705 |
+
)
|
1706 |
+
)
|
1707 |
+
|
1708 |
+
|
1709 |
+
# AquilaChat default template
|
1710 |
+
# source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
|
1711 |
+
register_conv_template(
|
1712 |
+
Conversation(
|
1713 |
+
name="aquila-chat",
|
1714 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1715 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1716 |
+
roles=("Human", "Assistant"),
|
1717 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1718 |
+
sep="###",
|
1719 |
+
sep2="",
|
1720 |
+
stop_str=["###", "</s>", "[UNK]"],
|
1721 |
+
)
|
1722 |
+
)
|
1723 |
+
# AquilaChat2-34B default template
|
1724 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L212
|
1725 |
+
register_conv_template(
|
1726 |
+
Conversation(
|
1727 |
+
name="aquila-legacy",
|
1728 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1729 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
|
1730 |
+
roles=("### Human: ", "### Assistant: "),
|
1731 |
+
offset=0,
|
1732 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1733 |
+
sep="\n",
|
1734 |
+
sep2="</s>",
|
1735 |
+
stop_str=["</s>", "[UNK]"],
|
1736 |
+
)
|
1737 |
+
)
|
1738 |
+
# AquilaChat2-7B-16K and AquilaChat2-34B-16K default template
|
1739 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L227
|
1740 |
+
register_conv_template(
|
1741 |
+
Conversation(
|
1742 |
+
name="aquila",
|
1743 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1744 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1745 |
+
roles=("Human", "Assistant"),
|
1746 |
+
offset=0,
|
1747 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1748 |
+
sep="###",
|
1749 |
+
sep2="</s>",
|
1750 |
+
stop_str=["</s>", "[UNK]"],
|
1751 |
+
)
|
1752 |
+
)
|
1753 |
+
|
1754 |
+
# AquilaChat2-7B default template
|
1755 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L242
|
1756 |
+
register_conv_template(
|
1757 |
+
Conversation(
|
1758 |
+
name="aquila-v1",
|
1759 |
+
roles=("<|startofpiece|>", "<|endofpiece|>"),
|
1760 |
+
offset=0,
|
1761 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1762 |
+
sep="",
|
1763 |
+
sep2="</s>",
|
1764 |
+
stop_str=["</s>", "<|endoftext|>"],
|
1765 |
+
)
|
1766 |
+
)
|
1767 |
+
|
1768 |
+
# Llama2-Chinese default template
|
1769 |
+
# source: https://huggingface.co/FlagAlpha
|
1770 |
+
register_conv_template(
|
1771 |
+
Conversation(
|
1772 |
+
name="llama2-chinese",
|
1773 |
+
system_template="<s>{system_message}</s>",
|
1774 |
+
roles=("Human", "Assistant", "System"),
|
1775 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1776 |
+
sep="\n",
|
1777 |
+
sep2="\n</s><s>",
|
1778 |
+
stop_str="</s>",
|
1779 |
+
)
|
1780 |
+
)
|
1781 |
+
|
1782 |
+
# Vigogne Instruct default template
|
1783 |
+
# source: https://github.com/bofenghuang/vigogne
|
1784 |
+
register_conv_template(
|
1785 |
+
Conversation(
|
1786 |
+
name="vigogne_instruct",
|
1787 |
+
system_template="### System:\n{system_message}\n\n",
|
1788 |
+
system_message=(
|
1789 |
+
"Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière"
|
1790 |
+
" précise à la demande."
|
1791 |
+
),
|
1792 |
+
roles=("### Instruction", "### Response"),
|
1793 |
+
sep_style=SeparatorStyle.DOLLY,
|
1794 |
+
sep="\n\n",
|
1795 |
+
sep2="</s>",
|
1796 |
+
)
|
1797 |
+
)
|
1798 |
+
|
1799 |
+
# Vigogne Chat default template
|
1800 |
+
register_conv_template(
|
1801 |
+
Conversation(
|
1802 |
+
name="vigogne_chat_v2",
|
1803 |
+
system_template="<|system|>: {system_message}",
|
1804 |
+
system_message=(
|
1805 |
+
"Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
|
1806 |
+
" autant que vous le pouvez."
|
1807 |
+
),
|
1808 |
+
roles=("<|user|>", "<|assistant|>"),
|
1809 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1810 |
+
sep="\n",
|
1811 |
+
sep2="</s>\n",
|
1812 |
+
stop_str="<|user|>",
|
1813 |
+
)
|
1814 |
+
)
|
1815 |
+
|
1816 |
+
# Stable Vicuna default template
|
1817 |
+
# source: https://huggingface.co/TheBloke/stable-vicuna-13B-HF/discussions/5
|
1818 |
+
# source: https://huggingface.co/spaces/CarperAI/StableVicuna/blob/main/app.py
|
1819 |
+
register_conv_template(
|
1820 |
+
Conversation(
|
1821 |
+
name="stable-vicuna",
|
1822 |
+
system_message="### Assistant: I am StableVicuna, a large language model created by CarperAI. I am here to chat!\n",
|
1823 |
+
roles=("### Human", "### Assistant"),
|
1824 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1825 |
+
sep="\n",
|
1826 |
+
sep2="\n\n",
|
1827 |
+
)
|
1828 |
+
)
|
1829 |
+
|
1830 |
+
register_conv_template(
|
1831 |
+
Conversation(
|
1832 |
+
name="vigogne_chat_v3",
|
1833 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1834 |
+
system_message=(
|
1835 |
+
"Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
|
1836 |
+
" autant que vous le pouvez."
|
1837 |
+
),
|
1838 |
+
roles=("[INST]", "[/INST]"),
|
1839 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1840 |
+
sep=" ",
|
1841 |
+
sep2=" </s>",
|
1842 |
+
)
|
1843 |
+
)
|
1844 |
+
|
1845 |
+
# Falcon 180B chat template
|
1846 |
+
# source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
|
1847 |
+
register_conv_template(
|
1848 |
+
Conversation(
|
1849 |
+
name="falcon-chat",
|
1850 |
+
roles=("User", "Falcon"),
|
1851 |
+
system_template="System: {system_message}",
|
1852 |
+
messages=[],
|
1853 |
+
sep_style=SeparatorStyle.FALCON_CHAT,
|
1854 |
+
sep="\n",
|
1855 |
+
sep2="<|endoftext|>",
|
1856 |
+
stop_str="\nUser:", # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
|
1857 |
+
)
|
1858 |
+
)
|
1859 |
+
|
1860 |
+
# Phind template
|
1861 |
+
# source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
|
1862 |
+
register_conv_template(
|
1863 |
+
Conversation(
|
1864 |
+
name="phind",
|
1865 |
+
system_message="### System Prompt\nYou are an intelligent programming assistant.",
|
1866 |
+
roles=("### User Message", "### Assistant"),
|
1867 |
+
messages=(),
|
1868 |
+
offset=0,
|
1869 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1870 |
+
sep="\n\n",
|
1871 |
+
)
|
1872 |
+
)
|
1873 |
+
|
1874 |
+
# Metharme formatting for Pygmalion models
|
1875 |
+
# source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
|
1876 |
+
register_conv_template(
|
1877 |
+
Conversation(
|
1878 |
+
name="metharme",
|
1879 |
+
system_template="<|system|>{system_message}",
|
1880 |
+
system_message="""Enter RP mode. You shall reply to the user while staying
|
1881 |
+
in character. Your responses must be detailed, creative, immersive, and drive the scenario
|
1882 |
+
forward.""",
|
1883 |
+
roles=("<|user|>", "<|model|>"),
|
1884 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1885 |
+
sep="",
|
1886 |
+
stop_str="<|user|>",
|
1887 |
+
)
|
1888 |
+
)
|
1889 |
+
# xDAN default template
|
1890 |
+
# source: https://huggingface.co/xDAN-AI/xDAN-L1-Chat-RL-v1
|
1891 |
+
register_conv_template(
|
1892 |
+
Conversation(
|
1893 |
+
name="xdan-v1",
|
1894 |
+
system_message="You are a helpful and harmless assistant named xDAN and created by xDAN-AI.Please response and work on questions thinking step by step.",
|
1895 |
+
roles=("### Human", "### Assistant"),
|
1896 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1897 |
+
sep="\n",
|
1898 |
+
stop_str="</s>",
|
1899 |
+
)
|
1900 |
+
)
|
1901 |
+
|
1902 |
+
# Zephyr template
|
1903 |
+
# reference: https://huggingface.co/spaces/HuggingFaceH4/zephyr-playground/blob/main/dialogues.py
|
1904 |
+
register_conv_template(
|
1905 |
+
Conversation(
|
1906 |
+
name="zephyr",
|
1907 |
+
system_template="<|system|>\n{system_message}",
|
1908 |
+
roles=("<|user|>", "<|assistant|>"),
|
1909 |
+
sep_style=SeparatorStyle.CHATML,
|
1910 |
+
sep="</s>",
|
1911 |
+
stop_token_ids=[2],
|
1912 |
+
stop_str="</s>",
|
1913 |
+
)
|
1914 |
+
)
|
1915 |
+
|
1916 |
+
# CatPPT template
|
1917 |
+
# reference: https://huggingface.co/rishiraj/CatPPT
|
1918 |
+
register_conv_template(
|
1919 |
+
Conversation(
|
1920 |
+
name="catppt",
|
1921 |
+
system_template="<|system|>\n{system_message}",
|
1922 |
+
roles=("<|user|>", "<|assistant|>"),
|
1923 |
+
sep_style=SeparatorStyle.CHATML,
|
1924 |
+
sep="</s>",
|
1925 |
+
stop_token_ids=[2],
|
1926 |
+
stop_str="</s>",
|
1927 |
+
)
|
1928 |
+
)
|
1929 |
+
|
1930 |
+
# TinyLlama template
|
1931 |
+
# reference: https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0
|
1932 |
+
register_conv_template(
|
1933 |
+
Conversation(
|
1934 |
+
name="TinyLlama",
|
1935 |
+
system_template="<|system|>\n{system_message}",
|
1936 |
+
roles=("<|user|>", "<|assistant|>"),
|
1937 |
+
sep_style=SeparatorStyle.CHATML,
|
1938 |
+
sep="</s>",
|
1939 |
+
stop_token_ids=[2],
|
1940 |
+
stop_str="</s>",
|
1941 |
+
)
|
1942 |
+
)
|
1943 |
+
|
1944 |
+
# Orca-2 template
|
1945 |
+
# reference: https://huggingface.co/microsoft/Orca-2-7b
|
1946 |
+
register_conv_template(
|
1947 |
+
Conversation(
|
1948 |
+
name="orca-2",
|
1949 |
+
system_template="<|im_start|>system\n{system_message}",
|
1950 |
+
system_message="You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior.",
|
1951 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1952 |
+
sep_style=SeparatorStyle.CHATML,
|
1953 |
+
sep="<|im_end|>",
|
1954 |
+
stop_str="<|im_end|>",
|
1955 |
+
)
|
1956 |
+
)
|
1957 |
+
|
1958 |
+
# Deepseek-chat template
|
1959 |
+
# reference: https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat/blob/main/tokenizer_config.json
|
1960 |
+
register_conv_template(
|
1961 |
+
Conversation(
|
1962 |
+
name="deepseek-chat",
|
1963 |
+
system_message="<|begin▁of▁sentence|>", # must add a bos token before first message
|
1964 |
+
roles=("User", "Assistant"),
|
1965 |
+
sep_style=SeparatorStyle.DEEPSEEK_CHAT,
|
1966 |
+
sep="\n\n",
|
1967 |
+
sep2="<|end▁of▁sentence|>",
|
1968 |
+
stop_str="<|end▁of▁sentence|>",
|
1969 |
+
)
|
1970 |
+
)
|
1971 |
+
|
1972 |
+
# Yuan2.0 chat template
|
1973 |
+
# source: https://huggingface.co/IEITYuan/Yuan2-2B-Janus-hf/blob/main/tokenizer_config.json#L6
|
1974 |
+
register_conv_template(
|
1975 |
+
Conversation(
|
1976 |
+
name="yuan2",
|
1977 |
+
roles=("user", "assistant"),
|
1978 |
+
sep_style=SeparatorStyle.YUAN2,
|
1979 |
+
sep="<sep>",
|
1980 |
+
sep2="\n",
|
1981 |
+
stop_token_ids=[
|
1982 |
+
77185,
|
1983 |
+
], # "<eod>"
|
1984 |
+
stop_str="<eod>",
|
1985 |
+
)
|
1986 |
+
)
|
1987 |
+
|
1988 |
+
# Solar-10.7B Chat Template
|
1989 |
+
# Reference: https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0/blob/main/tokenizer_config.json
|
1990 |
+
register_conv_template(
|
1991 |
+
Conversation(
|
1992 |
+
name="solar",
|
1993 |
+
system_message="",
|
1994 |
+
roles=("### User", "### Assistant"),
|
1995 |
+
sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
|
1996 |
+
sep="\n\n",
|
1997 |
+
stop_str="</s>",
|
1998 |
+
)
|
1999 |
+
)
|
2000 |
+
|
2001 |
+
# nvidia/Llama2-70B-SteerLM-Chat
|
2002 |
+
register_conv_template(
|
2003 |
+
Conversation(
|
2004 |
+
name="steerlm",
|
2005 |
+
system_message="",
|
2006 |
+
roles=("user", "assistant"),
|
2007 |
+
sep_style=SeparatorStyle.DEFAULT,
|
2008 |
+
sep=None,
|
2009 |
+
)
|
2010 |
+
)
|
2011 |
+
|
2012 |
+
# yuan 2.0 template
|
2013 |
+
# reference:https://github.com/IEIT-Yuan/Yuan-2.0
|
2014 |
+
# reference:https://huggingface.co/IEITYuan
|
2015 |
+
register_conv_template(
|
2016 |
+
Conversation(
|
2017 |
+
name="yuan",
|
2018 |
+
system_template="",
|
2019 |
+
roles=("", ""),
|
2020 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
2021 |
+
sep="<sep>",
|
2022 |
+
stop_str="<eod>",
|
2023 |
+
)
|
2024 |
+
)
|
2025 |
+
|
2026 |
+
# Cllm chat template
|
2027 |
+
# reference:
|
2028 |
+
register_conv_template(
|
2029 |
+
Conversation(
|
2030 |
+
name="cllm",
|
2031 |
+
system_message="A chat between a curious user and an artificial intelligence assistant. "
|
2032 |
+
"The assistant gives helpful, detailed, and polite answers to the user's questions.",
|
2033 |
+
roles=("USER", "ASSISTANT"),
|
2034 |
+
sep_style=SeparatorStyle.CLLM,
|
2035 |
+
sep=" ",
|
2036 |
+
sep2="</s>",
|
2037 |
+
)
|
2038 |
+
)
|
2039 |
+
|
2040 |
+
|
2041 |
+
# Llava-chatml
|
2042 |
+
# reference: https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/llava/conversation.py#L361
|
2043 |
+
register_conv_template(
|
2044 |
+
Conversation(
|
2045 |
+
name="llava-chatml",
|
2046 |
+
system_template="<|im_start|>system\n{system_message}",
|
2047 |
+
system_message="Answer the questions.",
|
2048 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
2049 |
+
sep_style=SeparatorStyle.CHATML,
|
2050 |
+
sep="<|im_end|>",
|
2051 |
+
stop_str="<|im_end|>",
|
2052 |
+
)
|
2053 |
+
)
|
2054 |
+
|
2055 |
+
# Gemma
|
2056 |
+
# reference: https://huggingface.co/google/gemma-7b-it?text=%3Cstart_of_turn%3Euser%0AHow+does+the+brain+work%3F%3Cend_of_turn%3E%0A%3Cstart_of_turn%3Emodel
|
2057 |
+
register_conv_template(
|
2058 |
+
Conversation(
|
2059 |
+
name="gemma",
|
2060 |
+
roles=("user", "model"),
|
2061 |
+
sep_style=SeparatorStyle.GEMMA,
|
2062 |
+
sep="<end_of_turn>\n",
|
2063 |
+
stop_str="<end_of_turn>",
|
2064 |
+
)
|
2065 |
+
)
|
2066 |
+
|
2067 |
+
register_conv_template(
|
2068 |
+
Conversation(
|
2069 |
+
name="yandexgpt",
|
2070 |
+
system_message="",
|
2071 |
+
roles=("user", "assistant"),
|
2072 |
+
sep_style=None,
|
2073 |
+
sep=None,
|
2074 |
+
)
|
2075 |
+
)
|
2076 |
+
|
2077 |
+
if __name__ == "__main__":
|
2078 |
+
from fastchat.conversation import get_conv_template
|
2079 |
+
|
2080 |
+
print("-- Vicuna template --")
|
2081 |
+
conv = get_conv_template("vicuna_v1.1")
|
2082 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2083 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2084 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2085 |
+
conv.append_message(conv.roles[1], None)
|
2086 |
+
print(conv.get_prompt())
|
2087 |
+
|
2088 |
+
print("\n")
|
2089 |
+
|
2090 |
+
print("-- Llama-2 template --")
|
2091 |
+
conv = get_conv_template("llama-2")
|
2092 |
+
conv.set_system_message("You are a helpful, respectful and honest assistant.")
|
2093 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2094 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2095 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2096 |
+
conv.append_message(conv.roles[1], None)
|
2097 |
+
print(conv.get_prompt())
|
2098 |
+
|
2099 |
+
print("\n")
|
2100 |
+
|
2101 |
+
print("-- ChatGPT template --")
|
2102 |
+
conv = get_conv_template("chatgpt")
|
2103 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2104 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2105 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2106 |
+
conv.append_message(conv.roles[1], None)
|
2107 |
+
print(conv.to_openai_api_messages())
|
2108 |
+
|
2109 |
+
print("\n")
|
2110 |
+
|
2111 |
+
print("-- Claude template --")
|
2112 |
+
conv = get_conv_template("claude")
|
2113 |
+
conv.append_message(conv.roles[0], "Hello!")
|
2114 |
+
conv.append_message(conv.roles[1], "Hi!")
|
2115 |
+
conv.append_message(conv.roles[0], "How are you?")
|
2116 |
+
conv.append_message(conv.roles[1], None)
|
2117 |
+
print(conv.get_prompt())
|
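
The registrations above only declare prompt formats; nothing is rendered until a template is fetched and filled in. As a minimal sketch (not part of the diff; the template name and messages are invented for illustration), a registered template is used the same way as in the `__main__` demo at the end of this file:

from fastchat.conversation import get_conv_template

# Fetch a registered template by name and build a two-turn prompt.
conv = get_conv_template("mistral")
conv.set_system_message("You are a helpful assistant.")
conv.append_message(conv.roles[0], "Write a haiku about autumn.")
conv.append_message(conv.roles[1], None)  # None marks the assistant turn to be generated
print(conv.get_prompt())  # the formatted prompt string for this model family
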
fastchat/data/__init__.py
ADDED
File without changes
|
fastchat/data/clean_sharegpt.py
ADDED
@@ -0,0 +1,232 @@
1 |
+
"""
|
2 |
+
- Convert html to markdown with basic data cleaning.
|
3 |
+
- Deduplication.
|
4 |
+
|
5 |
+
Usage:
|
6 |
+
python3 -m fastchat.data.clean_sharegpt --in sharegpt_html.json --out sharegpt_clean.json
|
7 |
+
"""
|
8 |
+
import argparse
|
9 |
+
from concurrent.futures import ProcessPoolExecutor
|
10 |
+
import json
|
11 |
+
import logging
|
12 |
+
import re
|
13 |
+
from typing import Dict, Union
|
14 |
+
|
15 |
+
import bs4
|
16 |
+
import markdownify # == 0.11.6
|
17 |
+
from tqdm import tqdm
|
18 |
+
|
19 |
+
|
20 |
+
div_pattern = re.compile("<div.*?>")
|
21 |
+
span_pattern = re.compile("<span.*?>")
|
22 |
+
code_lang_pattern = re.compile(
|
23 |
+
"```\s*" + "(.*?)" + "(?:Copy code)+" + "(.+?)" + "\s*?```", re.DOTALL
|
24 |
+
)
|
25 |
+
code_lang_format = "```\g<1>\n\g<2>\n```"
|
26 |
+
regenerate_pattern = re.compile("\d+ / \d+")
|
27 |
+
copy_chars_pattern = re.compile("Copy\d+ chars / \d+ words")
|
28 |
+
copy_code_pattern = re.compile("```(.*?)Copy code\s*```")
|
29 |
+
|
30 |
+
|
31 |
+
def reformat_code(val: str) -> str:
|
32 |
+
# Input code format is:
|
33 |
+
# ```
|
34 |
+
# $<language>Copy code$<exact_code_here>
|
35 |
+
#
|
36 |
+
# ```
|
37 |
+
# This function converts it into the correct markdown format
|
38 |
+
return re.sub(code_lang_pattern, code_lang_format, val)
|
39 |
+
|
40 |
+
|
41 |
+
def html_to_markdown(val: str) -> str:
|
42 |
+
# Remove all <div>. This is required to make indentation work in code blocks.
|
43 |
+
val = re.sub(div_pattern, "", val)
|
44 |
+
# Remove all <span>. This is required to make underscores work in code blocks.
|
45 |
+
val = re.sub(span_pattern, "", val)
|
46 |
+
# Convert HTML to markdown
|
47 |
+
val = markdownify.markdownify(val).strip()
|
48 |
+
# Reformat code
|
49 |
+
val = reformat_code(val)
|
50 |
+
|
51 |
+
# Remove noisy "[number] / [number]" at the beginning
|
52 |
+
noise = re.search(regenerate_pattern, val)
|
53 |
+
if noise and noise.start() == 0:
|
54 |
+
val = val[noise.end() :]
|
55 |
+
# Remove noisy "Copy[number] chars / [number] words"
|
56 |
+
val = re.sub(copy_chars_pattern, "", val)
|
57 |
+
# Remove empty code block ```\nCopy code\n```
|
58 |
+
val = re.sub(copy_code_pattern, "", val)
|
59 |
+
|
60 |
+
# Strip
|
61 |
+
val = val.replace("\n\n\n", "\n").strip()
|
62 |
+
|
63 |
+
return val
|
64 |
+
|
65 |
+
|
66 |
+
def contain_blocked_words(val: str) -> bool:
|
67 |
+
blocked_words = ["openai", "chatgpt"]
|
68 |
+
for w in blocked_words:
|
69 |
+
if w in val.lower():
|
70 |
+
return True
|
71 |
+
return False
|
72 |
+
|
73 |
+
|
74 |
+
def contain_blocked_responses(role: str, val: str) -> bool:
|
75 |
+
if role == "gpt":
|
76 |
+
blocked_responses = [
|
77 |
+
"Too many requests in 1 hour. Try again later.",
|
78 |
+
"!Too many requests in 1 hour. Try again later.",
|
79 |
+
]
|
80 |
+
for w in blocked_responses:
|
81 |
+
if val.startswith(w):
|
82 |
+
return True
|
83 |
+
return False
|
84 |
+
|
85 |
+
|
86 |
+
def clean_html_one_sample(sample):
|
87 |
+
roles = ["human", "gpt"]
|
88 |
+
|
89 |
+
if len(sample["conversations"]) <= 1:
|
90 |
+
return (sample, 1)
|
91 |
+
|
92 |
+
# Adjust the offset for cases like https://sharegpt.com/c/VyaZlh4
|
93 |
+
if sample["conversations"][0]["from"] != "human":
|
94 |
+
sample["conversations"] = sample["conversations"][1:]
|
95 |
+
if len(sample["conversations"]) <= 1:
|
96 |
+
return (sample, 1)
|
97 |
+
|
98 |
+
if sample["conversations"][-1]["from"] == "human":
|
99 |
+
sample["conversations"] = sample["conversations"][:-1]
|
100 |
+
if len(sample["conversations"]) <= 1:
|
101 |
+
return (sample, 1)
|
102 |
+
|
103 |
+
char_count = 0
|
104 |
+
new_conversations = []
|
105 |
+
for i, c in enumerate(sample["conversations"]):
|
106 |
+
if c["from"] != roles[i % 2]:
|
107 |
+
return (sample, 2)
|
108 |
+
|
109 |
+
if contain_blocked_words(c["value"]):
|
110 |
+
return (sample, 3)
|
111 |
+
|
112 |
+
try:
|
113 |
+
new_val = html_to_markdown(c["value"])
|
114 |
+
except (bs4.builder.ParserRejectedMarkup, AssertionError):
|
115 |
+
return (sample, 4)
|
116 |
+
|
117 |
+
if contain_blocked_responses(c["from"], new_val):
|
118 |
+
return (sample, 3)
|
119 |
+
|
120 |
+
# Filter empty answers like https://sharegpt.com/c/mrllZ6u
|
121 |
+
if not new_val or not new_val[0].isprintable():
|
122 |
+
break
|
123 |
+
|
124 |
+
char_count += len(new_val)
|
125 |
+
new_conversations.append(
|
126 |
+
{
|
127 |
+
"from": c["from"],
|
128 |
+
"value": new_val,
|
129 |
+
}
|
130 |
+
)
|
131 |
+
|
132 |
+
new_conversations = new_conversations[: len(new_conversations) // 2 * 2]
|
133 |
+
sample["conversations"] = new_conversations
|
134 |
+
|
135 |
+
if char_count < 16 or len(sample["conversations"]) <= 0:
|
136 |
+
return (sample, 1)
|
137 |
+
|
138 |
+
return (sample, 0)
|
139 |
+
|
140 |
+
|
141 |
+
def clean_html_all(content, begin, end):
|
142 |
+
"""
|
143 |
+
Clean the source html files.
|
144 |
+
"""
|
145 |
+
cnt_skip = 0
|
146 |
+
cnt_blocked_words = 0
|
147 |
+
cnt_wrong_format = 0
|
148 |
+
cnt_parser_error = 0
|
149 |
+
cnt_too_short = 0
|
150 |
+
cnt_id_duplication = 0
|
151 |
+
cnt_value_duplication = 0
|
152 |
+
cnt_plugin = 0
|
153 |
+
cnt_tag = 0
|
154 |
+
|
155 |
+
content = content[begin:end]
|
156 |
+
processed = []
|
157 |
+
with ProcessPoolExecutor() as executor:
|
158 |
+
for result in tqdm(
|
159 |
+
executor.map(clean_html_one_sample, content), total=len(content)
|
160 |
+
):
|
161 |
+
processed.append(result)
|
162 |
+
|
163 |
+
visited = {}
|
164 |
+
new_content = []
|
165 |
+
for sample, error_code in processed:
|
166 |
+
cid = sample["id"]
|
167 |
+
skipped = True
|
168 |
+
|
169 |
+
if error_code != 0:
|
170 |
+
if error_code == 1:
|
171 |
+
print(f"id {cid} is too short")
|
172 |
+
cnt_too_short += 1
|
173 |
+
elif error_code == 2:
|
174 |
+
print(f"id {cid} has a wrong format")
|
175 |
+
cnt_wrong_format += 1
|
176 |
+
elif error_code == 3:
|
177 |
+
print(f"id {cid} contains blocked words")
|
178 |
+
cnt_blocked_words += 1
|
179 |
+
elif error_code == 4:
|
180 |
+
print(f"id {cid} contains parser errors")
|
181 |
+
cnt_parser_error += 1
|
182 |
+
else:
|
183 |
+
raise ValueError(f"Invalid error_code: {error_code}")
|
184 |
+
elif cid in visited:
|
185 |
+
print(f"id {cid} is an id duplication of {visited[cid]}")
|
186 |
+
cnt_id_duplication += 1
|
187 |
+
elif sample.get("plugins", None) is not None:
|
188 |
+
print(f"id {cid} contains plugin")
|
189 |
+
cnt_plugin += 1
|
190 |
+
else:
|
191 |
+
key = (
|
192 |
+
sample["conversations"][0]["value"],
|
193 |
+
sample["conversations"][1]["value"],
|
194 |
+
)
|
195 |
+
if key in visited:
|
196 |
+
print(f"id {cid} is a value duplication of {visited[key]}")
|
197 |
+
cnt_value_duplication += 1
|
198 |
+
else:
|
199 |
+
visited[cid] = visited[key] = cid
|
200 |
+
skipped = False
|
201 |
+
|
202 |
+
if not skipped:
|
203 |
+
new_content.append(sample)
|
204 |
+
else:
|
205 |
+
cnt_skip += 1
|
206 |
+
|
207 |
+
print(
|
208 |
+
f"total: {len(content)}, skip: {cnt_skip}, new: {len(new_content)}, "
|
209 |
+
f"cnt_blocked_words: {cnt_blocked_words}, cnt_parser_error: {cnt_parser_error}, "
|
210 |
+
f"cnt_wrong_format: {cnt_wrong_format}, "
|
211 |
+
f"cnt_too_short: {cnt_too_short}, cnt_id_duplication: {cnt_id_duplication}, "
|
212 |
+
f"cnt_value_duplication: {cnt_value_duplication}, cnt_plugin: {cnt_plugin}"
|
213 |
+
)
|
214 |
+
|
215 |
+
return new_content
|
216 |
+
|
217 |
+
|
218 |
+
def main(args):
|
219 |
+
content = json.load(open(args["in_file"], "r"))
|
220 |
+
content = clean_html_all(content, args["begin"], args["end"])
|
221 |
+
json.dump(content, open(args["out_file"], "w"), indent=2, ensure_ascii=False)
|
222 |
+
|
223 |
+
|
224 |
+
if __name__ == "__main__":
|
225 |
+
parser = argparse.ArgumentParser()
|
226 |
+
parser.add_argument("--in-file", type=str, required=True)
|
227 |
+
parser.add_argument("--out-file", type=str, default="sharegpt_clean.json")
|
228 |
+
parser.add_argument("--begin", type=int)
|
229 |
+
parser.add_argument("--end", type=int)
|
230 |
+
parser.add_argument("--debug", action="store_true")
|
231 |
+
args = parser.parse_args()
|
232 |
+
main(vars(args))
|
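
In the raw ShareGPT HTML dumps, code blocks surface as a single run of the form ```<language>Copy code<code>```; `reformat_code` above (called from `html_to_markdown`) rewrites them into proper fenced blocks. A minimal sanity check, using an invented sample string, could look like this:

from fastchat.data.clean_sharegpt import reformat_code

raw = "```pythonCopy codeprint('hi')\n```"  # invented ShareGPT-style block
print(reformat_code(raw))
# ```python
# print('hi')
# ```
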
fastchat/data/convert_alpaca.py
ADDED
@@ -0,0 +1,38 @@
1 |
+
"""
|
2 |
+
Convert alpaca dataset into sharegpt format.
|
3 |
+
|
4 |
+
Usage: python3 -m fastchat.data.convert_alpaca --in alpaca_data.json
|
5 |
+
"""
|
6 |
+
|
7 |
+
import argparse
|
8 |
+
import json
|
9 |
+
|
10 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM
|
11 |
+
import numpy as np
|
12 |
+
|
13 |
+
|
14 |
+
if __name__ == "__main__":
|
15 |
+
parser = argparse.ArgumentParser()
|
16 |
+
parser.add_argument("--in-file", type=str)
|
17 |
+
parser.add_argument("--out-file", type=str)
|
18 |
+
args = parser.parse_args()
|
19 |
+
|
20 |
+
content = json.load(open(args.in_file, "r"))
|
21 |
+
new_content = []
|
22 |
+
for i, c in enumerate(content):
|
23 |
+
if len(c["input"].strip()) > 1:
|
24 |
+
q, a = c["instruction"] + "\nInput:\n" + c["input"], c["output"]
|
25 |
+
else:
|
26 |
+
q, a = c["instruction"], c["output"]
|
27 |
+
new_content.append(
|
28 |
+
{
|
29 |
+
"id": f"alpaca_{i}",
|
30 |
+
"conversations": [
|
31 |
+
{"from": "human", "value": q},
|
32 |
+
{"from": "gpt", "value": a},
|
33 |
+
],
|
34 |
+
}
|
35 |
+
)
|
36 |
+
|
37 |
+
print(f"#out: {len(new_content)}")
|
38 |
+
json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
|
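
For reference, each Alpaca record ("instruction"/"input"/"output") becomes one ShareGPT-style record containing a single human/gpt exchange. An illustrative input/output pair under the mapping above (values invented):

# Alpaca-style input record
alpaca = {
    "instruction": "Summarize the text.",
    "input": "FastChat is an open platform for training and serving chatbots.",
    "output": "FastChat is an open chatbot platform.",
}

# ShareGPT-style record produced by the script (first record, hence id "alpaca_0")
sharegpt = {
    "id": "alpaca_0",
    "conversations": [
        {
            "from": "human",
            "value": "Summarize the text.\nInput:\nFastChat is an open platform for training and serving chatbots.",
        },
        {"from": "gpt", "value": "FastChat is an open chatbot platform."},
    ],
}
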
fastchat/data/extract_gpt4_only.py
ADDED
@@ -0,0 +1,32 @@
1 |
+
"""
|
2 |
+
Extract the conversations generated by GPT-4 only.
|
3 |
+
|
4 |
+
Usage: python3 -m fastchat.data.extract_gpt4_only --in sharegpt.json
|
5 |
+
"""
|
6 |
+
import argparse
|
7 |
+
import json
|
8 |
+
|
9 |
+
|
10 |
+
if __name__ == "__main__":
|
11 |
+
parser = argparse.ArgumentParser()
|
12 |
+
parser.add_argument("--in-file", type=str, required=True)
|
13 |
+
parser.add_argument("--out-file", type=str)
|
14 |
+
parser.add_argument("--begin", type=int)
|
15 |
+
parser.add_argument("--end", type=int)
|
16 |
+
args = parser.parse_args()
|
17 |
+
|
18 |
+
content = json.load(open(args.in_file, "r"))
|
19 |
+
content = content[args.begin : args.end]
|
20 |
+
new_content = []
|
21 |
+
for c in content:
|
22 |
+
model = c.get("model", None)
|
23 |
+
if model == "gpt4" or model is None:
|
24 |
+
new_content.append(c)
|
25 |
+
|
26 |
+
if args.out_file:
|
27 |
+
out_file = args.out_file
|
28 |
+
else:
|
29 |
+
out_file = args.in_file.replace(".json", "_gpt4.json")
|
30 |
+
|
31 |
+
print(f"#in: {len(content)}, #out: {len(new_content)}")
|
32 |
+
json.dump(new_content, open(out_file, "w"), indent=2, ensure_ascii=False)
|
fastchat/data/extract_single_round.py
ADDED
@@ -0,0 +1,29 @@
1 |
+
"""
|
2 |
+
Extract the first round of the conversations.
|
3 |
+
|
4 |
+
Usage: python3 -m fastchat.data.extract_single_round --in sharegpt.json
|
5 |
+
"""
|
6 |
+
import argparse
|
7 |
+
import json
|
8 |
+
|
9 |
+
|
10 |
+
if __name__ == "__main__":
|
11 |
+
parser = argparse.ArgumentParser()
|
12 |
+
parser.add_argument("--in-file", type=str, required=True)
|
13 |
+
parser.add_argument("--out-file", type=str)
|
14 |
+
parser.add_argument("--begin", type=int)
|
15 |
+
parser.add_argument("--end", type=int)
|
16 |
+
args = parser.parse_args()
|
17 |
+
|
18 |
+
content = json.load(open(args.in_file, "r"))
|
19 |
+
content = content[args.begin : args.end]
|
20 |
+
for c in content:
|
21 |
+
c["conversations"] = c["conversations"][:2]
|
22 |
+
|
23 |
+
if args.out_file:
|
24 |
+
out_file = args.out_file
|
25 |
+
else:
|
26 |
+
out_file = args.in_file.replace(".json", "_single.json")
|
27 |
+
|
28 |
+
print(f"#in: {len(content)}, #out: {len(content)}")
|
29 |
+
json.dump(content, open(out_file, "w"), indent=2, ensure_ascii=False)
|
fastchat/data/filter_wrong_format.py
ADDED
@@ -0,0 +1,44 @@
1 |
+
"""
|
2 |
+
Filter conversations with wrong formats.
|
3 |
+
|
4 |
+
Usage:
|
5 |
+
python3 -m fastchat.data.filter_wrong_format --in input.json --out output.json
|
6 |
+
|
7 |
+
"""
|
8 |
+
import argparse
|
9 |
+
import json
|
10 |
+
import re
|
11 |
+
|
12 |
+
from tqdm import tqdm
|
13 |
+
|
14 |
+
wrong_indices_pattern = re.compile("\n1\. [^2]*\n1\. ")
|
15 |
+
|
16 |
+
|
17 |
+
def should_skip(conv):
|
18 |
+
# Filter wrong list indices like https://sharegpt.com/c/1pREAGO
|
19 |
+
for sentence in conv["conversations"]:
|
20 |
+
val = sentence["value"]
|
21 |
+
sub = re.search(wrong_indices_pattern, val)
|
22 |
+
if sub is not None:
|
23 |
+
return True
|
24 |
+
|
25 |
+
return False
|
26 |
+
|
27 |
+
|
28 |
+
if __name__ == "__main__":
|
29 |
+
parser = argparse.ArgumentParser()
|
30 |
+
parser.add_argument("--in-file", type=str, required=True)
|
31 |
+
parser.add_argument("--out-file", type=str, required=True)
|
32 |
+
args = parser.parse_args()
|
33 |
+
|
34 |
+
content = json.load(open(args.in_file, "r"))
|
35 |
+
|
36 |
+
new_content = []
|
37 |
+
for conv in tqdm(content):
|
38 |
+
if should_skip(conv):
|
39 |
+
print(f"{conv['id']} contains a wrong format.")
|
40 |
+
else:
|
41 |
+
new_content.append(conv)
|
42 |
+
|
43 |
+
print(f"#in: {len(content)}, #out: {len(new_content)}")
|
44 |
+
json.dump(new_content, open(args.out_file, "w"), indent=2, ensure_ascii=False)
|
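
The `wrong_indices_pattern` above targets conversations whose markdown lists restart numbering at 1 on every item, a common ShareGPT export glitch. A quick illustration with invented sample text:

from fastchat.data.filter_wrong_format import should_skip

bad = {"conversations": [{"value": "Steps:\n1. Install the package\n1. Start the server"}]}
good = {"conversations": [{"value": "Steps:\n1. Install the package\n2. Start the server"}]}
print(should_skip(bad))   # True  -> repeated "1." indices, conversation is filtered out
print(should_skip(good))  # False -> normal numbering, conversation is kept
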