Spaces: Runtime error
jiangjiechen committed · Commit 8acb22e · 1 Parent(s): fbb1938
init app
Browse files
- .gitignore +234 -0
- LICENSE +201 -0
- app.py +396 -0
- app_modules/overwrites.py +57 -0
- app_modules/presets.py +78 -0
- app_modules/utils.py +375 -0
- assets/Kelpy-Codos.js +76 -0
- assets/custom.css +191 -0
- assets/custom.js +1 -0
- assets/favicon.ico +0 -0
- assets/totopower-removebg.png +0 -0
- auction_workflow.py +293 -0
- data/bidders_demo.jsonl +4 -0
- data/items_demo.jsonl +26 -0
- requirements.txt +20 -0
- src/auctioneer_base.py +259 -0
- src/bidder_base.py +1031 -0
- src/human_bidder.py +137 -0
- src/item_base.py +50 -0
- src/prompt_base.py +349 -0
- utils.py +73 -0
.gitignore
ADDED
@@ -0,0 +1,234 @@
# Created by .ignore support plugin (hsz.mobi)
### macOS template
# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/modules.xml
# .idea/*.iml
# .idea/modules

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests
### VirtualEnv template
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
.Python
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
pyvenv.cfg
.venv
pip-selfcheck.json

.idea/
eden.py
backup/
raw/
runs
*nohup*
*.pt
*.out
/nlgeval/
*.pkl
*.db
/cache/
_archived/
output/
models/
*_proc
lightning_logs/
wandb/
.lock
cjjpy.py
logs/
exp/
*.ipynb
docs/
.bashrc
google-cloud-sdk/
scripts
LICENSE
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
app.py
ADDED
@@ -0,0 +1,396 @@
import os
import gradio as gr
from app_modules.presets import *
from app_modules.overwrites import *
from app_modules.utils import *
from src.item_base import create_items
from src.bidder_base import Bidder
from src.human_bidder import HumanBidder
from src.auctioneer_base import Auctioneer
from auction_workflow import run_auction, make_auction_hash
from utils import chunks, reset_state_list


BIDDER_NUM = 4
items = create_items('data/items_demo.jsonl')

def auction_loop_app(*args):
    global items

    bidder_list = args[0]   # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]
    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]

    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]

    # convert flatten list into a json list
    input_jsl = []
    for i, chunk in enumerate(chunks(args, len(input_keys))):
        js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash}
        for k, v in zip(input_keys, chunk):
            js[k] = v
        input_jsl.append(js)

    for js in input_jsl:
        js.pop('chatbot')
        if 'human' in js['model_name']:
            bidder_list.append(HumanBidder.create(**js))
        else:
            bidder_list.append(Bidder.create(**js))

    yield from run_auction(auction_hash, auctioneer, bidder_list, thread_num, yield_for_demo=True)


with open("assets/custom.css", "r", encoding="utf-8") as f:
    customCSS = f.read()

with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
    with gr.Row():
        gr.HTML(title)

    gr.Markdown(description_top)

    with gr.Row():
        with gr.Column(scale=6):
            # item_file = gr.File(label="Upload Item File", file_types=[".jsonl"])
            items_checkbox = gr.CheckboxGroup(
                choices=[item.info() for item in items[:20]],
                label="Items in Auction",
                info="Select the items you want to include in the auction.",
                value=[item.info() for item in items[:8]],
                type="index",
            )

        with gr.Column(scale=4):
            with gr.Row():
                openai_key = gr.Textbox(label="OpenAI API Key", value="", type="password", placeholder="sk-..")
                anthropic_key = gr.Textbox(label="Anthropic API Key", value="", type="password", placeholder="sk-ant-..")

            with gr.Row():
                with gr.Row():
                    item_shuffle = gr.Checkbox(
                        label="Shuffle Items",
                        value=False,
                        info='Shuffle the order of items in the auction.')
                    enable_discount = gr.Checkbox(
                        label="Enable Discount",
                        value=False,
                        info='When an item fails to sell at auction, it can be auctioned again at a reduced price.')

                with gr.Column():
                    min_markup_pct = gr.Slider(
                        minimum=0.1,
                        maximum=0.5,
                        value=0.1,
                        step=0.1,
                        interactive=True,
                        label='Min Increase',
                        info="The minimum percentage to increase a bid.",
                    )

                    thread_num = gr.Slider(
                        minimum=1,
                        maximum=BIDDER_NUM,
                        value=min(5, BIDDER_NUM),
                        step=1,
                        interactive=True,
                        label='Thread Number',
                        info="More threads, faster bidding, but will run into RateLimitError quicker."
                    )

    with gr.Row():
        bidder_info_gr = []
        chatbots = []
        monitors = []
        textbox_list = []
        for i in range(BIDDER_NUM):
            with gr.Tab(label=f"Bidder {i+1}"):
                with gr.Row().style(equal_height=True):
                    with gr.Column(scale=6):
                        with gr.Row():
                            chatbot = gr.Chatbot(elem_id="chuanhu_chatbot", height=600, label='Auction Log')
                            input_box = gr.Textbox(label="Human Bidder Input", interactive=False, placeholder="Please wait a moment before engaging in the auction.", visible=False)
                        chatbots.append(chatbot)
                        textbox_list.append(input_box)
                    with gr.Column(scale=4):
                        with gr.Tab(label=f'Parameters'):
                            model_name = gr.Dropdown(
                                choices=[
                                    'rule',
                                    'human',
                                    'gpt-3.5-turbo-0613',
                                    'gpt-3.5-turbo-16k-0613',
                                    'gpt-4-0613',
                                    # 'claude-instant-1.1',
                                    'claude-instant-1.2',
                                    # 'claude-1.3',
                                    'claude-2.0',
                                    # 'chat-bison-001',
                                ],
                                value='gpt-3.5-turbo-16k-0613',
                                label="Model Selection",
                            )
                            budget = gr.Number(
                                value=10000,
                                label='Budget ($)'
                            )
                            with gr.Row():
                                plan_strategy = gr.Dropdown(
                                    choices=[
                                        'none',
                                        'static',
                                        'adaptive',
                                    ],
                                    value='adaptive',
                                    label='Planning Strategy',
                                    info='None: no plan. Static: plan only once. Adaptive: replan for the remaining items.'
                                )
                                desire = gr.Dropdown(
                                    choices=[
                                        # 'default',
                                        'maximize_profit',
                                        'maximize_items',
                                        # 'specific_items',
                                    ],
                                    value='maximize_profit',
                                    label='Desire',
                                    info='Default desires: spending all the budget, stay within budget. All desires include the default one.',
                                )
                                overestimate_percent = gr.Slider(
                                    minimum=-100,
                                    maximum=100,
                                    value=10,
                                    step=10,
                                    interactive=True,
                                    label='Overestimate Percent (%)',
                                    info="Overestimate the true value of items by this percentage.",
                                )
                            with gr.Row():
                                correct_belief = gr.Checkbox(
                                    label='Correct Wrong Beliefs',
                                    value=True,
                                    info='Forceful beliefs correction about self and others.',
                                )
                                enable_learning = gr.Checkbox(
                                    label='Enable Learning',
                                    value=False,
                                    info='Learn from past auctions for future guidance. Only for adaptive bidder.',
                                    visible=False
                                )
                            temperature = gr.Slider(
                                minimum=0.,
                                maximum=2.0,
                                value=0.7,
                                step=0.1,
                                interactive=True,
                                label="Temperature",
                            )

                            # deprecated
                            # special_items = gr.CheckboxGroup(
                            #     value = [],
                            #     label='Special Items',
                            #     info='Special items add 20% value for you personally.',
                            #     visible=False,
                            # )
                            # hedge_percent = gr.Slider(
                            #     minimum=0,
                            #     maximum=100,
                            #     value=90,
                            #     step=1,
                            #     interactive=True,
                            #     label='Strategy (Hedging %)',
                            #     info="The maximum percentage of the estimated value to bid on an item.",
                            #     visible=False
                            # )

                        with gr.Tab(label='Monitors'):
                            with gr.Row():
                                budget_monitor = gr.Number(label='Budget Left ($)', interactive=False)
                                profit_monitor = gr.Number(label='Profit ($)', interactive=False)

                            with gr.Row():
                                engagement_monitor = gr.Number(
                                    label='Engagement',
                                    interactive=False,
                                    info='The number of times the bidder has bid.'
                                )
                                failure_monitor = gr.Number(
                                    label='Failed Bids',
                                    info='Out-of-budget, or less than the previous highest bid.',
                                    interactive=False
                                )

                            items_own_monitor = gr.DataFrame(
                                label='Items Owned',
                                headers=['Item', 'Bid ($)', 'Value ($)'],
                                datatype=['str', 'number', 'number'],
                                interactive=False,
                            )

                            with gr.Row():
                                tokens_monitor = gr.Number(
                                    label='Token Used',
                                    interactive=False,
                                    info='Tokens used in the last call.'
                                )
                                money_monitor = gr.Number(
                                    label='API Cost ($)',
                                    info='Only OpenAI cost for now.',
                                    interactive=False
                                )

                            plan_change_monitor = gr.DataFrame(
                                label='Plan Changes',
                                headers=['Round', 'Changed', 'New Plan'],
                                datatype=['str', 'bool', 'str'],
                                interactive=False,
                            )

                            plot_monitor = gr.Plot(
                                label='Budget-Profit Plot',
                                interactive=False
                            )

                        with gr.Tab(label='Belief Errors'):
                            with gr.Row():
                                self_belief_error_cnt_monitor = gr.Number(
                                    label='Wrong Beliefs of Self',
                                    info='Not knowing its own budget, bid items, or won items.',
                                    interactive=False,
                                )
                                other_belief_error_cnt_monitor = gr.Number(
                                    label='Wrong Beliefs of Others',
                                    info='Not knowing other bidders\' profits.',
                                    interactive=False,
                                )
                            budget_belief_monitor = gr.DataFrame(
                                label='Wrong Belief of Budget ($)',
                                headers=['Round', 'Belief', 'Truth'],
                                datatype=['str', 'number', 'number'],
                                interactive=False,
                            )
                            profit_belief_monitor = gr.DataFrame(
                                label='Wrong Belief of Profit ($)',
                                headers=['Bidder (Round)', 'Belief', 'Truth'],
                                datatype=['str', 'number', 'number'],
                                interactive=False,
                            )
                            win_bid_belief_monitor = gr.DataFrame(
                                label='Wrong Belief of Items Won',
                                headers=['Bidder (Round)', 'Belief', 'Truth'],
                                datatype=['str', 'str', 'str'],
                                interactive=False,
                            )

                        monitors += [
                            budget_monitor,
                            profit_monitor,
                            items_own_monitor,
                            tokens_monitor,
                            money_monitor,
                            failure_monitor,
                            self_belief_error_cnt_monitor,
                            other_belief_error_cnt_monitor,
                            engagement_monitor,
                            plot_monitor,
                            plan_change_monitor,
                            budget_belief_monitor,
                            profit_belief_monitor,
                            win_bid_belief_monitor,
                        ]

                bidder_info_gr += [
                    chatbot,
                    model_name,
                    desire,
                    plan_strategy,
                    budget,
                    correct_belief,
                    enable_learning,
                    temperature,
                    overestimate_percent,
                ]

    with gr.Row():
        with gr.Column():
            startBtn = gr.Button('Start Bidding', variant='primary', interactive=True)
        with gr.Column():
            clearBtn = gr.Button('New Auction', variant='secondary', interactive=False)
    btn_list = [startBtn, clearBtn]

    with gr.Accordion(label='Bidding Log (click to open)', open=True):
        with gr.Row():
            bidding_log = gr.Markdown(value="")

    gr.Markdown(description)

    bidder_list_state = gr.State([])    # session state

    start_args = dict(
        fn=auction_loop_app,
        inputs=[bidder_list_state, items_checkbox, openai_key, anthropic_key, thread_num, item_shuffle, enable_discount, min_markup_pct] + bidder_info_gr,
        outputs=[bidder_list_state] + chatbots + monitors + [bidding_log] + btn_list + textbox_list,   # TODO: handle textbox_list interactivity
        show_progress=True,
    )
    start_event = startBtn.click(**start_args)

    def bot(user_message, bidder_list, id):
        if len(bidder_list) > 0:
            bidder = bidder_list[int(id)]
            if bidder.need_input:
                bidder.input_box = user_message
                bidder.semaphore += 1
        return '', bidder_list

    # handle user input from time to time
    for i in range(len(textbox_list)):
        _dummy_id = gr.Number(i, visible=False, interactive=False)
        textbox_list[i].submit(
            bot,
            [textbox_list[i], bidder_list_state, _dummy_id],
            [textbox_list[i], bidder_list_state])

    clearBtn.click(reset_state_list,
                   inputs=[bidder_list_state] + chatbots + monitors + [bidding_log],
                   outputs=[bidder_list_state] + chatbots + monitors + [bidding_log],
                   show_progress=True).then(lambda: gr.update(interactive=True), outputs=[startBtn])

demo.title = 'Auction Arena'


demo.queue(max_size=64, concurrency_count=16).launch(
    # server_name='0.0.0.0',
    # ssl_verify=False,
    # share=True,
    # debug=True,
    show_api=False,
)

demo.close()
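A note on the regrouping step in auction_loop_app above: Gradio passes every bidder's widgets to the handler as one flattened *args tuple, and the handler rebuilds one dict per bidder by slicing that tuple against input_keys. The chunks helper is imported from the repo's top-level utils.py, which is not part of this hunk, so the sketch below assumes the usual fixed-size chunker and uses hypothetical stand-in values; treat it as an illustration of the data flow, not the repo's exact code.

# Minimal sketch of the flatten-then-regroup step (assumptions noted above).
def chunks(lst, n):
    # assumed behaviour of utils.chunks: yield successive n-sized slices
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

input_keys = ['chatbot', 'model_name', 'desire', 'plan_strategy', 'budget',
              'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent']

# hypothetical flattened widget values for two bidders (9 values each)
flat_args = [None, 'gpt-3.5-turbo-16k-0613', 'maximize_profit', 'adaptive', 10000, True, False, 0.7, 10,
             None, 'human', 'maximize_items', 'static', 8000, True, False, 0.7, 0]

bidders = []
for i, chunk in enumerate(chunks(flat_args, len(input_keys))):
    js = {'name': f"Bidder {i+1}", 'auction_hash': 'demo-hash'}  # make_auction_hash() in the real app
    js.update(zip(input_keys, chunk))
    js.pop('chatbot')  # the chatbot component itself is not a bidder parameter
    bidders.append(js)

print(bidders[1]['model_name'])  # -> 'human', which app.py routes to HumanBidder.create(**js)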
app_modules/overwrites.py
ADDED
@@ -0,0 +1,57 @@
from __future__ import annotations
import logging

# from llama_index import Prompt
from typing import List, Tuple
# import mdtex2html

from app_modules.presets import *
from app_modules.utils import *

def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
    logging.debug("Compacting text chunks...🚀🚀🚀")
    combined_str = [c.strip() for c in text_chunks if c.strip()]
    combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
    combined_str = "\n\n".join(combined_str)
    # resplit based on self.max_chunk_overlap
    text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
    return text_splitter.split_text(combined_str)


def postprocess(
    self, y: List[Tuple[str | None, str | None]]
) -> List[Tuple[str | None, str | None]]:
    """
    Parameters:
        y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
    Returns:
        List of tuples representing the message and response. Each message and response will be a string of HTML.
    """
    if y is None or y == []:
        return []
    temp = []
    for x in y:
        user, bot = x
        if not detect_converted_mark(user):
            user = convert_asis(user)
        if not detect_converted_mark(bot):
            bot = convert_mdtext(bot)
        temp.append((user, bot))
    return temp

with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
    customJS = f.read()
    kelpyCodos = f2.read()

def reload_javascript():
    print("Reloading javascript...")
    js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
    def template_response(*args, **kwargs):
        res = GradioTemplateResponseOriginal(*args, **kwargs)
        res.body = res.body.replace(b'</html>', f'{js}</html>'.encode("utf8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response

GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
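The postprocess function above takes a self parameter but is not attached to anything in this file, and the attachment is not visible in the app.py hunk either. In ChuanhuChatGPT, from which this module is adapted, the override is applied by monkey-patching gr.Chatbot.postprocess so that every chat turn is converted to HTML before rendering. The snippet below is a hedged sketch of that wiring under that assumption, not code from this commit.

# Hedged sketch: how a postprocess override like the one above is typically
# wired up in ChuanhuChatGPT-derived apps (assumption; not shown in this diff).
import gradio as gr
from app_modules.overwrites import postprocess

gr.Chatbot.postprocess = postprocess  # every (user, bot) pair now goes through convert_asis/convert_mdtext

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # instances created after the patch pick up the override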
app_modules/presets.py
ADDED
@@ -0,0 +1,78 @@
import gradio as gr


title = """<h1 align="center" style="min-width:200px; margin-top:0;"> <img src="https://raw.githubusercontent.com/jiangjiechen/jiangjiechen.github.io/1f23a6b72b7e0b57a54e31a583c9f668a2b8b4b6/media/icon_hua9f9b78e35233aa477f7219cbf68418f_67044_512x512_fill_lanczos_center_3.png" width="32px" style="display: inline"> Auction Arena </h1>"""

description_top = """\
<div align="center">
<p>
An interactive demo for this paper: <a href="https://auction-arena.github.io">Put Your Money Where Your Mouth Is: Evaluating Strategic Planning and Execution of LLM Agents in an Auction Arena</a>. Details of this work can be found at <a href="https://auction-arena.github.io">this page</a>. You can watch AI vs AI in this auction arena, or you can set `model_name=human` to engage in the arena personally. Please enter your API key before start, otherwise you will have errors (please refresh the page if you do). Feel free to <a href="mailto:[email protected]">contact us</a> if you have any questions!
</p >
</div>
"""
description = """\
<div align="center" style="margin:16px 0">
The demo is built on <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT">ChuanhuChatGPT</a>.
</div>
"""

small_and_beautiful_theme = gr.themes.Soft(
    primary_hue=gr.themes.Color(
        c50="#02C160",
        c100="rgba(2, 193, 96, 0.2)",
        c200="#02C160",
        c300="rgba(2, 193, 96, 0.32)",
        c400="rgba(2, 193, 96, 0.32)",
        c500="rgba(2, 193, 96, 1.0)",
        c600="rgba(2, 193, 96, 1.0)",
        c700="rgba(2, 193, 96, 0.32)",
        c800="rgba(2, 193, 96, 0.32)",
        c900="#02C160",
        c950="#02C160",
    ),
    secondary_hue=gr.themes.Color(
        c50="#576b95",
        c100="#576b95",
        c200="#576b95",
        c300="#576b95",
        c400="#576b95",
        c500="#576b95",
        c600="#576b95",
        c700="#576b95",
        c800="#576b95",
        c900="#576b95",
        c950="#576b95",
    ),
    neutral_hue=gr.themes.Color(
        name="gray",
        c50="#f9fafb",
        c100="#f3f4f6",
        c200="#e5e7eb",
        c300="#d1d5db",
        c400="#B2B2B2",
        c500="#808080",
        c600="#636363",
        c700="#515151",
        c800="#393939",
        c900="#272727",
        c950="#171717",
    ),
    radius_size=gr.themes.sizes.radius_sm,
).set(
    button_primary_background_fill="#06AE56",
    button_primary_background_fill_dark="#06AE56",
    button_primary_background_fill_hover="#07C863",
    button_primary_border_color="#06AE56",
    button_primary_border_color_dark="#06AE56",
    button_primary_text_color="#FFFFFF",
    button_primary_text_color_dark="#FFFFFF",
    button_secondary_background_fill="#F2F2F2",
    button_secondary_background_fill_dark="#2B2B2B",
    button_secondary_text_color="#393939",
    button_secondary_text_color_dark="#FFFFFF",
    # background_fill_primary="#F7F7F7",
    # background_fill_primary_dark="#1F1F1F",
    block_title_text_color="*primary_500",
    block_title_background_fill="*primary_100",
    input_background_fill="#F6F6F6",
)
app_modules/utils.py
ADDED
@@ -0,0 +1,375 @@
# -*- coding:utf-8 -*-
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
import logging
# import json
# import os
# import datetime
# import hashlib
# import csv
# import requests
import re
import html
# import markdown2
import torch
import sys
import gc
from pygments.lexers import guess_lexer, ClassNotFound

import gradio as gr
# from pypinyin import lazy_pinyin
# import tiktoken
# import mdtex2html
# from markdown import markdown
from pygments import highlight
from pygments.lexers import guess_lexer,get_lexer_by_name
from pygments.formatters import HtmlFormatter
# import transformers
# from peft import PeftModel
# from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer

from app_modules.presets import *

# logging.basicConfig(
#     level=logging.INFO,
#     format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s",
# )


def markdown_to_html_with_syntax_highlight(md_str):
    def replacer(match):
        lang = match.group(1) or "text"
        code = match.group(2)
        lang = lang.strip()
        #print(1,lang)
        if lang=="text":
            lexer = guess_lexer(code)
            lang = lexer.name
            #print(2,lang)
        try:
            lexer = get_lexer_by_name(lang, stripall=True)
        except ValueError:
            lexer = get_lexer_by_name("python", stripall=True)
        formatter = HtmlFormatter()
        #print(3,lexer.name)
        highlighted_code = highlight(code, lexer, formatter)

        return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'

    code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
    md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)

    html_str = markdown(md_str)
    return html_str


def normalize_markdown(md_text: str) -> str:
    lines = md_text.split("\n")
    normalized_lines = []
    inside_list = False

    for i, line in enumerate(lines):
        if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
            if not inside_list and i > 0 and lines[i - 1].strip() != "":
                normalized_lines.append("")
            inside_list = True
            normalized_lines.append(line)
        elif inside_list and line.strip() == "":
            if i < len(lines) - 1 and not re.match(
                r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
            ):
                normalized_lines.append(line)
            continue
        else:
            inside_list = False
            normalized_lines.append(line)

    return "\n".join(normalized_lines)


def convert_mdtext(md_text):
    code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
    inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
    code_blocks = code_block_pattern.findall(md_text)
    non_code_parts = code_block_pattern.split(md_text)[::2]

    result = []
    for non_code, code in zip(non_code_parts, code_blocks + [""]):
        if non_code.strip():
            non_code = normalize_markdown(non_code)
            if inline_code_pattern.search(non_code):
                result.append(markdown(non_code, extensions=["tables"]))
            else:
                result.append(mdtex2html.convert(non_code, extensions=["tables"]))
        if code.strip():
            code = f"\n```{code}\n\n```"
            code = markdown_to_html_with_syntax_highlight(code)
            result.append(code)
    result = "".join(result)
    result += ALREADY_CONVERTED_MARK
    return result

def convert_asis(userinput):
    return f"<p style=\"white-space:pre-wrap;\">{html.escape(userinput)}</p>"+ALREADY_CONVERTED_MARK

def detect_converted_mark(userinput):
    if userinput.endswith(ALREADY_CONVERTED_MARK):
        return True
    else:
        return False



def detect_language(code):
    if code.startswith("\n"):
        first_line = ""
    else:
        first_line = code.strip().split("\n", 1)[0]
    language = first_line.lower() if first_line else ""
    code_without_language = code[len(first_line) :].lstrip() if first_line else code
    return language, code_without_language

def convert_to_markdown(text):
    text = text.replace("$","&#36;")
    def replace_leading_tabs_and_spaces(line):
        new_line = []

        for char in line:
            if char == "\t":
                new_line.append("&#9;")
            elif char == " ":
                new_line.append("&nbsp;")
            else:
                break
        return "".join(new_line) + line[len(new_line):]

    markdown_text = ""
    lines = text.split("\n")
    in_code_block = False

    for line in lines:
        if in_code_block is False and line.startswith("```"):
            in_code_block = True
            markdown_text += f"{line}\n"
        elif in_code_block is True and line.startswith("```"):
            in_code_block = False
            markdown_text += f"{line}\n"
        elif in_code_block:
            markdown_text += f"{line}\n"
        else:
            line = replace_leading_tabs_and_spaces(line)
            line = re.sub(r"^(#)", r"\\\1", line)
            markdown_text += f"{line}  \n"

    return markdown_text

def add_language_tag(text):
    def detect_language(code_block):
        try:
            lexer = guess_lexer(code_block)
            return lexer.name.lower()
        except ClassNotFound:
            return ""

    code_block_pattern = re.compile(r"(```)(\w*\n[^`]+```)", re.MULTILINE)

    def replacement(match):
        code_block = match.group(2)
        if match.group(2).startswith("\n"):
            language = detect_language(code_block)
            if language:
                return f"```{language}{code_block}```"
            else:
                return f"```\n{code_block}```"
        else:
            return match.group(1) + code_block + "```"

    text2 = code_block_pattern.sub(replacement, text)
    return text2

def delete_last_conversation(chatbot, history):
    if len(chatbot) > 0:
        chatbot.pop()

    if len(history) > 0:
        history.pop()

    return (
        chatbot,
        history,
        "Delete Done",
    )

def reset_state():
    return [], [], "Reset Done"

def reset_textbox():
    return gr.update(value=""),""

def cancel_outputing():
    return "Stop Done"

def transfer_input(inputs):
    textbox = reset_textbox()
    return (
        inputs,
        gr.update(value=""),
        gr.Button.update(visible=True),
    )


class State:
    interrupted = False

    def interrupt(self):
        self.interrupted = True

    def recover(self):
        self.interrupted = False
shared_state = State()





# Greedy Search
def greedy_search(input_ids: torch.Tensor,
                  model: torch.nn.Module,
                  tokenizer: transformers.PreTrainedTokenizer,
                  stop_words: list,
                  max_length: int,
                  temperature: float = 1.0,
                  top_p: float = 1.0,
                  top_k: int = 25) -> Iterator[str]:
    generated_tokens = []
    past_key_values = None
    current_length = 1
    for i in range(max_length):
        with torch.no_grad():
            if past_key_values is None:
                outputs = model(input_ids)
            else:
                outputs = model(input_ids[:, -1:], past_key_values=past_key_values)
            logits = outputs.logits[:, -1, :]
            past_key_values = outputs.past_key_values

        # apply temperature
        logits /= temperature

        probs = torch.softmax(logits, dim=-1)
        # apply top_p
        probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
        probs_sum = torch.cumsum(probs_sort, dim=-1)
        mask = probs_sum - probs_sort > top_p
        probs_sort[mask] = 0.0

        # apply top_k
        #if top_k is not None:
        #    probs_sort1, _ = torch.topk(probs_sort, top_k)
        #    min_top_probs_sort = torch.min(probs_sort1, dim=-1, keepdim=True).values
        #    probs_sort = torch.where(probs_sort < min_top_probs_sort, torch.full_like(probs_sort, float(0.0)), probs_sort)

        probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
        next_token = torch.multinomial(probs_sort, num_samples=1)
        next_token = torch.gather(probs_idx, -1, next_token)

        input_ids = torch.cat((input_ids, next_token), dim=-1)

        generated_tokens.append(next_token[0].item())
        text = tokenizer.decode(generated_tokens)

        yield text
        if any([x in text for x in stop_words]):
            del past_key_values
            del logits
            del probs
            del probs_sort
            del probs_idx
            del probs_sum
            gc.collect()
            return

def generate_prompt_with_history(text,history,tokenizer,max_length=2048):
    prompt = "The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n[|Human|]Hello!\n[|AI|]Hi!"
    history = ["\n[|Human|]{}\n[|AI|]{}".format(x[0],x[1]) for x in history]
    history.append("\n[|Human|]{}\n[|AI|]".format(text))
    history_text = ""
    flag = False
    for x in history[::-1]:
        if tokenizer(prompt+history_text+x, return_tensors="pt")['input_ids'].size(-1) <= max_length:
            history_text = x + history_text
            flag = True
        else:
            break
    if flag:
        return prompt+history_text,tokenizer(prompt+history_text, return_tensors="pt")
    else:
        return None


def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
    for stop_word in stop_words:
        if s.endswith(stop_word):
            return True
        for i in range(1, len(stop_word)):
            if s.endswith(stop_word[:i]):
                return True
    return False



def load_tokenizer_and_model(base_model,adapter_model=None,load_8bit=False):
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    try:
        if torch.backends.mps.is_available():
            device = "mps"
    except:  # noqa: E722
        pass
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
    if device == "cuda":
        model = LlamaForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,
            torch_dtype=torch.float16,
            device_map="auto",
        )
        if adapter_model is not None:
            model = PeftModel.from_pretrained(
                model,
                adapter_model,
                torch_dtype=torch.float16,
            )
    elif device == "mps":
        model = LlamaForCausalLM.from_pretrained(
            base_model,
            device_map={"": device},
            torch_dtype=torch.float16,
        )
        if adapter_model is not None:
            model = PeftModel.from_pretrained(
                model,
                adapter_model,
                device_map={"": device},
                torch_dtype=torch.float16,
            )
    else:
        model = LlamaForCausalLM.from_pretrained(
            base_model, device_map={"": device}, low_cpu_mem_usage=True
        )
        if adapter_model is not None:
            model = PeftModel.from_pretrained(
                model,
                adapter_model,
                device_map={"": device},
            )

    if not load_8bit:
        model.half()  # seems to fix bugs for some users.

    model.eval()
    return tokenizer,model,device
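Despite its name, the sampling loop in greedy_search above is temperature plus nucleus (top-p) sampling: probabilities are sorted, everything outside the smallest prefix whose cumulative mass exceeds top_p is zeroed, and the survivors are renormalised before torch.multinomial. Below is a small worked example of just that filtering step with made-up probabilities, added for clarity; it is not part of the committed file.

# Worked example of the top-p filtering used in greedy_search (toy probabilities).
import torch

probs = torch.tensor([[0.05, 0.40, 0.10, 0.25, 0.20]])
top_p = 0.7

probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)  # [[0.40, 0.25, 0.20, 0.10, 0.05]]
probs_sum = torch.cumsum(probs_sort, dim=-1)                        # [[0.40, 0.65, 0.85, 0.95, 1.00]]
mask = probs_sum - probs_sort > top_p  # True once the mass *before* a token already exceeds top_p
probs_sort[mask] = 0.0                 # keeps 0.40, 0.25, 0.20; drops 0.10 and 0.05
probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))               # renormalise the surviving mass
next_token = torch.gather(probs_idx, -1, torch.multinomial(probs_sort, num_samples=1))
print(next_token)  # one of the original indices 1, 3 or 4, sampled in proportion 0.40 : 0.25 : 0.20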
assets/Kelpy-Codos.js
ADDED
@@ -0,0 +1,76 @@
// ==UserScript==
// @name         Kelpy Codos
// @namespace    https://github.com/Keldos-Li/Kelpy-Codos
// @version      1.0.5
// @author       Keldos; https://keldos.me/
// @description  Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially.
//               Based on Chuanhu ChatGPT version: ac04408 (2023-3-22)
// @license      GPL-3.0
// @grant        none
// ==/UserScript==

(function () {
    'use strict';

    function addCopyButton(pre) {
        var code = pre.querySelector('code');
        if (!code) {
            return; // do not add the button if no <code> element is found
        }
        var firstChild = code.firstChild;
        if (!firstChild) {
            return; // do not add the button if the <code> element has no child nodes
        }
        var button = document.createElement('button');
        button.textContent = '\uD83D\uDCCE'; // use the paperclip symbol as the "copy" button label
        button.style.position = 'relative';
        button.style.float = 'right';
        button.style.fontSize = '1em'; // optional: adjust the button size
        button.style.background = 'none'; // optional: remove the background color
        button.style.border = 'none'; // optional: remove the border
        button.style.cursor = 'pointer'; // optional: show a pointer cursor
        button.addEventListener('click', function () {
            var range = document.createRange();
            range.selectNodeContents(code);
            range.setStartBefore(firstChild); // start the range before the first child node
            var selection = window.getSelection();
            selection.removeAllRanges();
            selection.addRange(range);

            try {
                var success = document.execCommand('copy');
                if (success) {
                    button.textContent = '\u2714';
                    setTimeout(function () {
                        button.textContent = '\uD83D\uDCCE'; // restore the "copy" label
                    }, 2000);
                } else {
                    button.textContent = '\u2716';
                }
            } catch (e) {
                console.error(e);
                button.textContent = '\u2716';
            }

            selection.removeAllRanges();
        });
        code.insertBefore(button, firstChild); // insert the button before the first child element
    }

    function handleNewElements(mutationsList, observer) {
        for (var mutation of mutationsList) {
            if (mutation.type === 'childList') {
                for (var node of mutation.addedNodes) {
                    if (node.nodeName === 'PRE') {
                        addCopyButton(node);
                    }
                }
            }
        }
    }

    var observer = new MutationObserver(handleNewElements);
    observer.observe(document.documentElement, { childList: true, subtree: true });

    document.querySelectorAll('pre').forEach(addCopyButton);
})();
assets/custom.css
ADDED
@@ -0,0 +1,191 @@
1 |
+
:root {
|
2 |
+
--chatbot-color-light: #F3F3F3;
|
3 |
+
--chatbot-color-dark: #121111;
|
4 |
+
}
|
5 |
+
|
6 |
+
/* status_display */
|
7 |
+
#status_display {
|
8 |
+
display: flex;
|
9 |
+
min-height: 2.5em;
|
10 |
+
align-items: flex-end;
|
11 |
+
justify-content: flex-end;
|
12 |
+
}
|
13 |
+
#status_display p {
|
14 |
+
font-size: .85em;
|
15 |
+
font-family: monospace;
|
16 |
+
color: var(--body-text-color-subdued);
|
17 |
+
}
|
18 |
+
|
19 |
+
|
20 |
+
|
21 |
+
/* usage_display */
|
22 |
+
#usage_display {
|
23 |
+
height: 1em;
|
24 |
+
}
|
25 |
+
#usage_display p{
|
26 |
+
padding: 0 1em;
|
27 |
+
font-size: .85em;
|
28 |
+
font-family: monospace;
|
29 |
+
color: var(--body-text-color-subdued);
|
30 |
+
}
|
31 |
+
/* list */
|
32 |
+
ol:not(.options), ul:not(.options) {
|
33 |
+
padding-inline-start: 2em !important;
|
34 |
+
}
|
35 |
+
|
36 |
+
/* Thank @Keldos-Li for fixing it */
|
37 |
+
/* Light mode (default) */
|
38 |
+
#chuanhu_chatbot {
|
39 |
+
background-color: var(--chatbot-color-light) !important;
|
40 |
+
color: #000000 !important;
|
41 |
+
}
|
42 |
+
[data-testid = "bot"] {
|
43 |
+
background-color: #FFFFFF !important;
|
44 |
+
}
|
45 |
+
[data-testid = "user"] {
|
46 |
+
background-color: #95EC69 !important;
|
47 |
+
}
|
48 |
+
|
49 |
+
/* Dark mode */
|
50 |
+
.dark #chuanhu_chatbot {
|
51 |
+
background-color: var(--chatbot-color-dark) !important;
|
52 |
+
color: #FFFFFF !important;
|
53 |
+
}
|
54 |
+
.dark [data-testid = "bot"] {
|
55 |
+
background-color: #2C2C2C !important;
|
56 |
+
}
|
57 |
+
.dark [data-testid = "user"] {
|
58 |
+
background-color: #26B561 !important;
|
59 |
+
}
|
60 |
+
|
61 |
+
#chuanhu_chatbot {
|
62 |
+
height: 100%;
|
63 |
+
min-height: 400px;
|
64 |
+
}
|
65 |
+
|
66 |
+
[class *= "message"] {
|
67 |
+
border-radius: var(--radius-xl) !important;
|
68 |
+
border: none;
|
69 |
+
padding: var(--spacing-xl) !important;
|
70 |
+
font-size: var(--text-md) !important;
|
71 |
+
line-height: var(--line-md) !important;
|
72 |
+
min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
|
73 |
+
min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
|
74 |
+
}
|
75 |
+
[data-testid = "bot"] {
|
76 |
+
max-width: 85%;
|
77 |
+
border-bottom-left-radius: 0 !important;
|
78 |
+
}
|
79 |
+
[data-testid = "user"] {
|
80 |
+
max-width: 85%;
|
81 |
+
width: auto !important;
|
82 |
+
border-bottom-right-radius: 0 !important;
|
83 |
+
}
|
84 |
+
/* Table */
|
85 |
+
table {
|
86 |
+
margin: 1em 0;
|
87 |
+
border-collapse: collapse;
|
88 |
+
empty-cells: show;
|
89 |
+
}
|
90 |
+
td,th {
|
91 |
+
border: 1.2px solid var(--border-color-primary) !important;
|
92 |
+
padding: 0.2em;
|
93 |
+
}
|
94 |
+
thead {
|
95 |
+
background-color: rgba(175,184,193,0.2);
|
96 |
+
}
|
97 |
+
thead th {
|
98 |
+
padding: .5em .2em;
|
99 |
+
}
|
100 |
+
/* Inline code */
|
101 |
+
#chuanhu_chatbot code {
|
102 |
+
display: inline;
|
103 |
+
white-space: break-spaces;
|
104 |
+
border-radius: 6px;
|
105 |
+
margin: 0 2px 0 2px;
|
106 |
+
padding: .2em .4em .1em .4em;
|
107 |
+
background-color: rgba(175,184,193,0.2);
|
108 |
+
}
|
109 |
+
/* Code block */
|
110 |
+
#chuanhu_chatbot pre code {
|
111 |
+
display: block;
|
112 |
+
overflow: auto;
|
113 |
+
white-space: pre;
|
114 |
+
background-color: hsla(0, 0%, 0%, 80%)!important;
|
115 |
+
border-radius: 10px;
|
116 |
+
padding: 1.4em 1.2em 0em 1.4em;
|
117 |
+
margin: 1.2em 2em 1.2em 0.5em;
|
118 |
+
color: #FFF;
|
119 |
+
box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
|
120 |
+
}
|
121 |
+
/* Hightlight */
|
122 |
+
#chuanhu_chatbot .highlight { background-color: transparent }
|
123 |
+
#chuanhu_chatbot .highlight .hll { background-color: #49483e }
|
124 |
+
#chuanhu_chatbot .highlight .c { color: #75715e } /* Comment */
|
125 |
+
#chuanhu_chatbot .highlight .err { color: #960050; background-color: #1e0010 } /* Error */
|
126 |
+
#chuanhu_chatbot .highlight .k { color: #66d9ef } /* Keyword */
|
127 |
+
#chuanhu_chatbot .highlight .l { color: #ae81ff } /* Literal */
|
128 |
+
#chuanhu_chatbot .highlight .n { color: #f8f8f2 } /* Name */
|
129 |
+
#chuanhu_chatbot .highlight .o { color: #f92672 } /* Operator */
|
130 |
+
#chuanhu_chatbot .highlight .p { color: #f8f8f2 } /* Punctuation */
|
131 |
+
#chuanhu_chatbot .highlight .ch { color: #75715e } /* Comment.Hashbang */
|
132 |
+
#chuanhu_chatbot .highlight .cm { color: #75715e } /* Comment.Multiline */
|
133 |
+
#chuanhu_chatbot .highlight .cp { color: #75715e } /* Comment.Preproc */
|
134 |
+
#chuanhu_chatbot .highlight .cpf { color: #75715e } /* Comment.PreprocFile */
|
135 |
+
#chuanhu_chatbot .highlight .c1 { color: #75715e } /* Comment.Single */
|
136 |
+
#chuanhu_chatbot .highlight .cs { color: #75715e } /* Comment.Special */
|
137 |
+
#chuanhu_chatbot .highlight .gd { color: #f92672 } /* Generic.Deleted */
|
138 |
+
#chuanhu_chatbot .highlight .ge { font-style: italic } /* Generic.Emph */
|
139 |
+
#chuanhu_chatbot .highlight .gi { color: #a6e22e } /* Generic.Inserted */
|
140 |
+
#chuanhu_chatbot .highlight .gs { font-weight: bold } /* Generic.Strong */
|
141 |
+
#chuanhu_chatbot .highlight .gu { color: #75715e } /* Generic.Subheading */
|
142 |
+
#chuanhu_chatbot .highlight .kc { color: #66d9ef } /* Keyword.Constant */
|
143 |
+
#chuanhu_chatbot .highlight .kd { color: #66d9ef } /* Keyword.Declaration */
|
144 |
+
#chuanhu_chatbot .highlight .kn { color: #f92672 } /* Keyword.Namespace */
|
145 |
+
#chuanhu_chatbot .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
|
146 |
+
#chuanhu_chatbot .highlight .kr { color: #66d9ef } /* Keyword.Reserved */
|
147 |
+
#chuanhu_chatbot .highlight .kt { color: #66d9ef } /* Keyword.Type */
|
148 |
+
#chuanhu_chatbot .highlight .ld { color: #e6db74 } /* Literal.Date */
|
149 |
+
#chuanhu_chatbot .highlight .m { color: #ae81ff } /* Literal.Number */
|
150 |
+
#chuanhu_chatbot .highlight .s { color: #e6db74 } /* Literal.String */
|
151 |
+
#chuanhu_chatbot .highlight .na { color: #a6e22e } /* Name.Attribute */
|
152 |
+
#chuanhu_chatbot .highlight .nb { color: #f8f8f2 } /* Name.Builtin */
|
153 |
+
#chuanhu_chatbot .highlight .nc { color: #a6e22e } /* Name.Class */
|
154 |
+
#chuanhu_chatbot .highlight .no { color: #66d9ef } /* Name.Constant */
|
155 |
+
#chuanhu_chatbot .highlight .nd { color: #a6e22e } /* Name.Decorator */
|
156 |
+
#chuanhu_chatbot .highlight .ni { color: #f8f8f2 } /* Name.Entity */
|
157 |
+
#chuanhu_chatbot .highlight .ne { color: #a6e22e } /* Name.Exception */
|
158 |
+
#chuanhu_chatbot .highlight .nf { color: #a6e22e } /* Name.Function */
|
159 |
+
#chuanhu_chatbot .highlight .nl { color: #f8f8f2 } /* Name.Label */
|
160 |
+
#chuanhu_chatbot .highlight .nn { color: #f8f8f2 } /* Name.Namespace */
|
161 |
+
#chuanhu_chatbot .highlight .nx { color: #a6e22e } /* Name.Other */
|
162 |
+
#chuanhu_chatbot .highlight .py { color: #f8f8f2 } /* Name.Property */
|
163 |
+
#chuanhu_chatbot .highlight .nt { color: #f92672 } /* Name.Tag */
|
164 |
+
#chuanhu_chatbot .highlight .nv { color: #f8f8f2 } /* Name.Variable */
|
165 |
+
#chuanhu_chatbot .highlight .ow { color: #f92672 } /* Operator.Word */
|
166 |
+
#chuanhu_chatbot .highlight .w { color: #f8f8f2 } /* Text.Whitespace */
|
167 |
+
#chuanhu_chatbot .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
|
168 |
+
#chuanhu_chatbot .highlight .mf { color: #ae81ff } /* Literal.Number.Float */
|
169 |
+
#chuanhu_chatbot .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
|
170 |
+
#chuanhu_chatbot .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
|
171 |
+
#chuanhu_chatbot .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
|
172 |
+
#chuanhu_chatbot .highlight .sa { color: #e6db74 } /* Literal.String.Affix */
|
173 |
+
#chuanhu_chatbot .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
|
174 |
+
#chuanhu_chatbot .highlight .sc { color: #e6db74 } /* Literal.String.Char */
|
175 |
+
#chuanhu_chatbot .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
|
176 |
+
#chuanhu_chatbot .highlight .sd { color: #e6db74 } /* Literal.String.Doc */
|
177 |
+
#chuanhu_chatbot .highlight .s2 { color: #e6db74 } /* Literal.String.Double */
|
178 |
+
#chuanhu_chatbot .highlight .se { color: #ae81ff } /* Literal.String.Escape */
|
179 |
+
#chuanhu_chatbot .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
|
180 |
+
#chuanhu_chatbot .highlight .si { color: #e6db74 } /* Literal.String.Interpol */
|
181 |
+
#chuanhu_chatbot .highlight .sx { color: #e6db74 } /* Literal.String.Other */
|
182 |
+
#chuanhu_chatbot .highlight .sr { color: #e6db74 } /* Literal.String.Regex */
|
183 |
+
#chuanhu_chatbot .highlight .s1 { color: #e6db74 } /* Literal.String.Single */
|
184 |
+
#chuanhu_chatbot .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
|
185 |
+
#chuanhu_chatbot .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
|
186 |
+
#chuanhu_chatbot .highlight .fm { color: #a6e22e } /* Name.Function.Magic */
|
187 |
+
#chuanhu_chatbot .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
|
188 |
+
#chuanhu_chatbot .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
|
189 |
+
#chuanhu_chatbot .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
|
190 |
+
#chuanhu_chatbot .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
|
191 |
+
#chuanhu_chatbot .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
|
assets/custom.js
ADDED
@@ -0,0 +1 @@
// custom javascript here
assets/favicon.ico
ADDED
assets/totopower-removebg.png
ADDED
auction_workflow.py
ADDED
@@ -0,0 +1,293 @@
import os
import time
import gradio as gr
import ujson as json
import traceback
from typing import List
from tqdm import tqdm
from src.auctioneer_base import Auctioneer
from src.bidder_base import Bidder, bidders_to_chatbots, bidding_multithread
from utils import trace_back


LOG_DIR = 'logs'
enable_gr = gr.update(interactive=True)
disable_gr = gr.update(interactive=False)


def monitor_all(bidder_list: List[Bidder]):
    return sum([bidder.to_monitors() for bidder in bidder_list], [])


def parse_bid_price(auctioneer: Auctioneer, bidder: Bidder, msg: str):
    # rebid if the message is not parsable into a bid price
    bid_price = auctioneer.parse_bid(msg)
    while bid_price is None:
        re_msg = bidder.bid("You must be clear about your bidding decision, say either \"I'm out!\" or \"I bid $xxx!\". Please rebid.")
        bid_price = auctioneer.parse_bid(re_msg)
        print(f"{bidder.name} rebid: {re_msg}")
    return bid_price


def enable_human_box(bidder_list):
    signals = []
    for bidder in bidder_list:
        if 'human' in bidder.model_name and not bidder.withdraw:
            signals.append(gr.update(interactive=True, visible=True,
                                     placeholder="Please bid! Enter \"I'm out\" or \"I bid $xxx\"."))
        else:
            signals.append(disable_gr)
    return signals


def disable_all_box(bidder_list):
    signals = []
    for bidder in bidder_list:
        if 'human' in bidder.model_name:
            signals.append(gr.update(interactive=False, visible=True,
                                     placeholder="Wait a moment to engage in the auction."))
        else:
            signals.append(gr.update(interactive=False, visible=False))
    return signals


def run_auction(
        auction_hash: str,
        auctioneer: Auctioneer,
        bidder_list: List[Bidder],
        thread_num: int,
        yield_for_demo=True,
        log_dir=LOG_DIR,
        repeat_num=0,
        memo_file=None):

    # bidder_list[0].verbose=True

    if yield_for_demo:
        chatbot_list = bidders_to_chatbots(bidder_list)
        yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

    # ***************** Learn Round ****************
    for bidder in bidder_list:
        if bidder.enable_learning and memo_file:
            # if no prev memo file, then no need to learn.
            if os.path.exists(memo_file):
                with open(memo_file) as f:
                    data = json.load(f)
                    past_learnings = data['learnings'][bidder.name]
                    past_auction_log = data['auction_log']
                    bidder.learn_from_prev_auction(past_learnings, past_auction_log)

    # ***************** Plan Round *****************
    # init bidder profit
    bidder_profit_info = auctioneer.gather_all_status(bidder_list)
    for bidder in bidder_list:
        bidder.set_all_bidders_status(bidder_profit_info)

    plan_instructs = [bidder.get_plan_instruct(auctioneer.items) for bidder in bidder_list]

    bidding_multithread(bidder_list, plan_instructs, func_type='plan', thread_num=thread_num)

    if yield_for_demo:
        chatbot_list = bidders_to_chatbots(bidder_list)
        yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

    bar = tqdm(total=len(auctioneer.items_queue), desc='Auction Progress')
    while not auctioneer.end_auction():
        cur_item = auctioneer.present_item()

        bid_round = 0
        while True:
            # ***************** Bid Round *****************
            auctioneer_msg = auctioneer.ask_for_bid(bid_round)
            _bidder_list = []
            _bid_instruct_list = []
            # remove highest bidder and withdrawn bidders
            for bidder in bidder_list:
                if bidder is auctioneer.highest_bidder or bidder.withdraw:
                    bidder.need_input = False
                    continue
                else:
                    bidder.need_input = True  # enable input from demo
                    instruct = bidder.get_bid_instruct(auctioneer_msg, bid_round)
                    _bidder_list.append(bidder)
                    _bid_instruct_list.append(instruct)

            if yield_for_demo:
                chatbot_list = bidders_to_chatbots(bidder_list)
                yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + enable_human_box(bidder_list)

            _msgs = bidding_multithread(_bidder_list, _bid_instruct_list, func_type='bid', thread_num=thread_num)

            for i, (msg, bidder) in enumerate(zip(_msgs, _bidder_list)):
                if bidder.model_name == 'rule':
                    bid_price = bidder.bid_rule(auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)
                else:
                    bid_price = parse_bid_price(auctioneer, bidder, msg)

                # can't bid more than budget or less than previous highest bid
                while True:
                    fail_msg = bidder.bid_sanity_check(bid_price, auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)
                    if fail_msg is None:
                        break
                    else:
                        bidder.need_input = True  # enable input from demo
                        auctioneer_msg = auctioneer.ask_for_rebid(fail_msg=fail_msg, bid_price=bid_price)
                        rebid_instruct = bidder.get_rebid_instruct(auctioneer_msg)

                        if yield_for_demo:
                            chatbot_list = bidders_to_chatbots(bidder_list)
                            yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

                        msg = bidder.rebid_for_failure(rebid_instruct)
                        bid_price = parse_bid_price(auctioneer, bidder, msg)

                        if yield_for_demo:
                            chatbot_list = bidders_to_chatbots(bidder_list)
                            yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

                bidder.set_withdraw(bid_price)
                auctioneer.record_bid({'bidder': bidder, 'bid': bid_price, 'raw_msg': msg}, bid_round)

            if yield_for_demo:
                chatbot_list = bidders_to_chatbots(bidder_list)
                yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

            is_sold = auctioneer.check_hammer(bid_round)
            bid_round += 1
            if is_sold:
                break
            else:
                if auctioneer.fail_to_sell and auctioneer.enable_discount:
                    for bidder in bidder_list:
                        bidder.set_withdraw(0)  # back in the game

        # ***************** Summarize *****************
        summarize_instruct_list = []
        for bidder in bidder_list:
            if bidder is auctioneer.highest_bidder:
                win_lose_msg = bidder.win_bid(cur_item, auctioneer.highest_bid)
            else:
                win_lose_msg = bidder.lose_bid(cur_item)
            msg = bidder.get_summarize_instruct(
                bidding_history=auctioneer.all_bidding_history_to_string(),
                hammer_msg=auctioneer.get_hammer_msg(),
                win_lose_msg=win_lose_msg
            )
            summarize_instruct_list.append(msg)

        # record profit information of all bidders for each bidder
        # (not used in the auction, just for belief tracking evaluation)
        bidder_profit_info = auctioneer.gather_all_status(bidder_list)
        for bidder in bidder_list:
            bidder.set_all_bidders_status(bidder_profit_info)

        bidding_multithread(bidder_list, summarize_instruct_list, func_type='summarize', thread_num=thread_num)

        if yield_for_demo:
            chatbot_list = bidders_to_chatbots(bidder_list)
            yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

        # ***************** Replan *****************
        if len(auctioneer.items_queue) > 0:  # no need to replan if all items are sold
            replan_instruct_list = [bidder.get_replan_instruct(
                # bidding_history=auctioneer.all_bidding_history_to_string(),
                # hammer_msg=auctioneer.get_hammer_msg()
            ) for bidder in bidder_list]
            bidding_multithread(bidder_list, replan_instruct_list, func_type='replan', thread_num=thread_num)

            if yield_for_demo:
                chatbot_list = bidders_to_chatbots(bidder_list)
                yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)

        auctioneer.hammer_fall()
        bar.update(1)

    total_cost = sum([b.openai_cost for b in bidder_list]) + auctioneer.openai_cost
    bidder_reports = [bidder.profit_report() for bidder in bidder_list]

    if yield_for_demo:
        chatbot_list = bidders_to_chatbots(bidder_list, profit_report=True)
        yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log(bidder_reports) + f'\n## Total Cost: ${total_cost}'] + [disable_gr, enable_gr] + disable_all_box(bidder_list)

    memo = {'auction_log': auctioneer.log(show_model_name=False),
            'memo_text': bidder_reports,
            'profit': {bidder.name: bidder.profit for bidder in bidder_list},
            'total_cost': total_cost,
            'learnings': {bidder.name: bidder.learnings for bidder in bidder_list},
            'model_info': {bidder.name: bidder.model_name for bidder in bidder_list}}
    log_bidders(log_dir, auction_hash, bidder_list, repeat_num, memo)

    auctioneer.finish_auction()

    if not yield_for_demo:
        yield total_cost


def log_bidders(log_dir: str, auction_hash: str, bidder_list: List[Bidder], repeat_num: int, memo: dict):
    for bidder in bidder_list:
        log_file = f"{log_dir}/{auction_hash}/{bidder.name.replace(' ', '')}-{repeat_num}.jsonl"
        if not os.path.exists(log_file):
            os.makedirs(os.path.dirname(log_file), exist_ok=True)
        with open(log_file, 'a') as f:
            log_data = bidder.to_monitors(as_json=True)
            f.write(json.dumps(log_data) + '\n')

    with open(f"{log_dir}/{auction_hash}/memo-{repeat_num}.json", 'w') as f:
        f.write(json.dumps(memo) + '\n')


def make_auction_hash():
    return str(int(time.time()))


if __name__ == '__main__':
    import argparse
    from src.item_base import create_items
    from src.bidder_base import create_bidders
    from transformers import GPT2TokenizerFast
    import cjjpy as cjj

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', '-i', type=str, default='data/exp_base/')
    parser.add_argument('--shuffle', action='store_true')
    parser.add_argument('--repeat', type=int, default=1)
    parser.add_argument('--threads', '-t', type=int, help='Number of threads. Max is number of bidders. Reduce it if rate limit is low (e.g., GPT-4).', required=True)
    parser.add_argument('--memo_file', '-m', type=str, help='The last memo.json file to be loaded for learning. Only useful when the repeated auctions are interrupted (i.e., auction hash is different).')
    args = parser.parse_args()

    auction_hash = make_auction_hash()

    total_money_spent = 0
    for i in tqdm(range(args.repeat), desc='Repeat'):
        cnt = 3
        while cnt > 0:
            try:
                item_file = os.path.join(args.input_dir, 'items_demo.jsonl')
                bidder_file = os.path.join(args.input_dir, 'bidders_demo.jsonl')
                memo_file = args.memo_file if args.memo_file else f'{args.input_dir}/{auction_hash}/memo-{i-1}.json'  # past memo for learning
                items = create_items(item_file)
                bidders = create_bidders(bidder_file, auction_hash=auction_hash)
                auctioneer = Auctioneer(enable_discount=False)
                auctioneer.init_items(items)
                if args.shuffle:
                    auctioneer.shuffle_items()
                money_spent = list(run_auction(
                    auction_hash,
                    auctioneer,
                    bidders,
                    thread_num=min(args.threads, len(bidders)),
                    yield_for_demo=False,
                    log_dir=args.input_dir,
                    repeat_num=i,
                    memo_file=memo_file,
                ))
                total_money_spent += sum(money_spent)
                break
            except Exception as e:
                cnt -= 1
                print(f"Error in {i}th auction: {e}\n{trace_back(e)}")
                print(f"Retry {cnt} more times...")

    print('Total money spent: $', total_money_spent)
    cjj.SendEmail(f'Completed: {args.input_dir} - {auction_hash}', f'Total money spent: ${total_money_spent}')
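Review note: `run_auction` is a generator; the sketch below mirrors the `__main__` block above for a single head-less run. The data paths and thread count are illustrative, and API keys are assumed to be set in the environment.

```python
from src.item_base import create_items
from src.bidder_base import create_bidders
from src.auctioneer_base import Auctioneer
from auction_workflow import run_auction, make_auction_hash

auction_hash = make_auction_hash()
bidders = create_bidders('data/bidders_demo.jsonl', auction_hash=auction_hash)
auctioneer = Auctioneer(enable_discount=False)
auctioneer.init_items(create_items('data/items_demo.jsonl'))

# With yield_for_demo=False the generator yields only the total API cost at the end.
total_cost = sum(run_auction(auction_hash, auctioneer, bidders,
                             thread_num=min(4, len(bidders)),
                             yield_for_demo=False, repeat_num=0))
print(f'Total API cost: ${total_cost}')
```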
data/bidders_demo.jsonl
ADDED
@@ -0,0 +1,4 @@
{"name": "Bidder 1", "model_name": "gpt-3.5-turbo-0613", "budget": 20000, "desire": "maximize_profit", "plan_strategy": "adaptive", "temperature": 0.7, "overestimate_percent": 10, "correct_belief": true, "enable_learning": true}
{"name": "Bidder 2", "model_name": "gpt-3.5-turbo-0613", "budget": 20000, "desire": "maximize_items", "plan_strategy": "adaptive", "temperature": 0.7, "overestimate_percent": 10, "correct_belief": true, "enable_learning": true}
{"name": "Bidder 3", "model_name": "gpt-3.5-turbo-0613", "budget": 20000, "desire": "maximize_profit", "plan_strategy": "adaptive", "temperature": 0.7, "overestimate_percent": 10, "correct_belief": true, "enable_learning": true}
{"name": "Bidder 4", "model_name": "gpt-3.5-turbo-0613", "budget": 20000, "desire": "maximize_items", "plan_strategy": "adaptive", "temperature": 0.7, "overestimate_percent": 10, "correct_belief": true, "enable_learning": true}
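Review note: each line is one bidder configuration whose keys mirror the `Bidder` fields in `src/bidder_base.py`. A quick, illustrative way to inspect them:

```python
import ujson as json

with open('data/bidders_demo.jsonl') as f:
    configs = [json.loads(line) for line in f]
for cfg in configs:
    print(cfg['name'], cfg['model_name'], cfg['desire'], cfg['budget'])
```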
data/items_demo.jsonl
ADDED
@@ -0,0 +1,26 @@
{"name": "Widget A", "price": 1000, "desc": "A widget for all your needs", "id": 1, "true_value": 2000}
{"name": "Gadget B", "price": 1000, "desc": "A gadget with all the latest features", "id": 2, "true_value": 2000}
{"name": "Thingamajig C", "price": 1000, "desc": "A little thing that is sure to impress", "id": 3, "true_value": 2000}
{"name": "Doodad D", "price": 1000, "desc": "A durable doodad that will last for years", "id": 4, "true_value": 2000}
{"name": "Equipment E", "price": 5000, "desc": "A piece of equipment for any tough job", "id": 5, "true_value": 10000}
{"name": "Gizmo F", "price": 1000, "desc": "A gizmo that will surprise and delight", "id": 6, "true_value": 2000}
{"name": "Implement G", "price": 1000, "desc": "A implement for everyday tasks", "id": 7, "true_value": 2000}
{"name": "Apparatus H", "price": 1000, "desc": "An apparatus for specialized operations", "id": 8, "true_value": 2000}
{"name": "Contraption I", "price": 1000, "desc": "A contraption that sparks creativity", "id": 9, "true_value": 2000}
{"name": "Mechanism J", "price": 5000, "desc": "A mechanism for repetitive tasks", "id": 10, "true_value": 10000}
{"name": "Tool K", "price": 1000, "desc": "A tool for complex projects", "id": 11, "true_value": 2000}
{"name": "Device L", "price": 1000, "desc": "A device that enhances performance", "id": 12, "true_value": 2000}
{"name": "Instrument M", "price": 1000, "desc": "A instrument for precise measurements", "id": 13, "true_value": 2000}
{"name": "Utensil N", "price": 1000, "desc": "A utensil for dining and cooking", "id": 14, "true_value": 2000}
{"name": "Appliance O", "price": 1000, "desc": "A appliance for everyday household tasks", "id": 15, "true_value": 2000}
{"name": "Machine P", "price": 5000, "desc": "A machine for automated tasks", "id": 16, "true_value": 10000}
{"name": "Unit Q", "price": 1000, "desc": "A unit for individual tasks", "id": 17, "true_value": 2000}
{"name": "Element R", "price": 5000, "desc": "A element for scientific investigations", "id": 18, "true_value": 10000}
{"name": "Component S", "price": 1000, "desc": "A component for assembly and repair", "id": 19, "true_value": 2000}
{"name": "Piece T", "price": 1000, "desc": "A piece for art and craft projects", "id": 20, "true_value": 2000}
{"name": "Object U", "price": 1000, "desc": "An object for miscellaneous uses", "id": 21, "true_value": 2000}
{"name": "Item V", "price": 1000, "desc": "An item for versatile applications", "id": 22, "true_value": 2000}
{"name": "Product W", "price": 1000, "desc": "A product designed for efficiency", "id": 23, "true_value": 2000}
{"name": "Accessory X", "price": 1000, "desc": "An accessory to complement any outfit", "id": 24, "true_value": 2000}
{"name": "Module Y", "price": 1000, "desc": "A module for modular systems", "id": 25, "true_value": 2000}
{"name": "Entity Z", "price": 1000, "desc": "An entity with unique properties", "id": 26, "true_value": 2000}
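Review note: every demo item starts at half of its true value, so the catalogue can be sanity-checked with a short, illustrative script:

```python
import ujson as json

with open('data/items_demo.jsonl') as f:
    items = [json.loads(line) for line in f]
total_price = sum(item['price'] for item in items)
total_value = sum(item['true_value'] for item in items)
print(f"{len(items)} items, total starting price ${total_price}, total true value ${total_value}")
```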
requirements.txt
ADDED
@@ -0,0 +1,20 @@
openai>=0.27.8
langchain>=0.0.234
anthropic>=0.3.10
gradio
pydantic
coloredlogs
ujson
tiktoken
tqdm
inflect
vertexai
google-cloud-aiplatform>=1.28.1
torch
pygments
matplotlib
transformers
trueskill
seaborn
vllm
google-generativeai
src/auctioneer_base.py
ADDED
@@ -0,0 +1,259 @@
1 |
+
import re
|
2 |
+
from typing import List, Dict
|
3 |
+
from langchain.prompts import PromptTemplate
|
4 |
+
from langchain.chat_models import ChatOpenAI
|
5 |
+
from langchain.callbacks import get_openai_callback
|
6 |
+
from pydantic import BaseModel
|
7 |
+
from collections import defaultdict
|
8 |
+
from langchain.schema import (
|
9 |
+
AIMessage,
|
10 |
+
HumanMessage,
|
11 |
+
SystemMessage
|
12 |
+
)
|
13 |
+
import random
|
14 |
+
import inflect
|
15 |
+
from .bidder_base import Bidder
|
16 |
+
from .human_bidder import HumanBidder
|
17 |
+
from .item_base import Item
|
18 |
+
from .prompt_base import PARSE_BID_INSTRUCTION
|
19 |
+
|
20 |
+
p = inflect.engine()
|
21 |
+
|
22 |
+
|
23 |
+
class Auctioneer(BaseModel):
|
24 |
+
enable_discount: bool = False
|
25 |
+
items: List[Item] = []
|
26 |
+
cur_item: Item = None
|
27 |
+
highest_bidder: Bidder = None
|
28 |
+
highest_bid: int = -1
|
29 |
+
bidding_history = defaultdict(list) # history about the bidding war of one item
|
30 |
+
items_queue: List[Item] = [] # updates when a item is taken.
|
31 |
+
auction_logs = defaultdict(list) # history about the bidding war of all items
|
32 |
+
openai_cost = 0
|
33 |
+
prev_round_max_bid: int = -1
|
34 |
+
min_bid: int = 0
|
35 |
+
fail_to_sell = False
|
36 |
+
min_markup_pct = 0.1
|
37 |
+
|
38 |
+
class Config:
|
39 |
+
arbitrary_types_allowed = True
|
40 |
+
|
41 |
+
def init_items(self, items: List[Item]):
|
42 |
+
for item in items:
|
43 |
+
# reset discounted price
|
44 |
+
item.reset_price()
|
45 |
+
self.items = items
|
46 |
+
self.items_queue = items.copy()
|
47 |
+
|
48 |
+
def summarize_items_info(self):
|
49 |
+
desc = ''
|
50 |
+
for item in self.items:
|
51 |
+
desc += f"- {item.get_desc()}\n"
|
52 |
+
return desc.strip()
|
53 |
+
|
54 |
+
def present_item(self):
|
55 |
+
cur_item = self.items_queue.pop(0)
|
56 |
+
self.cur_item = cur_item
|
57 |
+
return cur_item
|
58 |
+
|
59 |
+
def shuffle_items(self):
|
60 |
+
random.shuffle(self.items)
|
61 |
+
self.items_queue = self.items.copy()
|
62 |
+
|
63 |
+
def record_bid(self, bid_info: dict, bid_round: int):
|
64 |
+
'''
|
65 |
+
Save the bidding history for each round, log the highest bidder and highest bidding
|
66 |
+
'''
|
67 |
+
# bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx}
|
68 |
+
self.bidding_history[bid_round].append(bid_info)
|
69 |
+
for hist in self.bidding_history[bid_round]:
|
70 |
+
if hist['bid'] > 0:
|
71 |
+
if self.highest_bid < hist['bid']:
|
72 |
+
self.highest_bid = hist['bid']
|
73 |
+
self.highest_bidder = hist['bidder']
|
74 |
+
elif self.highest_bid == hist['bid']:
|
75 |
+
# random if there's a tie
|
76 |
+
self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']])
|
77 |
+
self.auction_logs[f"{self.cur_item.get_desc()}"].append(
|
78 |
+
{'bidder': bid_info['bidder'],
|
79 |
+
'bid': bid_info['bid'],
|
80 |
+
'bid_round': bid_round})
|
81 |
+
|
82 |
+
def _biddings_to_string(self, bid_round: int):
|
83 |
+
'''
|
84 |
+
Return a string that summarizes the bidding history in a round
|
85 |
+
'''
|
86 |
+
# bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\n'
|
87 |
+
bid_hist_text = ''
|
88 |
+
for js in self.bidding_history[bid_round]:
|
89 |
+
if js['bid'] < 0:
|
90 |
+
bid_hist_text += f"- {js['bidder']} withdrew\n"
|
91 |
+
else:
|
92 |
+
bid_hist_text += f"- {js['bidder']}: ${js['bid']}\n"
|
93 |
+
return bid_hist_text.strip()
|
94 |
+
|
95 |
+
def all_bidding_history_to_string(self):
|
96 |
+
bid_hist_text = ''
|
97 |
+
for bid_round in self.bidding_history:
|
98 |
+
bid_hist_text += f"Round {bid_round}:\n{self._biddings_to_string(bid_round)}\n\n"
|
99 |
+
return bid_hist_text.strip()
|
100 |
+
|
101 |
+
def ask_for_bid(self, bid_round: int):
|
102 |
+
'''
|
103 |
+
Ask for bid, return the message to be sent to bidders
|
104 |
+
'''
|
105 |
+
if self.highest_bidder is None:
|
106 |
+
if bid_round > 0:
|
107 |
+
msg = f"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?"
|
108 |
+
else:
|
109 |
+
remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue]
|
110 |
+
msg = f"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\n\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?"
|
111 |
+
else:
|
112 |
+
bidding_history = self._biddings_to_string(bid_round - 1)
|
113 |
+
msg = f"Thank you! This is the {p.ordinal(bid_round)} round of bidding for this item:\n{bidding_history}\n\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?"
|
114 |
+
return msg
|
115 |
+
|
116 |
+
def ask_for_rebid(self, fail_msg: str, bid_price: int):
|
117 |
+
return f"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid."
|
118 |
+
|
119 |
+
def get_hammer_msg(self):
|
120 |
+
if self.highest_bidder is None:
|
121 |
+
return f"Since no one bid on {self.cur_item.name}, we'll move on to the next item."
|
122 |
+
else:
|
123 |
+
return f"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! The true value for {self.cur_item} is ${self.cur_item.true_value}."# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}."
|
124 |
+
|
125 |
+
def check_hammer(self, bid_round: int):
|
126 |
+
# check if the item is sold
|
127 |
+
self.fail_to_sell = False
|
128 |
+
num_bid = self._num_bids_in_round(bid_round)
|
129 |
+
|
130 |
+
# highest_bidder has already been updated in record_bid().
|
131 |
+
# so when num_bid == 0 & highest_bidder is None, it means no one bid on this item
|
132 |
+
if self.highest_bidder is None:
|
133 |
+
if num_bid == 0:
|
134 |
+
# failed to sell, as there is no highest bidder
|
135 |
+
self.fail_to_sell = True
|
136 |
+
if self.enable_discount and bid_round < 3:
|
137 |
+
# lower the starting price by 50%. discoutn only applies to the first 3 rounds
|
138 |
+
self.cur_item.lower_price(0.5)
|
139 |
+
is_sold = False
|
140 |
+
else:
|
141 |
+
is_sold = True
|
142 |
+
else:
|
143 |
+
# won't happen
|
144 |
+
raise ValueError(f"highest_bidder is None but num_bid is {num_bid}")
|
145 |
+
else:
|
146 |
+
if self.prev_round_max_bid < 0 and num_bid == 1:
|
147 |
+
# only one bidder in the first round
|
148 |
+
is_sold = True
|
149 |
+
else:
|
150 |
+
self.prev_round_max_bid = self.highest_bid
|
151 |
+
is_sold = self._num_bids_in_round(bid_round) == 0
|
152 |
+
return is_sold
|
153 |
+
|
154 |
+
def _num_bids_in_round(self, bid_round: int):
|
155 |
+
# check if there is no bid in the current round
|
156 |
+
cnt = 0
|
157 |
+
for hist in self.bidding_history[bid_round]:
|
158 |
+
if hist['bid'] > 0:
|
159 |
+
cnt += 1
|
160 |
+
return cnt
|
161 |
+
|
162 |
+
def hammer_fall(self):
|
163 |
+
print(f'* Sold! {self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.')
|
164 |
+
self.auction_logs[f"{self.cur_item.get_desc()}"].append({
|
165 |
+
'bidder': self.highest_bidder,
|
166 |
+
'bid': f"{self.highest_bid} (${self.cur_item.true_value})", # no need for the first $, as it will be added in the self.log()
|
167 |
+
'bid_round': 'Hammer price (true value)'})
|
168 |
+
self.cur_item = None
|
169 |
+
self.highest_bidder = None
|
170 |
+
self.highest_bid = -1
|
171 |
+
self.bidding_history = defaultdict(list)
|
172 |
+
self.prev_round_max_bid = -1
|
173 |
+
self.fail_to_sell = False
|
174 |
+
|
175 |
+
def end_auction(self):
|
176 |
+
return len(self.items_queue) == 0
|
177 |
+
|
178 |
+
def gather_all_status(self, bidders: List[Bidder]):
|
179 |
+
status = {}
|
180 |
+
for bidder in bidders:
|
181 |
+
status[bidder.name] = {
|
182 |
+
'profit': bidder.profit,
|
183 |
+
'items_won': bidder.items_won
|
184 |
+
}
|
185 |
+
return status
|
186 |
+
|
187 |
+
def parse_bid(self, text: str):
|
188 |
+
prompt = PARSE_BID_INSTRUCTION.format(response=text)
|
189 |
+
with get_openai_callback() as cb:
|
190 |
+
llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)
|
191 |
+
result = llm([HumanMessage(content=prompt)]).content
|
192 |
+
self.openai_cost += cb.total_cost
|
193 |
+
|
194 |
+
bid_number = re.findall(r'\$?\d+', result.replace(',', ''))
|
195 |
+
# find number in the result
|
196 |
+
if '-1' in result:
|
197 |
+
return -1
|
198 |
+
elif len(bid_number) > 0:
|
199 |
+
return int(bid_number[-1].replace('$', ''))
|
200 |
+
else:
|
201 |
+
print('* Rebid:', text)
|
202 |
+
return None
|
203 |
+
|
204 |
+
def log(self, bidder_personal_reports: list = [], show_model_name=True):
|
205 |
+
''' example
|
206 |
+
Apparatus H, starting at $1000.
|
207 |
+
|
208 |
+
1st bid:
|
209 |
+
Bidder 1 (gpt-3.5-turbo-16k-0613): $1200
|
210 |
+
Bidder 2 (gpt-3.5-turbo-16k-0613): $1100
|
211 |
+
Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn
|
212 |
+
Bidder 4 (gpt-3.5-turbo-16k-0613): $1200
|
213 |
+
|
214 |
+
2nd bid:
|
215 |
+
Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn
|
216 |
+
Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn
|
217 |
+
|
218 |
+
Hammer price:
|
219 |
+
Bidder 4 (gpt-3.5-turbo-16k-0613): $1200
|
220 |
+
'''
|
221 |
+
markdown_output = "## Auction Log\n\n"
|
222 |
+
for i, (item, bids) in enumerate(self.auction_logs.items()):
|
223 |
+
markdown_output += f"### {i+1}. {item}\n\n"
|
224 |
+
cur_bid_round = -1
|
225 |
+
for i, bid in enumerate(bids):
|
226 |
+
if bid['bid_round'] != cur_bid_round:
|
227 |
+
cur_bid_round = bid['bid_round']
|
228 |
+
if isinstance(bid['bid_round'], int):
|
229 |
+
markdown_output += f"\n#### {p.ordinal(bid['bid_round']+1)} bid:\n\n"
|
230 |
+
else:
|
231 |
+
markdown_output += f"\n#### {bid['bid_round']}:\n\n"
|
232 |
+
bid_price = f"${bid['bid']}" if bid['bid'] != -1 else 'Withdrew'
|
233 |
+
if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):
|
234 |
+
if show_model_name:
|
235 |
+
markdown_output += f"* {bid['bidder']} ({bid['bidder'].model_name}): {bid_price}\n"
|
236 |
+
else:
|
237 |
+
markdown_output += f"* {bid['bidder']}: {bid_price}\n"
|
238 |
+
else:
|
239 |
+
markdown_output += f"* None bid\n"
|
240 |
+
markdown_output += "\n"
|
241 |
+
|
242 |
+
if len(bidder_personal_reports) != 0:
|
243 |
+
markdown_output += f"\n## Personal Report"
|
244 |
+
for report in bidder_personal_reports:
|
245 |
+
markdown_output += f"\n\n{report}"
|
246 |
+
return markdown_output.strip()
|
247 |
+
|
248 |
+
def finish_auction(self):
|
249 |
+
self.auction_logs = defaultdict(list)
|
250 |
+
self.cur_item = None
|
251 |
+
self.highest_bidder = None
|
252 |
+
self.highest_bid = -1
|
253 |
+
self.bidding_history = defaultdict(list)
|
254 |
+
self.items_queue = []
|
255 |
+
self.items = []
|
256 |
+
self.prev_round_max_bid = -1
|
257 |
+
self.fail_to_sell = False
|
258 |
+
self.min_bid = 0
|
259 |
+
|
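Review note: a minimal sketch of how the `Auctioneer` API above is driven per item. The scripted loop below records no real bids and only stands in for the LLM bidders; a real run parses each bidder reply with `auctioneer.parse_bid()` before recording it.

```python
from src.auctioneer_base import Auctioneer
from src.item_base import create_items

auctioneer = Auctioneer(enable_discount=False)
auctioneer.init_items(create_items('data/items_demo.jsonl'))

while not auctioneer.end_auction():
    item = auctioneer.present_item()
    bid_round = 0
    while True:
        _ = auctioneer.ask_for_bid(bid_round)  # natural-language call for bids
        # a real run would call auctioneer.record_bid(
        #     {'bidder': bidder, 'bid': price, 'raw_msg': msg}, bid_round) for each reply
        if auctioneer.check_hammer(bid_round):  # True once nobody raises the bid
            break
        bid_round += 1
    auctioneer.hammer_fall()  # log the hammer price and reset per-item state
print(auctioneer.log())
```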
src/bidder_base.py
ADDED
@@ -0,0 +1,1031 @@
1 |
+
from typing import List
|
2 |
+
from langchain.base_language import BaseLanguageModel
|
3 |
+
from langchain.schema import (
|
4 |
+
AIMessage,
|
5 |
+
HumanMessage,
|
6 |
+
SystemMessage
|
7 |
+
)
|
8 |
+
from langchain.chat_models import (
|
9 |
+
ChatAnthropic,
|
10 |
+
ChatOpenAI,
|
11 |
+
ChatVertexAI,
|
12 |
+
ChatGooglePalm,
|
13 |
+
)
|
14 |
+
import vertexai
|
15 |
+
from langchain.input import get_colored_text
|
16 |
+
from langchain.callbacks import get_openai_callback
|
17 |
+
from collections import defaultdict
|
18 |
+
from pydantic import BaseModel
|
19 |
+
import queue
|
20 |
+
import threading
|
21 |
+
import os
|
22 |
+
import random
|
23 |
+
import time
|
24 |
+
import ujson as json
|
25 |
+
import matplotlib.pyplot as plt
|
26 |
+
from .item_base import Item, item_list_equal
|
27 |
+
from .prompt_base import (
|
28 |
+
AUCTION_HISTORY,
|
29 |
+
# INSTRUCT_OBSERVE_TEMPLATE,
|
30 |
+
_LEARNING_STATEMENT,
|
31 |
+
INSTRUCT_PLAN_TEMPLATE,
|
32 |
+
INSTRUCT_BID_TEMPLATE,
|
33 |
+
INSTRUCT_SUMMARIZE_TEMPLATE,
|
34 |
+
INSTRUCT_LEARNING_TEMPLATE,
|
35 |
+
INSTRUCT_REPLAN_TEMPLATE,
|
36 |
+
SYSTEM_MESSAGE,
|
37 |
+
)
|
38 |
+
import sys
|
39 |
+
sys.path.append('..')
|
40 |
+
from utils import LoadJsonL, extract_jsons_from_text, extract_numbered_list, trace_back
|
41 |
+
|
42 |
+
|
43 |
+
# DESIRE_DESC = {
|
44 |
+
# 'default': "Your goal is to fully utilize your budget while actively participating in the auction",
|
45 |
+
# 'maximize_profit': "Your goal is to maximize your overall profit, and fully utilize your budget while actively participating in the auction. This involves strategic bidding to win items for less than their true value, thereby ensuring the difference between the price paid and the item's value is as large as possible",
|
46 |
+
# 'maximize_items': "Your goal is to win as many items as possible, and fully utilize your budget while actively participating in the auction. While keeping your budget in mind, you should aim to participate broadly across different items, striving to be the highest bidder more often than not",
|
47 |
+
# } # remove period at the end of each description
|
48 |
+
|
49 |
+
|
50 |
+
DESIRE_DESC = {
|
51 |
+
'maximize_profit': "Your primary objective is to secure the highest profit at the end of this auction, compared to all other bidders",
|
52 |
+
'maximize_items': "Your primary objective is to win the highest number of items at the end of this auction, compared to everyone else",
|
53 |
+
}
|
54 |
+
|
55 |
+
|
56 |
+
class Bidder(BaseModel):
|
57 |
+
name: str
|
58 |
+
model_name: str
|
59 |
+
budget: int
|
60 |
+
desire: str
|
61 |
+
plan_strategy: str
|
62 |
+
temperature: float = 0.7
|
63 |
+
overestimate_percent: int = 10
|
64 |
+
correct_belief: bool
|
65 |
+
enable_learning: bool = False
|
66 |
+
|
67 |
+
llm: BaseLanguageModel = None
|
68 |
+
openai_cost = 0
|
69 |
+
llm_token_count = 0
|
70 |
+
|
71 |
+
verbose: bool = False
|
72 |
+
auction_hash: str = ''
|
73 |
+
|
74 |
+
system_message: str = ''
|
75 |
+
original_budget: int = 0
|
76 |
+
|
77 |
+
# working memory
|
78 |
+
profit: int = 0
|
79 |
+
cur_item_id = 0
|
80 |
+
items: list = []
|
81 |
+
dialogue_history: list = [] # for gradio UI display
|
82 |
+
llm_prompt_history: list = [] # for tracking llm calling
|
83 |
+
items_won = []
|
84 |
+
bid_history: list = [] # history of the bidding of a single item
|
85 |
+
plan_instruct: str = '' # instruction for planning
|
86 |
+
cur_plan: str = '' # current plan
|
87 |
+
status_quo: dict = {} # belief of budget and profit, self and others
|
88 |
+
withdraw: bool = False # state of withdraw
|
89 |
+
learnings: str = '' # learnings from previous biddings. If given, then use it to guide the rest of the auction.
|
90 |
+
max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)
|
91 |
+
rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item
|
92 |
+
|
93 |
+
# belief tracking
|
94 |
+
failed_bid_cnt: int = 0 # count of failed bids (overspending)
|
95 |
+
total_bid_cnt: int = 0 # count of total bids
|
96 |
+
self_belief_error_cnt: int = 0
|
97 |
+
total_self_belief_cnt: int = 0
|
98 |
+
other_belief_error_cnt: int = 0
|
99 |
+
total_other_belief_cnt: int = 0
|
100 |
+
|
101 |
+
engagement_count: int = 0
|
102 |
+
budget_history = []
|
103 |
+
profit_history = []
|
104 |
+
budget_error_history = []
|
105 |
+
profit_error_history = []
|
106 |
+
win_bid_error_history = []
|
107 |
+
engagement_history = defaultdict(int)
|
108 |
+
all_bidders_status = {} # track others' profit
|
109 |
+
changes_of_plan = []
|
110 |
+
|
111 |
+
# not used
|
112 |
+
input_box: str = None
|
113 |
+
need_input = False
|
114 |
+
semaphore = 0
|
115 |
+
|
116 |
+
class Config:
|
117 |
+
arbitrary_types_allowed = True
|
118 |
+
|
119 |
+
def __repr__(self):
|
120 |
+
return self.name
|
121 |
+
|
122 |
+
def __str__(self):
|
123 |
+
return self.name
|
124 |
+
|
125 |
+
@classmethod
|
126 |
+
def create(cls, **data):
|
127 |
+
instance = cls(**data)
|
128 |
+
instance._post_init()
|
129 |
+
return instance
|
130 |
+
|
131 |
+
def _post_init(self):
|
132 |
+
self.original_budget = self.budget
|
133 |
+
self.system_message = SYSTEM_MESSAGE.format(
|
134 |
+
name=self.name,
|
135 |
+
desire_desc=DESIRE_DESC[self.desire],
|
136 |
+
)
|
137 |
+
self._parse_llm()
|
138 |
+
self.dialogue_history += [
|
139 |
+
SystemMessage(content=self.system_message),
|
140 |
+
AIMessage(content='')
|
141 |
+
]
|
142 |
+
self.budget_history.append(self.budget)
|
143 |
+
self.profit_history.append(self.profit)
|
144 |
+
|
145 |
+
def _parse_llm(self):
|
146 |
+
if 'gpt-' in self.model_name:
|
147 |
+
self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)
|
148 |
+
elif 'claude' in self.model_name:
|
149 |
+
self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)
|
150 |
+
elif 'bison' in self.model_name:
|
151 |
+
self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)
|
152 |
+
elif 'rule' in self.model_name or 'human' in self.model_name:
|
153 |
+
self.llm = None
|
154 |
+
else:
|
155 |
+
raise NotImplementedError(self.model_name)
|
156 |
+
|
157 |
+
# def _rotate_openai_org(self):
|
158 |
+
# # use two organizations to avoid rate limit
|
159 |
+
# if os.environ.get('OPENAI_ORGANIZATION_1') and os.environ.get('OPENAI_ORGANIZATION_2'):
|
160 |
+
# return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])
|
161 |
+
# else:
|
162 |
+
# return None
|
163 |
+
|
164 |
+
def _run_llm_standalone(self, messages: list):
|
165 |
+
|
166 |
+
with get_openai_callback() as cb:
|
167 |
+
for i in range(6):
|
168 |
+
try:
|
169 |
+
input_token_num = self.llm.get_num_tokens_from_messages(messages)
|
170 |
+
if 'claude' in self.model_name: # anthropic's claude
|
171 |
+
result = self.llm(messages, max_tokens_to_sample=2048)
|
172 |
+
elif 'bison' in self.model_name: # google's palm-2
|
173 |
+
max_tokens = min(max(3900 - input_token_num, 192), 2048)
|
174 |
+
if isinstance(self.llm, ChatVertexAI):
|
175 |
+
result = self.llm(messages, max_output_tokens=max_tokens)
|
176 |
+
else:
|
177 |
+
result = self.llm(messages)
|
178 |
+
elif 'gpt' in self.model_name: # openai
|
179 |
+
if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:
|
180 |
+
max_tokens = max(3900 - input_token_num, 192)
|
181 |
+
else:
|
182 |
+
# gpt-4
|
183 |
+
# self.llm.openai_organization = self._rotate_openai_org()
|
184 |
+
max_tokens = max(8000 - input_token_num, 192)
|
185 |
+
result = self.llm(messages, max_tokens=max_tokens)
|
186 |
+
elif 'llama' in self.model_name.lower():
|
187 |
+
raise NotImplementedError
|
188 |
+
else:
|
189 |
+
raise NotImplementedError
|
190 |
+
break
|
191 |
+
except:
|
192 |
+
print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')
|
193 |
+
time.sleep(2**(i+1))
|
194 |
+
self.openai_cost += cb.total_cost
|
195 |
+
self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)
|
196 |
+
return result.content
|
197 |
+
|
198 |
+
def _get_estimated_value(self, item):
|
199 |
+
value = item.true_value * (1 + self.overestimate_percent / 100)
|
200 |
+
return int(value)
|
201 |
+
|
202 |
+
def _get_cur_item(self, key=None):
|
203 |
+
if self.cur_item_id < len(self.items):
|
204 |
+
if key is not None:
|
205 |
+
return self.items[self.cur_item_id].__dict__[key]
|
206 |
+
else:
|
207 |
+
return self.items[self.cur_item_id]
|
208 |
+
else:
|
209 |
+
return 'no item left'
|
210 |
+
|
211 |
+
def _get_next_item(self, key=None):
|
212 |
+
if self.cur_item_id + 1 < len(self.items):
|
213 |
+
if key is not None:
|
214 |
+
return self.items[self.cur_item_id + 1].__dict__[key]
|
215 |
+
else:
|
216 |
+
return self.items[self.cur_item_id + 1]
|
217 |
+
else:
|
218 |
+
return 'no item left'
|
219 |
+
|
220 |
+
def _get_remaining_items(self, as_str=False):
|
221 |
+
remain_items = self.items[self.cur_item_id + 1:]
|
222 |
+
if as_str:
|
223 |
+
return ', '.join([item.name for item in remain_items])
|
224 |
+
else:
|
225 |
+
return remain_items
|
226 |
+
|
227 |
+
def _get_items_value_str(self, items: List[Item]):
|
228 |
+
if not isinstance(items, list):
|
229 |
+
items = [items]
|
230 |
+
items_info = ''
|
231 |
+
for i, item in enumerate(items):
|
232 |
+
estimated_value = self._get_estimated_value(item)
|
233 |
+
_info = f"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\n"
|
234 |
+
items_info += _info
|
235 |
+
return items_info.strip()
|
236 |
+
|
237 |
+
# ********** Main Instructions and Functions ********** #
|
238 |
+
|
239 |
+
def learn_from_prev_auction(self, past_learnings, past_auction_log):
|
240 |
+
if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:
|
241 |
+
return ''
|
242 |
+
|
243 |
+
instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(
|
244 |
+
past_auction_log=past_auction_log,
|
245 |
+
past_learnings=past_learnings)
|
246 |
+
|
247 |
+
result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])
|
248 |
+
self.dialogue_history += [
|
249 |
+
HumanMessage(content=instruct_learn),
|
250 |
+
AIMessage(content=result),
|
251 |
+
]
|
252 |
+
self.llm_prompt_history.append({
|
253 |
+
'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],
|
254 |
+
'result': result,
|
255 |
+
'tag': 'learn_0'
|
256 |
+
})
|
257 |
+
|
258 |
+
self.learnings = '\n'.join(extract_numbered_list(result))
|
259 |
+
if self.learnings != '':
|
260 |
+
self.system_message += f"\n\nHere are your key learning points and practical tips from a previous auction. You can use them to guide this auction:\n```\n{self.learnings}\n```"
|
261 |
+
|
262 |
+
if self.verbose:
|
263 |
+
print(f"Learn from previous auction: {self.name} ({self.model_name}).")
|
264 |
+
return result
|
265 |
+
|
266 |
+
def _choose_items(self, budget, items: List[Item]):
|
267 |
+
'''
|
268 |
+
Choose items within budget for rule bidders.
|
269 |
+
Cheap ones first if maximize_items, expensive ones first if maximize_profit.
|
270 |
+
'''
|
271 |
+
sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x),
|
272 |
+
reverse=self.desire == 'maximize_profit')
|
273 |
+
|
274 |
+
chosen_items = []
|
275 |
+
i = 0
|
276 |
+
while budget >= 0 and i < len(sorted_items):
|
277 |
+
item = sorted_items[i]
|
278 |
+
if item.price <= budget:
|
279 |
+
chosen_items.append(item)
|
280 |
+
budget -= item.price
|
281 |
+
i += 1
|
282 |
+
|
283 |
+
return chosen_items
|
284 |
+
|
285 |
+
def get_plan_instruct(self, items: List[Item]):
|
286 |
+
self.items = items
|
287 |
+
plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(
|
288 |
+
bidder_name=self.name,
|
289 |
+
budget=self.budget,
|
290 |
+
item_num=len(items),
|
291 |
+
items_info=self._get_items_value_str(items),
|
292 |
+
desire_desc=DESIRE_DESC[self.desire],
|
293 |
+
learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT
|
294 |
+
)
|
295 |
+
return plan_instruct
|
296 |
+
|
297 |
+
def init_plan(self, plan_instruct: str):
|
298 |
+
'''
|
299 |
+
Plan for bidding with auctioneer's instruction and items information for customize estimated value.
|
300 |
+
plan = plan(system_message, instruct_plan)
|
301 |
+
'''
|
302 |
+
if 'rule' in self.model_name:
|
303 |
+
# self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])
|
304 |
+
# self.dialogue_history += [
|
305 |
+
# HumanMessage(content=plan_instruct),
|
306 |
+
# AIMessage(content=self.cur_plan),
|
307 |
+
# ]
|
308 |
+
# return self.cur_plan
|
309 |
+
return ''
|
310 |
+
|
311 |
+
self.status_quo = {
|
312 |
+
'remaining_budget': self.budget,
|
313 |
+
'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},
|
314 |
+
'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},
|
315 |
+
}
|
316 |
+
|
317 |
+
if self.plan_strategy == 'none':
|
318 |
+
self.plan_instruct = ''
|
319 |
+
self.cur_plan = ''
|
320 |
+
return None
|
321 |
+
|
322 |
+
system_msg = SystemMessage(content=self.system_message)
|
323 |
+
plan_msg = HumanMessage(content=plan_instruct)
|
324 |
+
messages = [system_msg, plan_msg]
|
325 |
+
result = self._run_llm_standalone(messages)
|
326 |
+
|
327 |
+
if self.verbose:
|
328 |
+
print(get_colored_text(plan_msg.content, 'red'))
|
329 |
+
print(get_colored_text(result, 'green'))
|
330 |
+
|
331 |
+
self.dialogue_history += [
|
332 |
+
plan_msg,
|
333 |
+
AIMessage(content=result),
|
334 |
+
]
|
335 |
+
self.llm_prompt_history.append({
|
336 |
+
'messages': [{x.type: x.content} for x in messages],
|
337 |
+
'result': result,
|
338 |
+
'tag': 'plan_0'
|
339 |
+
})
|
340 |
+
self.cur_plan = result
|
341 |
+
self.plan_instruct = plan_instruct
|
342 |
+
|
343 |
+
self.changes_of_plan.append([
|
344 |
+
f"{self.cur_item_id} (Initial)",
|
345 |
+
False,
|
346 |
+
json.dumps(extract_jsons_from_text(result)[-1]),
|
347 |
+
])
|
348 |
+
|
349 |
+
if self.verbose:
|
350 |
+
print(f"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.")
|
351 |
+
return result
|
352 |
+
|
353 |
+
def get_rebid_instruct(self, auctioneer_msg: str):
|
354 |
+
self.dialogue_history += [
|
355 |
+
HumanMessage(content=auctioneer_msg),
|
356 |
+
AIMessage(content='')
|
357 |
+
]
|
358 |
+
return auctioneer_msg
|
359 |
+
|
360 |
+
def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):
|
361 |
+
auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')
|
362 |
+
|
363 |
+
bid_instruct = INSTRUCT_BID_TEMPLATE.format(
|
364 |
+
auctioneer_msg=auctioneer_msg,
|
365 |
+
bidder_name=self.name,
|
366 |
+
cur_item=self._get_cur_item(),
|
367 |
+
estimated_value=self._get_estimated_value(self._get_cur_item()),
|
368 |
+
desire_desc=DESIRE_DESC[self.desire],
|
369 |
+
learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT
|
370 |
+
)
|
371 |
+
if bid_round == 0:
|
372 |
+
if self.plan_strategy in ['static', 'none']:
|
373 |
+
# Static (or no-plan) bidders never replan, which is where the status quo would normally be refreshed, so the status quo is added to the bid instruction here.
|
374 |
+
bid_instruct = f"""The status quo of this auction so far is:\n"{json.dumps(self.status_quo, indent=4)}"\n\n{bid_instruct}\n---\n"""
|
375 |
+
else:
|
376 |
+
bid_instruct = f'Now, the auctioneer says: "{auctioneer_msg}"'
|
377 |
+
|
378 |
+
self.dialogue_history += [
|
379 |
+
HumanMessage(content=bid_instruct),
|
380 |
+
AIMessage(content='')
|
381 |
+
]
|
382 |
+
return bid_instruct
|
383 |
+
|
384 |
+
def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):
|
385 |
+
'''
|
386 |
+
:param cur_bid: current highest bid
|
387 |
+
:param min_markup_pct: minimum percentage for bid increase
|
388 |
+
:param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)
|
389 |
+
'''
|
390 |
+
# dialogue history already got bid_instruction.
|
391 |
+
cur_item = self._get_cur_item()
|
392 |
+
|
393 |
+
if cur_bid <= 0:
|
394 |
+
next_bid = cur_item.price
|
395 |
+
else:
|
396 |
+
next_bid = cur_bid + min_markup_pct * cur_item.price
|
397 |
+
|
398 |
+
if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:
|
399 |
+
msg = int(next_bid)
|
400 |
+
self.rule_bid_cnt += 1
|
401 |
+
else:
|
402 |
+
msg = -1
|
403 |
+
|
404 |
+
content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '
|
405 |
+
content += "I'm out!" if msg < 0 else f"I bid ${msg}! (Rule generated)"
|
406 |
+
self.dialogue_history += [
|
407 |
+
HumanMessage(content=''),
|
408 |
+
AIMessage(content=content)
|
409 |
+
]
|
410 |
+
|
411 |
+
return msg
|
412 |
+
|
413 |
+
def bid(self, bid_instruct):
|
414 |
+
'''
|
415 |
+
Bid for an item with auctioneer's instruction and bidding history.
|
416 |
+
bid_history = bid(system_message, instruct_plan, plan, bid_history)
|
417 |
+
'''
|
418 |
+
if self.model_name == 'rule':
|
419 |
+
return ''
|
420 |
+
|
421 |
+
bid_msg = HumanMessage(content=bid_instruct)
|
422 |
+
|
423 |
+
if self.plan_strategy == 'none':
|
424 |
+
messages = [SystemMessage(content=self.system_message)]
|
425 |
+
else:
|
426 |
+
messages = [SystemMessage(content=self.system_message),
|
427 |
+
HumanMessage(content=self.plan_instruct),
|
428 |
+
AIMessage(content=self.cur_plan)]
|
429 |
+
|
430 |
+
self.bid_history += [bid_msg]
|
431 |
+
messages += self.bid_history
|
432 |
+
|
433 |
+
result = self._run_llm_standalone(messages)
|
434 |
+
|
435 |
+
self.bid_history += [AIMessage(content=result)]
|
436 |
+
|
437 |
+
self.dialogue_history += [
|
438 |
+
HumanMessage(content=''),
|
439 |
+
AIMessage(content=result)
|
440 |
+
]
|
441 |
+
|
442 |
+
self.llm_prompt_history.append({
|
443 |
+
'messages': [{x.type: x.content} for x in messages],
|
444 |
+
'result': result,
|
445 |
+
'tag': f'bid_{self.cur_item_id}'
|
446 |
+
})
|
447 |
+
|
448 |
+
if self.verbose:
|
449 |
+
print(get_colored_text(bid_instruct, 'yellow'))
|
450 |
+
print(get_colored_text(result, 'green'))
|
451 |
+
|
452 |
+
print(f"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.")
|
453 |
+
self.total_bid_cnt += 1
|
454 |
+
|
455 |
+
return result
|
456 |
+
|
457 |
+
def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):
|
458 |
+
instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(
|
459 |
+
cur_item=self._get_cur_item(),
|
460 |
+
bidding_history=bidding_history,
|
461 |
+
hammer_msg=hammer_msg.strip(),
|
462 |
+
win_lose_msg=win_lose_msg.strip(),
|
463 |
+
bidder_name=self.name,
|
464 |
+
prev_status=self._status_json_to_text(self.status_quo),
|
465 |
+
)
|
466 |
+
return instruct
|
467 |
+
|
468 |
+
def summarize(self, instruct_summarize: str):
|
469 |
+
'''
|
470 |
+
Update belief/status quo
|
471 |
+
status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)
|
472 |
+
'''
|
473 |
+
self.budget_history.append(self.budget)
|
474 |
+
self.profit_history.append(self.profit)
|
475 |
+
|
476 |
+
if self.model_name == 'rule':
|
477 |
+
self.rule_bid_cnt = 0 # reset bid count for rule bidder
|
478 |
+
return ''
|
479 |
+
|
480 |
+
messages = [SystemMessage(content=self.system_message)]
|
481 |
+
# messages += self.bid_history
|
482 |
+
summ_msg = HumanMessage(content=instruct_summarize)
|
483 |
+
messages.append(summ_msg)
|
484 |
+
|
485 |
+
status_quo_text = self._run_llm_standalone(messages)
|
486 |
+
|
487 |
+
self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]
|
488 |
+
self.bid_history += [summ_msg, AIMessage(content=status_quo_text)]
|
489 |
+
|
490 |
+
self.llm_prompt_history.append({
|
491 |
+
'messages': [{x.type: x.content} for x in messages],
|
492 |
+
'result': status_quo_text,
|
493 |
+
'tag': f'summarize_{self.cur_item_id}'
|
494 |
+
})
|
495 |
+
|
496 |
+
cnt = 0
|
497 |
+
while cnt <= 3:
|
498 |
+
sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])
|
499 |
+
if sanity_msg == '':
|
500 |
+
# pass sanity check then track beliefs
|
501 |
+
consistency_msg = self._belief_tracking(status_quo_text)
|
502 |
+
else:
|
503 |
+
sanity_msg = f'- {sanity_msg}'
|
504 |
+
consistency_msg = ''
|
505 |
+
|
506 |
+
if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):
|
507 |
+
err_msg = f"As {self.name}, here are some error(s) of your summary of the status JSON:\n{sanity_msg.strip()}\n{consistency_msg.strip()}\n\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.".strip()
|
508 |
+
|
509 |
+
# print(f"{self.name}: revising status quo for the {cnt} time:")
|
510 |
+
# print(get_colored_text(err_msg, 'green'))
|
511 |
+
# print(get_colored_text(status_quo_text, 'red'))
|
512 |
+
|
513 |
+
messages += [AIMessage(content=status_quo_text),
|
514 |
+
HumanMessage(content=err_msg)]
|
515 |
+
status_quo_text = self._run_llm_standalone(messages)
|
516 |
+
self.dialogue_history += [
|
517 |
+
HumanMessage(content=err_msg),
|
518 |
+
AIMessage(content=status_quo_text),
|
519 |
+
]
|
520 |
+
cnt += 1
|
521 |
+
else:
|
522 |
+
break
|
523 |
+
|
524 |
+
self.status_quo = extract_jsons_from_text(status_quo_text)[-1]
|
525 |
+
|
526 |
+
if self.verbose:
|
527 |
+
print(get_colored_text(instruct_summarize, 'blue'))
|
528 |
+
print(get_colored_text(status_quo_text, 'green'))
|
529 |
+
|
530 |
+
print(f"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.")
|
531 |
+
|
532 |
+
return status_quo_text
|
533 |
+
|
534 |
+
def get_replan_instruct(self):
|
535 |
+
instruct = INSTRUCT_REPLAN_TEMPLATE.format(
|
536 |
+
status_quo=self._status_json_to_text(self.status_quo),
|
537 |
+
remaining_items_info=self._get_items_value_str(self._get_remaining_items()),
|
538 |
+
bidder_name=self.name,
|
539 |
+
desire_desc=DESIRE_DESC[self.desire],
|
540 |
+
learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT
|
541 |
+
)
|
542 |
+
return instruct
|
543 |
+
|
544 |
+
def replan(self, instruct_replan: str):
|
545 |
+
'''
|
546 |
+
plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)
|
547 |
+
'''
|
548 |
+
if self.model_name == 'rule':
|
549 |
+
self.withdraw = False
|
550 |
+
self.cur_item_id += 1
|
551 |
+
return ''
|
552 |
+
|
553 |
+
if self.plan_strategy in ['none', 'static']:
|
554 |
+
self.bid_history = [] # clear bid history
|
555 |
+
self.cur_item_id += 1
|
556 |
+
self.withdraw = False
|
557 |
+
return 'Skip replanning for bidders with static or no plan.'
|
558 |
+
|
559 |
+
replan_msg = HumanMessage(content=instruct_replan)
|
560 |
+
|
561 |
+
messages = [SystemMessage(content=self.system_message),
|
562 |
+
HumanMessage(content=self.plan_instruct),
|
563 |
+
AIMessage(content=self.cur_plan)]
|
564 |
+
messages.append(replan_msg)
|
565 |
+
|
566 |
+
result = self._run_llm_standalone(messages)
|
567 |
+
|
568 |
+
new_plan_dict = extract_jsons_from_text(result)[-1]
|
569 |
+
cnt = 0
|
570 |
+
while len(new_plan_dict) == 0 and cnt < 2:
|
571 |
+
err_msg = 'Your response does not contain a JSON-format priority list for items. Please revise your plan.'
|
572 |
+
messages += [
|
573 |
+
AIMessage(content=result),
|
574 |
+
HumanMessage(content=err_msg),
|
575 |
+
]
|
576 |
+
result = self._run_llm_standalone(messages)
|
577 |
+
new_plan_dict = extract_jsons_from_text(result)[-1]
|
578 |
+
|
579 |
+
self.dialogue_history += [
|
580 |
+
HumanMessage(content=err_msg),
|
581 |
+
AIMessage(content=result),
|
582 |
+
]
|
583 |
+
cnt += 1
|
584 |
+
|
585 |
+
old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]
|
586 |
+
self.changes_of_plan.append([
|
587 |
+
f"{self.cur_item_id + 1} ({self._get_cur_item('name')})",
|
588 |
+
self._change_of_plan(old_plan_dict, new_plan_dict),
|
589 |
+
json.dumps(new_plan_dict)
|
590 |
+
])
|
591 |
+
|
592 |
+
self.plan_instruct = instruct_replan
|
593 |
+
self.cur_plan = result
|
594 |
+
self.withdraw = False
|
595 |
+
self.bid_history = [] # clear bid history
|
596 |
+
self.cur_item_id += 1
|
597 |
+
|
598 |
+
self.dialogue_history += [
|
599 |
+
replan_msg,
|
600 |
+
AIMessage(content=result),
|
601 |
+
]
|
602 |
+
self.llm_prompt_history.append({
|
603 |
+
'messages': [{x.type: x.content} for x in messages],
|
604 |
+
'result': result,
|
605 |
+
'tag': f'plan_{self.cur_item_id}'
|
606 |
+
})
|
607 |
+
|
608 |
+
if self.verbose:
|
609 |
+
print(get_colored_text(instruct_replan, 'blue'))
|
610 |
+
print(get_colored_text(result, 'green'))
|
611 |
+
|
612 |
+
print(f"Replan: {self.name} ({self.model_name}).")
|
613 |
+
return result
|
614 |
+
|
615 |
+
def _change_of_plan(self, old_plan: dict, new_plan: dict):
|
616 |
+
for k in new_plan:
|
617 |
+
if new_plan[k] != old_plan.get(k, None):
|
618 |
+
return True
|
619 |
+
return False
|
620 |
+
|
621 |
+
# *********** Belief Tracking and Sanity Check *********** #
|
622 |
+
|
623 |
+
def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):
|
624 |
+
# can't bid more than budget or less than previous highest bid
|
625 |
+
if bid_price < 0:
|
626 |
+
msg = None
|
627 |
+
else:
|
628 |
+
min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))
|
629 |
+
if bid_price > self.budget:
|
630 |
+
msg = f"you don't have insufficient budget (${self.budget} left)"
|
631 |
+
elif bid_price < self._get_cur_item('price'):
|
632 |
+
msg = f"your bid is lower than the starting bid (${self._get_cur_item('price')})"
|
633 |
+
elif bid_price < prev_round_max_bid + min_bid_increase:
|
634 |
+
msg = f"you must advance previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%)."
|
635 |
+
else:
|
636 |
+
msg = None
|
637 |
+
return msg
|
638 |
+
|
639 |
+
def rebid_for_failure(self, fail_instruct: str):
|
640 |
+
result = self.bid(fail_instruct)
|
641 |
+
self.failed_bid_cnt += 1
|
642 |
+
return result
|
643 |
+
|
644 |
+
def _sanity_check_status_json(self, data: dict):
|
645 |
+
if data == {}:
|
646 |
+
return "Error: No parsible JSON in your response. Possibly due to missing a closing curly bracket '}', or unpasible values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400)."
|
647 |
+
|
648 |
+
# Check if all expected top-level keys are present
|
649 |
+
expected_keys = ["remaining_budget", "total_profits", "winning_bids"]
|
650 |
+
for key in expected_keys:
|
651 |
+
if key not in data:
|
652 |
+
return f"Error: Missing '{key}' field in the status JSON."
|
653 |
+
|
654 |
+
# Check if "remaining_budget" is a number
|
655 |
+
if not isinstance(data["remaining_budget"], (int, float)):
|
656 |
+
return "Error: 'remaining_budget' should be a number, and only about your remaining budget."
|
657 |
+
|
658 |
+
# Check if "total_profits" is a dictionary with numbers as values
|
659 |
+
if not isinstance(data["total_profits"], dict):
|
660 |
+
return "Error: 'total_profits' should be a dictionary of every bidder."
|
661 |
+
for bidder, profit in data["total_profits"].items():
|
662 |
+
if not isinstance(profit, (int, float)):
|
663 |
+
return f"Error: Profit for {bidder} should be a number."
|
664 |
+
|
665 |
+
# Check if "winning_bids" is a dictionary and that each bidder's entry is a dictionary with numbers
|
666 |
+
if not isinstance(data["winning_bids"], dict):
|
667 |
+
return "Error: 'winning_bids' should be a dictionary."
|
668 |
+
for bidder, bids in data["winning_bids"].items():
|
669 |
+
if not isinstance(bids, dict):
|
670 |
+
return f"Error: Bids for {bidder} should be a dictionary."
|
671 |
+
for item, amount in bids.items():
|
672 |
+
if not isinstance(amount, (int, float)):
|
673 |
+
return f"Error: Amount for {item} under {bidder} should be a number."
|
674 |
+
|
675 |
+
# If everything is fine
|
676 |
+
return ""
|
677 |
+
|
678 |
+
def _status_json_to_text(self, data: dict):
|
679 |
+
if 'rule' in self.model_name: return ''
|
680 |
+
|
681 |
+
# Extract and format remaining budget
|
682 |
+
structured_text = f"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\n\n"
|
683 |
+
|
684 |
+
# Extract and format total profits for each bidder
|
685 |
+
structured_text += "* Total Profits:\n"
|
686 |
+
if data.get('total_profits'):
|
687 |
+
for bidder, profit in data['total_profits'].items():
|
688 |
+
structured_text += f" * {bidder}: ${profit}\n"
|
689 |
+
|
690 |
+
# Extract and list the winning bids for each item by each bidder
|
691 |
+
structured_text += "\n* Winning Bids:\n"
|
692 |
+
if data.get('winning_bids'):
|
693 |
+
for bidder, bids in data['winning_bids'].items():
|
694 |
+
structured_text += f" * {bidder}:\n"
|
695 |
+
if bids:
|
696 |
+
for item, amount in bids.items():
|
697 |
+
structured_text += f" * {item}: ${amount}\n"
|
698 |
+
else:
|
699 |
+
structured_text += f" * No winning bids\n"
|
700 |
+
|
701 |
+
return structured_text.strip()
|
702 |
+
|
703 |
+
def _belief_tracking(self, status_text: str):
|
704 |
+
'''
|
705 |
+
Parse status quo and check if the belief is correct.
|
706 |
+
'''
|
707 |
+
belief_json = extract_jsons_from_text(status_text)[-1]
|
708 |
+
# {"remaining_budget": 8000, "total_profits": {"Bidder 1": 1300, "Bidder 2": 1800, "Bidder 3": 0}, "winning_bids": {"Bidder 1": {"Item 2": 1200, "Item 3": 1000}, "Bidder 2": {"Item 1": 2000}, "Bidder 3": {}}}
|
709 |
+
budget_belief = belief_json['remaining_budget']
|
710 |
+
profits_belief = belief_json['total_profits']
|
711 |
+
winning_bids = belief_json['winning_bids']
|
712 |
+
|
713 |
+
msg = ''
|
714 |
+
# track belief of budget
|
715 |
+
self.total_self_belief_cnt += 1
|
716 |
+
if budget_belief != self.budget:
|
717 |
+
msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\n'
|
718 |
+
self.self_belief_error_cnt += 1
|
719 |
+
self.budget_error_history.append([
|
720 |
+
self._get_cur_item('name'),
|
721 |
+
budget_belief,
|
722 |
+
self.budget,
|
723 |
+
])
|
724 |
+
|
725 |
+
# track belief of profits
|
726 |
+
for bidder_name, profit in profits_belief.items():
|
727 |
+
if self.all_bidders_status.get(bidder_name) is None:
|
728 |
+
# due to a potentially unreasonable parsing
|
729 |
+
continue
|
730 |
+
|
731 |
+
if self.name in bidder_name:
|
732 |
+
bidder_name = self.name
|
733 |
+
self.total_self_belief_cnt += 1
|
734 |
+
else:
|
735 |
+
self.total_other_belief_cnt += 1
|
736 |
+
|
737 |
+
real_profit = self.all_bidders_status[bidder_name]['profit']
|
738 |
+
|
739 |
+
if profit != real_profit:
|
740 |
+
if self.name == bidder_name:
|
741 |
+
self.self_belief_error_cnt += 1
|
742 |
+
else:
|
743 |
+
self.other_belief_error_cnt += 1
|
744 |
+
|
745 |
+
msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\n'
|
746 |
+
|
747 |
+
# add to history
|
748 |
+
self.profit_error_history.append([
|
749 |
+
f"{bidder_name} ({self._get_cur_item('name')})",
|
750 |
+
profit,
|
751 |
+
real_profit
|
752 |
+
])
|
753 |
+
|
754 |
+
# track belief of winning bids
|
755 |
+
for bidder_name, items_won_dict in winning_bids.items():
|
756 |
+
if self.all_bidders_status.get(bidder_name) is None:
|
757 |
+
# due to a potentially unreasonable parsing
|
758 |
+
continue
|
759 |
+
|
760 |
+
real_items_won = self.all_bidders_status[bidder_name]['items_won']
|
761 |
+
# items_won = [(item, bid_price), ...)]
|
762 |
+
|
763 |
+
items_won_list = list(items_won_dict.keys())
|
764 |
+
real_items_won_list = [str(x) for x, _ in real_items_won]
|
765 |
+
|
766 |
+
if self.name in bidder_name:
|
767 |
+
self.total_self_belief_cnt += 1
|
768 |
+
else:
|
769 |
+
self.total_other_belief_cnt += 1
|
770 |
+
|
771 |
+
if not item_list_equal(items_won_list, real_items_won_list):
|
772 |
+
if bidder_name == self.name:
|
773 |
+
self.self_belief_error_cnt += 1
|
774 |
+
_bidder_name = 'you'
|
775 |
+
else:
|
776 |
+
self.other_belief_error_cnt += 1
|
777 |
+
_bidder_name = bidder_name
|
778 |
+
|
779 |
+
msg += f"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\n"
|
780 |
+
|
781 |
+
self.win_bid_error_history.append([
|
782 |
+
f"{_bidder_name} ({self._get_cur_item('name')})",
|
783 |
+
', '.join(items_won_list),
|
784 |
+
', '.join(real_items_won_list)
|
785 |
+
])
|
786 |
+
|
787 |
+
return msg
|
788 |
+
|
789 |
+
def win_bid(self, item: Item, bid: int):
|
790 |
+
self.budget -= bid
|
791 |
+
self.profit += item.true_value - bid
|
792 |
+
self.items_won += [[item, bid]]
|
793 |
+
msg = f"Congratuations! You won {item} at ${bid}."# Now you have ${self.budget} left. Your total profit so far is ${self.profit}."
|
794 |
+
return msg
|
795 |
+
|
796 |
+
def lose_bid(self, item: Item):
|
797 |
+
return f"You lost {item}."# Now, you have ${self.budget} left. Your total profit so far is ${self.profit}."
|
798 |
+
|
799 |
+
# set the profit information of other bidders
|
800 |
+
def set_all_bidders_status(self, all_bidders_status: dict):
|
801 |
+
self.all_bidders_status = all_bidders_status.copy()
|
802 |
+
|
803 |
+
def set_withdraw(self, bid: int):
|
804 |
+
if bid < 0: # withdraw
|
805 |
+
self.withdraw = True
|
806 |
+
elif bid == 0: # enable discount and bid again
|
807 |
+
self.withdraw = False
|
808 |
+
else: # normal bid
|
809 |
+
self.withdraw = False
|
810 |
+
self.engagement_count += 1
|
811 |
+
self.engagement_history[self._get_cur_item('name')] += 1
|
812 |
+
|
813 |
+
# ****************** Logging ****************** #
|
814 |
+
|
815 |
+
# def _parse_hedging(self, plan: str): # deprecated
|
816 |
+
# prompt = PARSE_HEDGE_INSTRUCTION.format(
|
817 |
+
# item_name=self._get_cur_item(),
|
818 |
+
# plan=plan)
|
819 |
+
|
820 |
+
# with get_openai_callback() as cb:
|
821 |
+
# llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)
|
822 |
+
# result = llm([HumanMessage(content=prompt)]).content
|
823 |
+
# self.openai_cost += cb.total_cost
|
824 |
+
# # parse a number, which could be a digit
|
825 |
+
# hedge_percent = re.findall(r'\d+\.?\d*%', result)
|
826 |
+
# if len(hedge_percent) > 0:
|
827 |
+
# hedge_percent = hedge_percent[0].replace('%', '')
|
828 |
+
# else:
|
829 |
+
# hedge_percent = 0
|
830 |
+
# return float(hedge_percent)
|
831 |
+
|
832 |
+
def profit_report(self):
|
833 |
+
'''
|
834 |
+
Personal profit report at the end of an auction.
|
835 |
+
'''
|
836 |
+
msg = f"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\n"
|
837 |
+
profit = 0
|
838 |
+
for item, bid in self.items_won:
|
839 |
+
profit += item.true_value - bid
|
840 |
+
msg += f" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\n"
|
841 |
+
return msg.strip()
|
842 |
+
|
843 |
+
def to_monitors(self, as_json=False):
|
844 |
+
# budget, profit, items_won, tokens
|
845 |
+
if len(self.items_won) == 0 and not as_json:
|
846 |
+
items_won = [['', 0, 0]]
|
847 |
+
else:
|
848 |
+
items_won = []
|
849 |
+
for item, bid in self.items_won:
|
850 |
+
items_won.append([str(item), bid, item.true_value])
|
851 |
+
|
852 |
+
profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]
|
853 |
+
win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]
|
854 |
+
budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]
|
855 |
+
changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]
|
856 |
+
|
857 |
+
if as_json:
|
858 |
+
return {
|
859 |
+
'auction_hash': self.auction_hash,
|
860 |
+
'bidder_name': self.name,
|
861 |
+
'model_name': self.model_name,
|
862 |
+
'desire': self.desire,
|
863 |
+
'plan_strategy': self.plan_strategy,
|
864 |
+
'overestimate_percent': self.overestimate_percent,
|
865 |
+
'temperature': self.temperature,
|
866 |
+
'correct_belief': self.correct_belief,
|
867 |
+
'enable_learning': self.enable_learning,
|
868 |
+
'budget': self.original_budget,
|
869 |
+
'money_left': self.budget,
|
870 |
+
'profit': self.profit,
|
871 |
+
'items_won': items_won,
|
872 |
+
'tokens_used': self.llm_token_count,
|
873 |
+
'openai_cost': round(self.openai_cost, 2),
|
874 |
+
'failed_bid_cnt': self.failed_bid_cnt,
|
875 |
+
'self_belief_error_cnt': self.self_belief_error_cnt,
|
876 |
+
'other_belief_error_cnt': self.other_belief_error_cnt,
|
877 |
+
'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),
|
878 |
+
'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),
|
879 |
+
'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),
|
880 |
+
'engagement_count': self.engagement_count,
|
881 |
+
'engagement_history': self.engagement_history,
|
882 |
+
'changes_of_plan': changes_of_plan,
|
883 |
+
'budget_error_history': budget_error_history,
|
884 |
+
'profit_error_history': profit_error_history,
|
885 |
+
'win_bid_error_history': win_bid_error_history,
|
886 |
+
'history': self.llm_prompt_history
|
887 |
+
}
|
888 |
+
else:
|
889 |
+
return [
|
890 |
+
self.budget,
|
891 |
+
self.profit,
|
892 |
+
items_won,
|
893 |
+
self.llm_token_count,
|
894 |
+
round(self.openai_cost, 2),
|
895 |
+
round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),
|
896 |
+
round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),
|
897 |
+
round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),
|
898 |
+
self.engagement_count,
|
899 |
+
draw_plot(f"{self.name} ({self.model_name})", self.budget_history, self.profit_history),
|
900 |
+
changes_of_plan,
|
901 |
+
budget_error_history,
|
902 |
+
profit_error_history,
|
903 |
+
win_bid_error_history
|
904 |
+
]
|
905 |
+
|
906 |
+
def dialogue_to_chatbot(self):
|
907 |
+
# chatbot: [[Human, AI], [], ...]
|
908 |
+
# only dialogue will be sent to LLMs. chatbot is just for display.
|
909 |
+
assert len(self.dialogue_history) % 2 == 0
|
910 |
+
chatbot = []
|
911 |
+
for i in range(0, len(self.dialogue_history), 2):
|
912 |
+
# pair consecutive (human, ai) messages for the chatbot display
|
913 |
+
human_msg = self.dialogue_history[i].content
|
914 |
+
ai_msg = self.dialogue_history[i+1].content
|
915 |
+
if ai_msg == '': ai_msg = None
|
916 |
+
if human_msg == '': human_msg = None
|
917 |
+
chatbot.append([human_msg, ai_msg])
|
918 |
+
return chatbot
|
919 |
+
|
920 |
+
|
921 |
+
def draw_plot(title, hedge_list, profit_list):
|
922 |
+
x1 = [str(i) for i in range(len(hedge_list))]
|
923 |
+
x2 = [str(i) for i in range(len(profit_list))]
|
924 |
+
y1 = hedge_list
|
925 |
+
y2 = profit_list
|
926 |
+
|
927 |
+
fig, ax1 = plt.subplots()
|
928 |
+
|
929 |
+
color = 'tab:red'
|
930 |
+
ax1.set_xlabel('Bidding Round')
|
931 |
+
ax1.set_ylabel('Budget Left ($)', color=color)
|
932 |
+
ax1.plot(x1, y1, color=color, marker='o')
|
933 |
+
ax1.tick_params(axis='y', labelcolor=color)
|
934 |
+
|
935 |
+
for i, j in zip(x1, y1):
|
936 |
+
ax1.text(i, j, str(j), color=color)
|
937 |
+
|
938 |
+
ax2 = ax1.twinx()
|
939 |
+
color = 'tab:blue'
|
940 |
+
ax2.set_ylabel('Total Profit ($)', color=color)
|
941 |
+
ax2.plot(x2, y2, color=color, marker='^')
|
942 |
+
ax2.tick_params(axis='y', labelcolor=color)
|
943 |
+
|
944 |
+
for i, j in zip(x2, y2):
|
945 |
+
ax2.text(i, j, str(j), color=color)
|
946 |
+
|
947 |
+
lines1, labels1 = ax1.get_legend_handles_labels()
|
948 |
+
lines2, labels2 = ax2.get_legend_handles_labels()
|
949 |
+
ax2.legend(lines1 + lines2, labels1 + labels2, loc=0)
|
950 |
+
|
951 |
+
# fig.tight_layout()
|
952 |
+
plt.title(title)
|
953 |
+
|
954 |
+
return fig
|
955 |
+
|
956 |
+
|
957 |
+
def bidding_multithread(bidder_list: List[Bidder],
|
958 |
+
instruction_list,
|
959 |
+
func_type,
|
960 |
+
thread_num=5,
|
961 |
+
retry=1):
|
962 |
+
'''
|
963 |
+
auctioneer_msg: either a uniform message (str) or customed (list)
|
964 |
+
'''
|
965 |
+
assert func_type in ['plan', 'bid', 'summarize', 'replan']
|
966 |
+
|
967 |
+
result_queue = queue.Queue()
|
968 |
+
threads = []
|
969 |
+
semaphore = threading.Semaphore(thread_num)
|
970 |
+
|
971 |
+
def run_once(i: int, bidder: Bidder, auctioneer_msg: str):
|
972 |
+
try:
|
973 |
+
semaphore.acquire()
|
974 |
+
if func_type == 'bid':
|
975 |
+
|
976 |
+
result = bidder.bid(auctioneer_msg)
|
977 |
+
elif func_type == 'summarize':
|
978 |
+
result = bidder.summarize(auctioneer_msg)
|
979 |
+
elif func_type == 'plan':
|
980 |
+
result = bidder.init_plan(auctioneer_msg)
|
981 |
+
elif func_type == 'replan':
|
982 |
+
result = bidder.replan(auctioneer_msg)
|
983 |
+
else:
|
984 |
+
raise NotImplementedError(f'func_type {func_type} not implemented')
|
985 |
+
result_queue.put((True, i, result))
|
986 |
+
# except Exception as e:
|
987 |
+
# result_queue.put((False, i, str(trace_back(e))))
|
988 |
+
finally:
|
989 |
+
semaphore.release()
|
990 |
+
|
991 |
+
if isinstance(instruction_list, str):
|
992 |
+
instruction_list = [instruction_list] * len(bidder_list)
|
993 |
+
|
994 |
+
for i, (bidder, msg) in enumerate(zip(bidder_list, instruction_list)):
|
995 |
+
thread = threading.Thread(target=run_once, args=(i, bidder, msg))
|
996 |
+
thread.start()
|
997 |
+
threads.append(thread)
|
998 |
+
|
999 |
+
for thread in threads:
|
1000 |
+
thread.join(timeout=600)
|
1001 |
+
|
1002 |
+
results = [result_queue.get() for _ in range(len(bidder_list))]
|
1003 |
+
|
1004 |
+
errors = []
|
1005 |
+
for success, id, result in results:
|
1006 |
+
if not success:
|
1007 |
+
errors.append((id, result))
|
1008 |
+
|
1009 |
+
if errors:
|
1010 |
+
raise Exception(f"Error(s) in {func_type}:\n" + '\n'.join([f'{i}: {e}' for i, e in errors]))
|
1011 |
+
|
1012 |
+
valid_results = [x[1:] for x in results if x[0]]
|
1013 |
+
valid_results.sort()
|
1014 |
+
|
1015 |
+
return [x for _, x in valid_results]
|
1016 |
+
|
1017 |
+
|
1018 |
+
def bidders_to_chatbots(bidder_list: List[Bidder], profit_report=False):
|
1019 |
+
if profit_report: # usually at the end of an auction
|
1020 |
+
return [x.dialogue_to_chatbot() + [[x.profit_report(), None]] for x in bidder_list]
|
1021 |
+
else:
|
1022 |
+
return [x.dialogue_to_chatbot() for x in bidder_list]
|
1023 |
+
|
1024 |
+
|
1025 |
+
def create_bidders(bidder_info_jsl, auction_hash):
|
1026 |
+
bidder_info_jsl = LoadJsonL(bidder_info_jsl)
|
1027 |
+
bidder_list = []
|
1028 |
+
for info in bidder_info_jsl:
|
1029 |
+
info['auction_hash'] = auction_hash
|
1030 |
+
bidder_list.append(Bidder.create(**info))
|
1031 |
+
return bidder_list
|
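For context, here is a minimal usage sketch of the bidder API defined above, covering only the planning phase (it is not part of the committed files). It assumes the demo data files under data/ from this commit, that the repo root is on the import path so `src` and `utils` resolve, that the relevant model API keys are configured, and an arbitrary auction_hash string; the real plan/bid/summarize/replan loop is driven by the auctioneer in auction_workflow.py.

# Hedged usage sketch, not part of the commit: wiring create_bidders,
# create_items and bidding_multithread for the planning phase only.
from src.bidder_base import create_bidders, bidding_multithread
from src.item_base import create_items

items = create_items('data/items_demo.jsonl')                      # demo items shipped with this commit
bidders = create_bidders('data/bidders_demo.jsonl', 'demo-hash')   # 'demo-hash' is an arbitrary auction id

plan_instructs = [b.get_plan_instruct(items) for b in bidders]     # per-bidder planning prompt
plans = bidding_multithread(bidders, plan_instructs, func_type='plan')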
src/human_bidder.py
ADDED
@@ -0,0 +1,137 @@
1 |
+
from typing import List
|
2 |
+
from langchain.schema import (
|
3 |
+
AIMessage,
|
4 |
+
HumanMessage,
|
5 |
+
SystemMessage
|
6 |
+
)
|
7 |
+
from .bidder_base import Bidder, draw_plot
|
8 |
+
from .item_base import Item
|
9 |
+
from langchain.input import get_colored_text
|
10 |
+
import time
|
11 |
+
|
12 |
+
|
13 |
+
class HumanBidder(Bidder):
|
14 |
+
name: str
|
15 |
+
human_name: str = "Adam"
|
16 |
+
budget: int
|
17 |
+
auction_hash: str
|
18 |
+
|
19 |
+
cur_item_id = 0
|
20 |
+
items: list = []
|
21 |
+
withdraw: bool = False
|
22 |
+
|
23 |
+
engagement_count: int = 0
|
24 |
+
original_budget: int = 0
|
25 |
+
profit: int = 0
|
26 |
+
items_won = []
|
27 |
+
|
28 |
+
all_bidders_status = {} # track others' profit
|
29 |
+
|
30 |
+
# essential for demo
|
31 |
+
need_input: bool = False
|
32 |
+
semaphore: int = 0 # set to 1 when user input is ready; bid() waits while it is 0.
|
33 |
+
input_box: str = None # global variable for accepting user input
|
34 |
+
|
35 |
+
# not used
|
36 |
+
model_name: str = 'human'
|
37 |
+
openai_cost = 0
|
38 |
+
desire = ''
|
39 |
+
plan_strategy = ''
|
40 |
+
correct_belief = True
|
41 |
+
|
42 |
+
class Config:
|
43 |
+
arbitrary_types_allowed = True
|
44 |
+
|
45 |
+
def get_plan_instruct(self, items: List[Item]):
|
46 |
+
self.items = items
|
47 |
+
plan_instruct = "As {bidder_name}, you have a total budget of ${budget}. This auction has a total of {item_num} items to be sequentially presented, they are:\n{items_info}".format(
|
48 |
+
bidder_name=self.name,
|
49 |
+
budget=self.budget,
|
50 |
+
item_num=len(items),
|
51 |
+
items_info=self._get_items_value_str(items)
|
52 |
+
)
|
53 |
+
return plan_instruct
|
54 |
+
|
55 |
+
def init_plan(self, plan_instruct: str):
|
56 |
+
# Human = auctioneer, AI = bidder
|
57 |
+
self.dialogue_history += [
|
58 |
+
HumanMessage(content=plan_instruct),
|
59 |
+
AIMessage(content='Got it!')
|
60 |
+
]
|
61 |
+
return ''
|
62 |
+
|
63 |
+
def get_bid_instruct(self, auctioneer_msg, bid_round):
|
64 |
+
self.dialogue_history += [
|
65 |
+
HumanMessage(content=auctioneer_msg),
|
66 |
+
AIMessage(content='')
|
67 |
+
]
|
68 |
+
return auctioneer_msg
|
69 |
+
|
70 |
+
def bid(self, bid_instruct):
|
71 |
+
# wait for the cue to handle user input
|
72 |
+
while self.semaphore <= 0:
|
73 |
+
time.sleep(1)
|
74 |
+
|
75 |
+
self.dialogue_history += [
|
76 |
+
HumanMessage(content=''),
|
77 |
+
AIMessage(content=self.input_box)
|
78 |
+
]
|
79 |
+
self.semaphore -= 1
|
80 |
+
self.need_input = False
|
81 |
+
return self.input_box
|
82 |
+
|
83 |
+
def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):
|
84 |
+
instruct_summarize = f"{bidding_history}\n\n{hammer_msg}\n{win_lose_msg}"
|
85 |
+
return instruct_summarize
|
86 |
+
|
87 |
+
def summarize(self, instruct_summarize: str):
|
88 |
+
self.dialogue_history += [
|
89 |
+
HumanMessage(content=instruct_summarize),
|
90 |
+
AIMessage(content='Noted.')
|
91 |
+
]
|
92 |
+
self.budget_history.append(self.budget)
|
93 |
+
self.profit_history.append(self.profit)
|
94 |
+
return ''
|
95 |
+
|
96 |
+
def get_replan_instruct(self):
|
97 |
+
return ''
|
98 |
+
|
99 |
+
def replan(self, instruct_replan):
|
100 |
+
self.withdraw = False
|
101 |
+
self.cur_item_id += 1
|
102 |
+
return ''
|
103 |
+
|
104 |
+
def to_monitors(self, as_json=False):
|
105 |
+
items_won = []
|
106 |
+
for item, bid in self.items_won:
|
107 |
+
items_won.append([str(item), bid, item.true_value])
|
108 |
+
if as_json:
|
109 |
+
return {
|
110 |
+
'auction_hash': self.auction_hash,
|
111 |
+
'bidder_name': self.name,
|
112 |
+
'human_name': self.human_name,
|
113 |
+
'model_name': self.model_name,
|
114 |
+
'budget': self.original_budget,
|
115 |
+
'money_left': self.budget,
|
116 |
+
'profit': self.profit,
|
117 |
+
'items_won': items_won,
|
118 |
+
'engagement_count': self.engagement_count,
|
119 |
+
}
|
120 |
+
else:
|
121 |
+
return [
|
122 |
+
self.budget,
|
123 |
+
self.profit,
|
124 |
+
items_won,
|
125 |
+
0,
|
126 |
+
0,
|
127 |
+
round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),
|
128 |
+
0,
|
129 |
+
0,
|
130 |
+
self.engagement_count,
|
131 |
+
draw_plot(f"{self.name} ({self.model_name})", self.budget_history, self.profit_history),
|
132 |
+
[],
|
133 |
+
[],
|
134 |
+
[],
|
135 |
+
[]
|
136 |
+
]
|
137 |
+
|
src/item_base.py
ADDED
@@ -0,0 +1,50 @@
1 |
+
import sys
|
2 |
+
sys.path.append('..')
|
3 |
+
from utils import LoadJsonL
|
4 |
+
|
5 |
+
|
6 |
+
class Item():
|
7 |
+
def __init__(self, id: int, name: str, price: int, desc: str, true_value: int):
|
8 |
+
self.id = id
|
9 |
+
self.name = name
|
10 |
+
self.price = price
|
11 |
+
self.desc = desc
|
12 |
+
self.true_value = true_value
|
13 |
+
self._original_price = price
|
14 |
+
|
15 |
+
def get_desc(self):
|
16 |
+
return f"{self.name}, starting at ${int(self.price)}."
|
17 |
+
|
18 |
+
def __repr__(self):
|
19 |
+
return f"{self.name}"
|
20 |
+
|
21 |
+
def __str__(self):
|
22 |
+
return f"{self.name}"
|
23 |
+
|
24 |
+
def info(self):
|
25 |
+
return f"{self.name}: ${int(self.price)} to ${self.true_value}."
|
26 |
+
|
27 |
+
def lower_price(self, percentage: float = 0.2):
|
28 |
+
# lower starting price by 20%
|
29 |
+
self.price = int(self.price * (1 - percentage))
|
30 |
+
|
31 |
+
def reset_price(self):
|
32 |
+
self.price = self._original_price
|
33 |
+
|
34 |
+
|
35 |
+
def create_items(item_info_jsl):
|
36 |
+
'''
|
37 |
+
item_info: a list of dicts with keys (id, name, price, desc, true_value)
|
38 |
+
'''
|
39 |
+
item_info_jsl = LoadJsonL(item_info_jsl)
|
40 |
+
item_list = []
|
41 |
+
for info in item_info_jsl:
|
42 |
+
item_list.append(Item(**info))
|
43 |
+
return item_list
|
44 |
+
|
45 |
+
|
46 |
+
def item_list_equal(items_1: list, items_2: list):
|
47 |
+
# could be a list of strings (names) or a list of Items
|
48 |
+
item_1_names = [item.name if isinstance(item, Item) else item for item in items_1]
|
49 |
+
item_2_names = [item.name if isinstance(item, Item) else item for item in items_2]
|
50 |
+
return set(item_1_names) == set(item_2_names)
|
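A short, hedged usage sketch of the Item helpers above (assuming the demo file data/items_demo.jsonl from this commit and that the repo root is on sys.path so `utils` resolves):

# Hedged usage sketch, not part of the commit.
items = create_items('data/items_demo.jsonl')               # list of Item objects from the demo jsonl
first = items[0]
print(first.get_desc())                                     # "<name>, starting at $<price>."
first.lower_price(0.2)                                      # discount the starting price by 20%
first.reset_price()                                         # restore the original starting price
print(item_list_equal(items, [it.name for it in items]))    # True: Item objects vs. their names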
src/prompt_base.py
ADDED
@@ -0,0 +1,349 @@
1 |
+
# for bidder
|
2 |
+
SYSTEM_MESSAGE = """
|
3 |
+
You are {name}, who is attending an ascending-bid auction as a bidder. This auction will have some other bidders to compete with you in bidding wars. The price is gradually raised, bidders drop out until finally only one bidder remains, and that bidder wins the item at this final price. Remember: {desire_desc}.
|
4 |
+
|
5 |
+
Here are some must-know rules for this auction:
|
6 |
+
|
7 |
+
1. Item Values: The true value of an item means its resale value in the broader market, which you don't know. You will have a personal estimation of the item value. However, note that your estimated value could deviate from the true value, due to your potential overestimation or underestimation of this item.
|
8 |
+
2. Winning Bid: The highest bid wins the item. Your profit from winning an item is determined by the difference between the item's true value and your winning bid. You should try to win an item at a bid as minimal as possible to save your budget.
|
9 |
+
""".strip()
|
10 |
+
|
11 |
+
|
12 |
+
_LEARNING_STATEMENT = " and your learnings from previous auctions"
|
13 |
+
|
14 |
+
|
15 |
+
INSTRUCT_PLAN_TEMPLATE = """
|
16 |
+
As {bidder_name}, you have a total budget of ${budget}. This auction has a total of {item_num} items to be sequentially presented, they are:
|
17 |
+
{items_info}
|
18 |
+
|
19 |
+
---
|
20 |
+
|
21 |
+
Please plan for your bidding strategy for the auction based on the information{learning_statement}. A well-thought-out plan positions you advantageously against competitors, allowing you to allocate resources effectively. With a clear strategy, you can make decisions rapidly and confidently, especially under the pressure of the auction environment. Remember: {desire_desc}.
|
22 |
+
|
23 |
+
After articulating your thinking, in your plan, assign a priority level to each item. Present the priorities for all items in a JSON format, each item should be represented as a key-value pair, where the key is the item name and the value is its priority on the scale from 1-3. An example output is: {{"Fixture Y": 3, "Module B": 2, "Product G": 2}}. The descriptions of the priority scale of items are as follows.
|
24 |
+
* 1 - This item is the least important. Consider giving it up if necessary to save money for the rest of the auction.
|
25 |
+
* 2 - This item holds value but isn't a top priority for the bidder. Could bid on it if you have enough budget.
|
26 |
+
* 3 - This item is of utmost importance and is a top priority for the bidder in the rest of the auction.
|
27 |
+
""".strip()
|
28 |
+
|
29 |
+
|
30 |
+
INSTRUCT_BID_TEMPLATE = """
|
31 |
+
Now, the auctioneer says: "{auctioneer_msg}"
|
32 |
+
|
33 |
+
---
|
34 |
+
|
35 |
+
As {bidder_name}, you have to decide whether to bid on this item or withdraw and explain why, according to your plan{learning_statement}. Remember, {desire_desc}.
|
36 |
+
|
37 |
+
Here are some common practices of bidding:
|
38 |
+
1. Show your interest by bidding at or slightly above the starting price of this item, then gradually increase your bid.
|
39 |
+
2. Think step by step of the pros and cons and the consequences of your action (e.g., remaining budget in future bidding) in order to achieve your primary objective.
|
40 |
+
|
41 |
+
Give your reasons first, then make your final decision clearly. You should either withdraw (saying "I'm out!") or make a higher bid for this item (saying "I bid $xxx!").
|
42 |
+
""".strip()
|
43 |
+
|
44 |
+
|
45 |
+
INSTRUCT_SUMMARIZE_TEMPLATE = """
|
46 |
+
Here is the history of the bidding war of {cur_item}:
|
47 |
+
"{bidding_history}"
|
48 |
+
|
49 |
+
The auctioneer concludes: "{hammer_msg}"
|
50 |
+
|
51 |
+
---
|
52 |
+
|
53 |
+
{win_lose_msg}
|
54 |
+
As {bidder_name}, you have to update the status of the auction based on this round of bidding. Here's your previous status:
|
55 |
+
```
|
56 |
+
{prev_status}
|
57 |
+
```
|
58 |
+
|
59 |
+
Summarize the notable behaviors of all bidders in this round of bidding for future reference. Then, update the status JSON regarding the following information:
|
60 |
+
- 'remaining_budget': The remaining budget of you, expressed as a numerical value.
|
61 |
+
- 'total_profits': The total profits achieved so far for each bidder, with a numerical value following each bidder's name. No equation is needed, just the numerical value.
|
62 |
+
- 'winning_bids': The winning bids for every item won by each bidder, listed as key-value pairs, for example, {{"bidder_name": {{"item_name_1": winning_bid}}, {{"item_name_2": winning_bid}}, ...}}. If a bidder hasn't won any item, then the value for this bidder should be an empty dictionary {{}}.
|
63 |
+
- Only include the bidders mentioned in the given text. If a bidder is not mentioned (e.g. Bidder 4 in the following example), then do not include it in the JSON object.
|
64 |
+
|
65 |
+
After summarizing the bidding history, you must output the current status in a parsible JSON format. An example output looks like:
|
66 |
+
```
|
67 |
+
{{"remaining_budget": 8000, "total_profits": {{"Bidder 1": 1300, "Bidder 2": 1800, "Bidder 3": 0}}, "winning_bids": {{"Bidder 1": {{"Item 2": 1200, "Item 3": 1000}}, "Bidder 2": {{"Item 1": 2000}}, "Bidder 3": {{}}}}}}
|
68 |
+
```
|
69 |
+
""".strip()
|
70 |
+
|
71 |
+
|
72 |
+
INSTRUCT_LEARNING_TEMPLATE = """
|
73 |
+
Review and reflect on the historical data provided from a past auction.
|
74 |
+
|
75 |
+
{past_auction_log}
|
76 |
+
|
77 |
+
Here are your past learnings:
|
78 |
+
|
79 |
+
{past_learnings}
|
80 |
+
|
81 |
+
Based on the auction log, formulate or update your learning points that could be advantageous to your strategies in the future. Your learnings should be strategic, and of universal relevance and practical use for future auctions. Consolidate your learnings into a concise numbered list of sentences.
|
82 |
+
""".strip()
|
83 |
+
|
84 |
+
|
85 |
+
INSTRUCT_REPLAN_TEMPLATE = """
|
86 |
+
The current status of you and other bidders is as follows:
|
87 |
+
```
|
88 |
+
{status_quo}
|
89 |
+
```
|
90 |
+
|
91 |
+
Here are the remaining items in the rest of the auction:
|
92 |
+
"{remaining_items_info}"
|
93 |
+
|
94 |
+
As {bidder_name}, considering the current status{learning_statement}, review your strategies. Adjust your plans based on the outcomes and new information to achieve your primary objective. This iterative process ensures that your approach remains relevant and effective. Please do the following:
|
95 |
+
1. Always remember: {desire_desc}.
|
96 |
+
2. Determine and explain if there's a need to update the priority list of remaining items based on the current status.
|
97 |
+
3. Present the updated priorities in a JSON format, each item should be represented as a key-value pair, where the key is the item name and the value is its priority on the scale from 1-3. An example output is: {{"Fixture Y": 3, "Module B": 2, "Product G": 2}}. The descriptions of the priority scale of items are as follows.
|
98 |
+
* 1 - This item is the least important. Consider giving it up if necessary to save money for the rest of the auction.
|
99 |
+
* 2 - This item holds value but isn't a top priority for the bidder. Could bid on it if you have enough budget.
|
100 |
+
* 3 - This item is of utmost importance and is a top priority for the bidder in the rest of the auction.
|
101 |
+
""".strip()
|
102 |
+
|
103 |
+
|
104 |
+
# for auctioneer
|
105 |
+
PARSE_BID_INSTRUCTION = """
|
106 |
+
Your task is to parse a response from a bidder in an auction, and extract the bidding price from the response. Here are the rules:
|
107 |
+
- If the language model decides to withdraw from the bidding (e.g., saying "I'm out!"), output -1.
|
108 |
+
- If a bidding price is mentioned (e.g., saying "I bid $xxx!"), output that price number (e.g., $xxx).
|
109 |
+
Here is the response:
|
110 |
+
|
111 |
+
{response}
|
112 |
+
|
113 |
+
Don't say anything else other than just a number: either the bidding price (e.g., $xxx, with $) or -1.
|
114 |
+
""".strip()
|
115 |
+
|
116 |
+
|
117 |
+
AUCTION_HISTORY = """
|
118 |
+
## Auction Log
|
119 |
+
|
120 |
+
### 1. Equipment E, starting at $5000.
|
121 |
+
|
122 |
+
#### 1st bid:
|
123 |
+
* Bidder 1: $5500
|
124 |
+
* Bidder 2: $5100
|
125 |
+
* Bidder 3: $5100
|
126 |
+
* Bidder 4: $5500
|
127 |
+
* Bidder 5: $6000
|
128 |
+
|
129 |
+
#### 2nd bid:
|
130 |
+
* Bidder 1: Withdrew
|
131 |
+
* Bidder 2: Withdrew
|
132 |
+
* Bidder 3: Withdrew
|
133 |
+
* Bidder 4: $6500
|
134 |
+
|
135 |
+
#### 3rd bid:
|
136 |
+
* Bidder 5: $7000
|
137 |
+
|
138 |
+
#### 4th bid:
|
139 |
+
* Bidder 4: Withdrew
|
140 |
+
|
141 |
+
#### Hammer price (true value):
|
142 |
+
* Bidder 5: $7000 ($10000)
|
143 |
+
|
144 |
+
### 2. Thingamajig C, starting at $1000.
|
145 |
+
|
146 |
+
#### 1st bid:
|
147 |
+
* Bidder 1: $1500
|
148 |
+
* Bidder 2: Withdrew
|
149 |
+
* Bidder 3: Withdrew
|
150 |
+
* Bidder 4: Withdrew
|
151 |
+
* Bidder 5: Withdrew
|
152 |
+
|
153 |
+
#### Hammer price (true value):
|
154 |
+
* Bidder 1: $1500 ($2000)
|
155 |
+
|
156 |
+
### 3. Component S, starting at $1000.
|
157 |
+
|
158 |
+
#### 1st bid:
|
159 |
+
* Bidder 1: $1200
|
160 |
+
* Bidder 2: $1050
|
161 |
+
* Bidder 3: $1000
|
162 |
+
* Bidder 4: Withdrew
|
163 |
+
* Bidder 5: $1200
|
164 |
+
|
165 |
+
#### 2nd bid:
|
166 |
+
* Bidder 2: Withdrew
|
167 |
+
* Bidder 3: $1300
|
168 |
+
* Bidder 5: $1300
|
169 |
+
|
170 |
+
#### 3rd bid:
|
171 |
+
* Bidder 1: Withdrew
|
172 |
+
* Bidder 3: $1400
|
173 |
+
|
174 |
+
#### 4th bid:
|
175 |
+
* Bidder 5: Withdrew
|
176 |
+
|
177 |
+
#### Hammer price (true value):
|
178 |
+
* Bidder 3: $1400 ($2000)
|
179 |
+
|
180 |
+
### 4. Implement G, starting at $1000.
|
181 |
+
|
182 |
+
#### 1st bid:
|
183 |
+
* Bidder 1: $1100
|
184 |
+
* Bidder 2: $1000
|
185 |
+
* Bidder 3: $1100
|
186 |
+
* Bidder 4: Withdrew
|
187 |
+
* Bidder 5: $1500
|
188 |
+
|
189 |
+
#### 2nd bid:
|
190 |
+
* Bidder 1: Withdrew
|
191 |
+
* Bidder 2: Withdrew
|
192 |
+
* Bidder 3: $1600
|
193 |
+
|
194 |
+
#### 3rd bid:
|
195 |
+
* Bidder 5: $1700
|
196 |
+
|
197 |
+
#### 4th bid:
|
198 |
+
* Bidder 3: Withdrew
|
199 |
+
|
200 |
+
#### Hammer price (true value):
|
201 |
+
* Bidder 5: $1700 ($2000)
|
202 |
+
|
203 |
+
### 5. Piece T, starting at $1000.
|
204 |
+
|
205 |
+
#### 1st bid:
|
206 |
+
* Bidder 1: $1100
|
207 |
+
* Bidder 2: $1000
|
208 |
+
* Bidder 3: $1100
|
209 |
+
* Bidder 4: Withdrew
|
210 |
+
* Bidder 5: $1200
|
211 |
+
|
212 |
+
#### 2nd bid:
|
213 |
+
* Bidder 1: Withdrew
|
214 |
+
* Bidder 2: $1300
|
215 |
+
* Bidder 3: $1300
|
216 |
+
|
217 |
+
#### 3rd bid:
|
218 |
+
* Bidder 2: $1400
|
219 |
+
* Bidder 5: Withdrew
|
220 |
+
|
221 |
+
#### 4th bid:
|
222 |
+
* Bidder 3: $1500
|
223 |
+
|
224 |
+
#### 5th bid:
|
225 |
+
* Bidder 2: Withdrew
|
226 |
+
|
227 |
+
#### Hammer price (true value):
|
228 |
+
* Bidder 3: $1500 ($2000)
|
229 |
+
|
230 |
+
### 6. Doodad D, starting at $1000.
|
231 |
+
|
232 |
+
#### 1st bid:
|
233 |
+
* Bidder 1: Withdrew
|
234 |
+
* Bidder 2: $1000
|
235 |
+
* Bidder 3: Withdrew
|
236 |
+
* Bidder 4: $1010
|
237 |
+
* Bidder 5: $1300
|
238 |
+
|
239 |
+
#### 2nd bid:
|
240 |
+
* Bidder 2: Withdrew
|
241 |
+
* Bidder 4: Withdrew
|
242 |
+
|
243 |
+
#### Hammer price (true value):
|
244 |
+
* Bidder 5: $1300 ($2000)
|
245 |
+
|
246 |
+
### 7. Gizmo F, starting at $1000.
|
247 |
+
|
248 |
+
#### 1st bid:
|
249 |
+
* Bidder 1: $1100
|
250 |
+
* Bidder 2: $1000
|
251 |
+
* Bidder 3: Withdrew
|
252 |
+
* Bidder 4: Withdrew
|
253 |
+
* Bidder 5: Withdrew
|
254 |
+
|
255 |
+
#### 2nd bid:
|
256 |
+
* Bidder 2: $1200
|
257 |
+
|
258 |
+
#### 3rd bid:
|
259 |
+
* Bidder 1: Withdrew
|
260 |
+
|
261 |
+
#### Hammer price (true value):
|
262 |
+
* Bidder 2: $1200 ($2000)
|
263 |
+
|
264 |
+
### 8. Widget A, starting at $1000.
|
265 |
+
|
266 |
+
#### 1st bid:
|
267 |
+
* Bidder 1: $2200
|
268 |
+
* Bidder 2: $1000
|
269 |
+
* Bidder 3: $1100
|
270 |
+
* Bidder 4: Withdrew
|
271 |
+
* Bidder 5: Withdrew
|
272 |
+
|
273 |
+
#### 2nd bid:
|
274 |
+
* Bidder 2: Withdrew
|
275 |
+
* Bidder 3: Withdrew
|
276 |
+
|
277 |
+
#### Hammer price (true value):
|
278 |
+
* Bidder 1: $2200 ($2000)
|
279 |
+
|
280 |
+
### 9. Gadget B, starting at $1000.
|
281 |
+
|
282 |
+
#### 1st bid:
|
283 |
+
* Bidder 1: $1200
|
284 |
+
* Bidder 2: Withdrew
|
285 |
+
* Bidder 3: Withdrew
|
286 |
+
* Bidder 4: $1000
|
287 |
+
* Bidder 5: Withdrew
|
288 |
+
|
289 |
+
#### 2nd bid:
|
290 |
+
* Bidder 4: Withdrew
|
291 |
+
|
292 |
+
#### Hammer price (true value):
|
293 |
+
* Bidder 1: $1200 ($2000)
|
294 |
+
|
295 |
+
### 10. Mechanism J, starting at $5000.
|
296 |
+
|
297 |
+
#### 1st bid:
|
298 |
+
* Bidder 1: Withdrew
|
299 |
+
* Bidder 2: $5000
|
300 |
+
* Bidder 3: $5100
|
301 |
+
* Bidder 4: $6000
|
302 |
+
* Bidder 5: Withdrew
|
303 |
+
|
304 |
+
#### 2nd bid:
|
305 |
+
* Bidder 2: $6500
|
306 |
+
* Bidder 3: $6500
|
307 |
+
|
308 |
+
#### 3rd bid:
|
309 |
+
* Bidder 3: $7000
|
310 |
+
* Bidder 4: $7000
|
311 |
+
|
312 |
+
#### 4th bid:
|
313 |
+
* Bidder 2: $7500
|
314 |
+
* Bidder 3: Withdrew
|
315 |
+
|
316 |
+
#### 5th bid:
|
317 |
+
* Bidder 4: $8000
|
318 |
+
|
319 |
+
#### 6th bid:
|
320 |
+
* Bidder 2: $8500
|
321 |
+
|
322 |
+
#### 7th bid:
|
323 |
+
* Bidder 4: Withdrew
|
324 |
+
|
325 |
+
#### Hammer price (true value):
|
326 |
+
* Bidder 2: $8500 ($10000)
|
327 |
+
|
328 |
+
## Personal Report
|
329 |
+
|
330 |
+
* Bidder 1, starting with $10000, has won 3 items in this auction, with a total profit of $1100.:
|
331 |
+
* Won Thingamajig C at $1500 over $1000, with a true value of $2000.
|
332 |
+
* Won Widget A at $2200 over $1000, with a true value of $2000.
|
333 |
+
* Won Gadget B at $1200 over $1000, with a true value of $2000.
|
334 |
+
|
335 |
+
* Bidder 2, starting with $10000, has won 2 items in this auction, with a total profit of $2300.:
|
336 |
+
* Won Gizmo F at $1200 over $1000, with a true value of $2000.
|
337 |
+
* Won Mechanism J at $8500 over $5000, with a true value of $10000.
|
338 |
+
|
339 |
+
* Bidder 3, starting with $10000, has won 2 items in this auction, with a total profit of $1100.:
|
340 |
+
* Won Component S at $1400 over $1000, with a true value of $2000.
|
341 |
+
* Won Piece T at $1500 over $1000, with a true value of $2000.
|
342 |
+
|
343 |
+
* Bidder 4, starting with $10000, has won 0 items in this auction, with a total profit of $0.:
|
344 |
+
|
345 |
+
* Bidder 5, starting with $10000, has won 3 items in this auction, with a total profit of $4000.:
|
346 |
+
* Won Equipment E at $7000 over $5000, with a true value of $10000.
|
347 |
+
* Won Implement G at $1700 over $1000, with a true value of $2000.
|
348 |
+
* Won Doodad D at $1300 over $1000, with a true value of $2000.
|
349 |
+
""".strip()
|
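For orientation, here is a hedged sketch of how the auctioneer-side parsing prompt above could be filled in; the real call sits in the auctioneer code, which is not shown in this hunk.

# Hedged sketch, not part of the commit: the only template variable is {response}.
bidder_reply = "The price is getting steep, but I bid $1200!"   # hypothetical bidder output
prompt = PARSE_BID_INSTRUCTION.format(response=bidder_reply)
# Sending `prompt` to a chat model is expected to return "$1200";
# a withdrawal such as "I'm out!" is expected to return "-1".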
utils.py
ADDED
@@ -0,0 +1,73 @@
1 |
+
import ujson as json
|
2 |
+
import re
|
3 |
+
import traceback
|
4 |
+
|
5 |
+
|
6 |
+
def trace_back(error_msg):
|
7 |
+
exc = traceback.format_exc()
|
8 |
+
msg = f'[Error]: {error_msg}.\n[Traceback]: {exc}'
|
9 |
+
return msg
|
10 |
+
|
11 |
+
|
12 |
+
def extract_numbered_list(paragraph):
|
13 |
+
# Updated regular expression to match numbered list
|
14 |
+
# It looks for:
|
15 |
+
# - start of line
|
16 |
+
# - one or more digits
|
17 |
+
# - a period or parenthesis
|
18 |
+
# - optional whitespace
|
19 |
+
# - any character (captured in a group) until the end of line or a new number
|
20 |
+
pattern = r"^\s*(\d+[.)]\s?.*?)(?=\s*\d+[.)]|$)"
|
21 |
+
|
22 |
+
matches = re.findall(pattern, paragraph, re.DOTALL | re.MULTILINE)
|
23 |
+
return [match.strip() for match in matches]
|
24 |
+
|
25 |
+
|
26 |
+
def chunks(lst, n):
|
27 |
+
"""Yield successive n-sized chunks from lst."""
|
28 |
+
for i in range(0, len(lst), n):
|
29 |
+
yield lst[i : i + n]
|
30 |
+
|
31 |
+
|
32 |
+
def reset_state_list(*states):
|
33 |
+
empty = [None for _ in states[1:]]
|
34 |
+
return [[]] + empty
|
35 |
+
|
36 |
+
|
37 |
+
def LoadJsonL(filename):
|
38 |
+
if isinstance(filename, str):
|
39 |
+
jsl = []
|
40 |
+
with open(filename) as f:
|
41 |
+
for line in f:
|
42 |
+
jsl.append(json.loads(line))
|
43 |
+
return jsl
|
44 |
+
else:
|
45 |
+
return filename
|
46 |
+
|
47 |
+
|
48 |
+
def extract_jsons_from_text(text):
|
49 |
+
json_dicts = []
|
50 |
+
stack = []
|
51 |
+
start_index = None
|
52 |
+
|
53 |
+
for i, char in enumerate(text):
|
54 |
+
if char == '{':
|
55 |
+
stack.append(char)
|
56 |
+
if start_index is None:
|
57 |
+
start_index = i
|
58 |
+
elif char == '}':
|
59 |
+
if stack:
|
60 |
+
stack.pop()
|
61 |
+
if not stack and start_index is not None:
|
62 |
+
json_candidate = text[start_index:i+1]
|
63 |
+
try:
|
64 |
+
parsed_json = json.loads(json_candidate)
|
65 |
+
json_dicts.append(parsed_json)
|
66 |
+
start_index = None
|
67 |
+
except json.JSONDecodeError:
|
68 |
+
pass
|
69 |
+
finally:
|
70 |
+
start_index = None
|
71 |
+
|
72 |
+
if len(json_dicts) == 0: json_dicts = [{}]
|
73 |
+
return json_dicts
|
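A quick, self-contained illustration of the two parsers above (appended here only as an example, not part of the commit):

# Hedged example of extract_jsons_from_text and extract_numbered_list.
text = 'My priorities are {"Fixture Y": 3, "Module B": 2}. That is my plan.'
print(extract_jsons_from_text(text))    # [{'Fixture Y': 3, 'Module B': 2}]

notes = "1. Bid early to signal interest.\n2. Save budget for high-value items."
print(extract_numbered_list(notes))     # ['1. Bid early to signal interest.', '2. Save budget for high-value items.']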