jadechoghari committed on
Commit a526622
1 Parent(s): 96515c2
Files changed (47):
  1. .config/.last_opt_in_prompt.yaml +1 -0
  2. .config/.last_survey_prompt.yaml +1 -0
  3. .config/.last_update_check.json +1 -0
  4. .config/active_config +1 -0
  5. .config/config_sentinel +0 -0
  6. .config/configurations/config_default +6 -0
  7. .config/default_configs.db +0 -0
  8. .config/gce +1 -0
  9. .config/hidden_gcloud_config_universe_descriptor_data_cache_configs.db +0 -0
  10. .config/logs/2024.10.14/13.22.07.775513.log +764 -0
  11. .config/logs/2024.10.14/13.22.28.151005.log +5 -0
  12. .config/logs/2024.10.14/13.22.38.263602.log +123 -0
  13. .config/logs/2024.10.14/13.22.39.146829.log +5 -0
  14. .config/logs/2024.10.14/13.22.49.518111.log +8 -0
  15. .config/logs/2024.10.14/13.22.50.121513.log +8 -0
  16. .gitattributes copy +35 -0
  17. .gradio/certificate.pem +31 -0
  18. =0.26.0 +39 -0
  19. DejaVuSans.ttf +0 -0
  20. __pycache__/app.cpython-310.pyc +0 -0
  21. __pycache__/builder.cpython-310.pyc +0 -0
  22. __pycache__/constants.cpython-310.pyc +0 -0
  23. __pycache__/controller.cpython-310.pyc +0 -0
  24. __pycache__/conversation.cpython-310.pyc +0 -0
  25. __pycache__/gradio_css.cpython-310.pyc +0 -0
  26. __pycache__/gradio_web_server.cpython-310.pyc +0 -0
  27. __pycache__/inference.cpython-310.pyc +0 -0
  28. __pycache__/mm_utils.cpython-310.pyc +0 -0
  29. __pycache__/model_worker.cpython-310.pyc +0 -0
  30. __pycache__/utils.cpython-310.pyc +0 -0
  31. app.py +0 -7
  32. builder.py +170 -0
  33. cli.py +130 -0
  34. constants.py +32 -0
  35. controller.py +298 -0
  36. conversation.py +526 -0
  37. gradio_css.py +72 -0
  38. gradio_web_server.py +509 -0
  39. inference.py +101 -0
  40. mm_utils.py +260 -0
  41. model_UI.py +273 -0
  42. model_worker.py +288 -0
  43. register_worker.py +26 -0
  44. sglang_worker.py +244 -0
  45. test_message.py +62 -0
  46. untitled +0 -0
  47. utils.py +126 -0
.config/.last_opt_in_prompt.yaml ADDED
@@ -0,0 +1 @@
+ {}
.config/.last_survey_prompt.yaml ADDED
@@ -0,0 +1 @@
+ last_prompt_time: 1728912157.7069197
.config/.last_update_check.json ADDED
@@ -0,0 +1 @@
+ {"last_update_check_time": 1728912158.695225, "last_update_check_revision": 20241004144944, "notifications": [], "last_nag_times": {}}
.config/active_config ADDED
@@ -0,0 +1 @@
+ default
.config/config_sentinel ADDED
File without changes
.config/configurations/config_default ADDED
@@ -0,0 +1,6 @@
+ [component_manager]
+ disable_update_check = true
+
+ [compute]
+ gce_metadata_read_timeout_sec = 0
+
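The two properties in this file correspond to the gcloud invocations recorded in the 13.22.49 and 13.22.50 logs below. As a minimal shell sketch (assuming the gcloud CLI is on PATH and CLOUDSDK_CONFIG points at this .config directory), the same file can be produced with:

  # writes disable_update_check = true under [component_manager]
  gcloud config set component_manager/disable_update_check true
  # writes gce_metadata_read_timeout_sec = 0 under [compute]
  gcloud config set compute/gce_metadata_read_timeout_sec 0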
.config/default_configs.db ADDED
Binary file (12.3 kB).
.config/gce ADDED
@@ -0,0 +1 @@
+ False
.config/hidden_gcloud_config_universe_descriptor_data_cache_configs.db ADDED
Binary file (12.3 kB).
.config/logs/2024.10.14/13.22.07.775513.log ADDED
@@ -0,0 +1,764 @@
+ 2024-10-14 13:22:19,800 DEBUG root Loaded Command Group: ['gcloud', 'components']
+ 2024-10-14 13:22:19,804 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+ 2024-10-14 13:22:19,806 DEBUG root Running [gcloud.components.update] with arguments: [--compile-python: "True", --quiet: "True", COMPONENT-IDS:6: "['core', 'gcloud-deps', 'bq', 'gcloud', 'gcloud-crc32c', 'gsutil']"]
+ 2024-10-14 13:22:19,807 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+ 2024-10-14 13:22:19,837 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+ 2024-10-14 13:22:19,903 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 227002
+ 2024-10-14 13:22:19,920 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 496.0.0
+ 2024-10-14 13:22:19,920 INFO ___FILE_ONLY___ Installing components from version: 496.0.0
+ These components will be installed:
+   Name                                                  Version      Size
+   BigQuery Command Line Tool                            2.1.9        1.7 MiB
+   BigQuery Command Line Tool (Platform Specific)        2.1.8        < 1 MiB
+   Bundled Python 3.11 (Platform Specific)               3.11.9       74.4 MiB
+   Cloud Storage Command Line Tool                       5.30         11.3 MiB
+   Cloud Storage Command Line Tool (Platform Specific)   5.30         < 1 MiB
+   Google Cloud CLI Core Libraries (Platform Specific)   2024.08.30   < 1 MiB
+   Google Cloud CRC32C Hash Tool (Platform Specific)     1.0.0        1.3 MiB
+   gcloud cli dependencies (Platform Specific)           2021.04.16   < 1 MiB
+ 2024-10-14 13:22:20,037 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1289001
+ 2024-10-14 13:22:20,073 INFO ___FILE_ONLY___ For the latest full release notes, please visit: https://cloud.google.com/sdk/release_notes
+ 2024-10-14 13:22:20,073 INFO ___FILE_ONLY___ Performing in place update...
+ [download/install progress bars elided: each component listed above, plus "Default set of gcloud commands", was fetched from https://dl.google.com/dl/cloudsdk/channels/rapid/components/ and installed between 13:22:20 and 13:22:27]
+ 2024-10-14 13:22:27,656 DEBUG root Updating notification cache...
+ 2024-10-14 13:22:27,659 INFO ___FILE_ONLY___ Performing post processing steps...
+ 2024-10-14 13:22:27,659 DEBUG root Executing command: ['/tools/google-cloud-sdk/bin/gcloud', 'components', 'post-process']
+ 2024-10-14 13:22:37,702 INFO ___FILE_ONLY___ Update done!
+ 2024-10-14 13:22:37,705 DEBUG root Chosen display Format:none
+ 2024-10-14 13:22:37,706 INFO root Display format: "none"
.config/logs/2024.10.14/13.22.28.151005.log ADDED
@@ -0,0 +1,5 @@
+ 2024-10-14 13:22:28,151 DEBUG root Loaded Command Group: ['gcloud', 'components']
+ 2024-10-14 13:22:28,153 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+ 2024-10-14 13:22:28,156 DEBUG root Running [gcloud.components.post-process] with arguments: []
+ 2024-10-14 13:22:37,540 DEBUG root Chosen display Format:none
+ 2024-10-14 13:22:37,541 INFO root Display format: "none"
.config/logs/2024.10.14/13.22.38.263602.log ADDED
@@ -0,0 +1,123 @@
+ 2024-10-14 13:22:38,264 DEBUG root Loaded Command Group: ['gcloud', 'components']
+ 2024-10-14 13:22:38,267 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+ 2024-10-14 13:22:38,270 DEBUG root Running [gcloud.components.update] with arguments: [--quiet: "True", COMPONENT-IDS:8: "['gcloud', 'core', 'bq', 'gsutil', 'compute', 'preview', 'alpha', 'beta']"]
+ 2024-10-14 13:22:38,272 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+ 2024-10-14 13:22:38,282 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+ 2024-10-14 13:22:38,367 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 227002
+ 2024-10-14 13:22:38,387 WARNING root Component [preview] no longer exists.
+ 2024-10-14 13:22:38,387 WARNING root Component [compute] no longer exists.
+ 2024-10-14 13:22:38,389 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 496.0.0
+ 2024-10-14 13:22:38,390 INFO ___FILE_ONLY___ Installing components from version: 496.0.0
+ These components will be installed:
+   Name                    Version      Size
+   gcloud Alpha Commands   2024.10.04   < 1 MiB
+   gcloud Beta Commands    2024.10.04   < 1 MiB
+ 2024-10-14 13:22:38,488 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1289001
+ 2024-10-14 13:22:38,524 INFO ___FILE_ONLY___ For the latest full release notes, please visit: https://cloud.google.com/sdk/release_notes
+ 2024-10-14 13:22:38,525 INFO ___FILE_ONLY___ Performing in place update...
+ [download/install progress bars elided: gcloud Alpha Commands and gcloud Beta Commands were fetched from https://dl.google.com/dl/cloudsdk/channels/rapid/components/ and installed at 13:22:38]
+ 2024-10-14 13:22:38,695 DEBUG root Updating notification cache...
+ 2024-10-14 13:22:38,697 INFO ___FILE_ONLY___ Performing post processing steps...
+ 2024-10-14 13:22:38,697 DEBUG root Executing command: ['/tools/google-cloud-sdk/bin/gcloud', 'components', 'post-process']
+ 2024-10-14 13:22:48,970 INFO ___FILE_ONLY___ Update done!
+ 2024-10-14 13:22:48,973 DEBUG root Chosen display Format:none
+ 2024-10-14 13:22:48,973 INFO root Display format: "none"
.config/logs/2024.10.14/13.22.39.146829.log ADDED
@@ -0,0 +1,5 @@
+ 2024-10-14 13:22:39,147 DEBUG root Loaded Command Group: ['gcloud', 'components']
+ 2024-10-14 13:22:39,149 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+ 2024-10-14 13:22:39,151 DEBUG root Running [gcloud.components.post-process] with arguments: []
+ 2024-10-14 13:22:48,837 DEBUG root Chosen display Format:none
+ 2024-10-14 13:22:48,838 INFO root Display format: "none"
.config/logs/2024.10.14/13.22.49.518111.log ADDED
@@ -0,0 +1,8 @@
+ 2024-10-14 13:22:49,520 DEBUG root Loaded Command Group: ['gcloud', 'config']
+ 2024-10-14 13:22:49,569 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
+ 2024-10-14 13:22:49,571 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "component_manager/disable_update_check", VALUE: "true"]
+ 2024-10-14 13:22:49,572 INFO ___FILE_ONLY___ Updated property [component_manager/disable_update_check].
+
+ 2024-10-14 13:22:49,573 DEBUG root Chosen display Format:default
+ 2024-10-14 13:22:49,574 INFO root Display format: "default"
+ 2024-10-14 13:22:49,574 DEBUG root SDK update checks are disabled.
.config/logs/2024.10.14/13.22.50.121513.log ADDED
@@ -0,0 +1,8 @@
+ 2024-10-14 13:22:50,123 DEBUG root Loaded Command Group: ['gcloud', 'config']
+ 2024-10-14 13:22:50,170 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
+ 2024-10-14 13:22:50,173 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "compute/gce_metadata_read_timeout_sec", VALUE: "0"]
+ 2024-10-14 13:22:50,174 INFO ___FILE_ONLY___ Updated property [compute/gce_metadata_read_timeout_sec].
+
+ 2024-10-14 13:22:50,175 DEBUG root Chosen display Format:default
+ 2024-10-14 13:22:50,175 INFO root Display format: "default"
+ 2024-10-14 13:22:50,176 DEBUG root SDK update checks are disabled.
.gitattributes copy ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
+ -----BEGIN CERTIFICATE-----
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+ -----END CERTIFICATE-----
=0.26.0 ADDED
@@ -0,0 +1,39 @@
+ Collecting accelerate
+ Downloading accelerate-1.0.1-py3-none-any.whl (330 kB)
+ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 330.9/330.9 kB 10.6 MB/s eta 0:00:00
+ Requirement already satisfied: torch>=1.10.0 in /usr/local/lib/python3.10/site-packages (from accelerate) (2.5.0)
+ Requirement already satisfied: numpy<3.0.0,>=1.17 in /usr/local/lib/python3.10/site-packages (from accelerate) (2.1.2)
+ Requirement already satisfied: huggingface-hub>=0.21.0 in /usr/local/lib/python3.10/site-packages (from accelerate) (0.26.0)
+ Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/site-packages (from accelerate) (6.0.2)
+ Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/site-packages (from accelerate) (24.1)
+ Requirement already satisfied: safetensors>=0.4.3 in /usr/local/lib/python3.10/site-packages (from accelerate) (0.4.5)
+ Requirement already satisfied: psutil in /usr/local/lib/python3.10/site-packages (from accelerate) (5.9.8)
+ Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/site-packages (from huggingface-hub>=0.21.0->accelerate) (2024.6.1)
+ Requirement already satisfied: requests in /usr/local/lib/python3.10/site-packages (from huggingface-hub>=0.21.0->accelerate) (2.32.3)
+ Requirement already satisfied: filelock in /usr/local/lib/python3.10/site-packages (from huggingface-hub>=0.21.0->accelerate) (3.16.1)
+ Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.10/site-packages (from huggingface-hub>=0.21.0->accelerate) (4.66.5)
+ Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/site-packages (from huggingface-hub>=0.21.0->accelerate) (4.12.2)
+ Requirement already satisfied: nvidia-cuda-runtime-cu12==12.4.127 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.4.127)
+ Requirement already satisfied: triton==3.1.0 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (3.1.0)
+ Requirement already satisfied: nvidia-cusolver-cu12==11.6.1.9 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (11.6.1.9)
+ Requirement already satisfied: nvidia-nccl-cu12==2.21.5 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (2.21.5)
+ Requirement already satisfied: nvidia-cusparse-cu12==12.3.1.170 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.3.1.170)
+ Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (3.1.4)
+ Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.127 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.4.127)
+ Requirement already satisfied: nvidia-cuda-cupti-cu12==12.4.127 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.4.127)
+ Requirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (9.1.0.70)
+ Requirement already satisfied: nvidia-nvjitlink-cu12==12.4.127 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.4.127)
+ Requirement already satisfied: networkx in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (3.4.1)
+ Requirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.4.127)
+ Requirement already satisfied: nvidia-cublas-cu12==12.4.5.8 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (12.4.5.8)
+ Requirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (1.13.1)
+ Requirement already satisfied: nvidia-cufft-cu12==11.2.1.3 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (11.2.1.3)
+ Requirement already satisfied: nvidia-curand-cu12==10.3.5.147 in /usr/local/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (10.3.5.147)
+ Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.10/site-packages (from sympy==1.13.1->torch>=1.10.0->accelerate) (1.3.0)
+ Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/site-packages (from jinja2->torch>=1.10.0->accelerate) (2.1.5)
+ Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (2024.8.30)
+ Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (3.4.0)
+ Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (2.2.3)
+ Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (3.10)
+ Installing collected packages: accelerate
+ Successfully installed accelerate-1.0.1
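Note: the odd file name `=0.26.0` is likely the accidental product of an unquoted shell redirect (something like `pip install accelerate huggingface_hub>=0.26.0`, where the shell treated `>=0.26.0` as output redirection); the content is just the captured pip transcript. A quick sanity check, assuming it is run in the same Python 3.10 environment as the log, that the versions resolved as shown:

import accelerate, torch, huggingface_hub  # versions pinned by the install above
print(accelerate.__version__)        # expected: 1.0.1
print(torch.__version__)             # expected: 2.5.0
print(huggingface_hub.__version__)   # expected: 0.26.0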
DejaVuSans.ttf ADDED
Binary file (757 kB). View file
 
__pycache__/app.cpython-310.pyc ADDED
Binary file (341 Bytes). View file
 
__pycache__/builder.cpython-310.pyc ADDED
Binary file (5.16 kB). View file
 
__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
__pycache__/controller.cpython-310.pyc ADDED
Binary file (8.18 kB). View file
 
__pycache__/conversation.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
__pycache__/gradio_css.cpython-310.pyc ADDED
Binary file (2.8 kB). View file
 
__pycache__/gradio_web_server.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
__pycache__/inference.cpython-310.pyc ADDED
Binary file (3.26 kB). View file
 
__pycache__/mm_utils.cpython-310.pyc ADDED
Binary file (9.15 kB). View file
 
__pycache__/model_worker.cpython-310.pyc ADDED
Binary file (8.77 kB). View file
 
__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.98 kB). View file
 
app.py DELETED
@@ -1,7 +0,0 @@
- import gradio as gr
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch()
builder.py ADDED
@@ -0,0 +1,170 @@
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import os
+ import shutil
+ import pdb
+
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
+ import torch
+
+ CONTROLLER_HEART_BEAT_EXPIRATION = 30
+ WORKER_HEART_BEAT_INTERVAL = 15
+
+ LOGDIR = "."
+
+ # Model Constants
+ IGNORE_INDEX = -100
+ IMAGE_TOKEN_INDEX = -200
+ DEFAULT_IMAGE_TOKEN = "<image>"
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
+ DEFAULT_IM_START_TOKEN = "<im_start>"
+ DEFAULT_IM_END_TOKEN = "<im_end>"
+ IMAGE_PLACEHOLDER = "<image-placeholder>"
+
+ # Added by Ferret
+ DEFAULT_REGION_FEA_TOKEN = "<region_fea>"
+ VOCAB_IMAGE_W = 1000
+ VOCAB_IMAGE_H = 1000
+
+ # GROUNDING PROMPTS
+ GROUNDING_TEMPLATES = [
+     '\nProvide the bounding boxes of the mentioned objects.',
+     '\nInclude the coordinates for each mentioned object.',
+     '\nLocate the objects with their coordinates.',
+     '\nAnswer in [x1, y1, x2, y2] format.',
+     '\nMention the objects and their locations using the format [x1, y1, x2, y2].',
+     '\nDraw boxes around the mentioned objects.',
+     '\nUse boxes to show where each thing is.',
+     '\nTell me where the objects are with coordinates.',
+     '\nList where each object is with boxes.',
+     '\nShow me the regions with boxes.'
+ ]
+ DEFAULT_REGION_FEA_TOKEN = "<region_fea>"
+
+ def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"):
+     kwargs = {"device_map": device_map}
+
+     if load_8bit:
+         kwargs['load_in_8bit'] = True
+     elif load_4bit:
+         kwargs['load_in_4bit'] = True
+         kwargs['quantization_config'] = BitsAndBytesConfig(
+             load_in_4bit=True,
+             bnb_4bit_compute_dtype=torch.float16,
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_quant_type='nf4'
+         )
+     else:
+         kwargs['torch_dtype'] = torch.float16
+
+     if 'llava' in model_name.lower() or 'ferret' in model_name.lower():
+         # Load LLaVA/FERRET model
+         if 'lora' in model_name.lower() and model_base is not None:
+             lora_cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
+             tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False, trust_remote_code=True)
+             print('Loading LLaVA/FERRET from base model...')
+             model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs, trust_remote_code=True)
+             token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
+             if model.lm_head.weight.shape[0] != token_num:
+                 model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
+                 model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
+
+             print('Loading additional LLaVA/FERRET weights...')
+             if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
+                 non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
+             else:
+                 # this is probably from HF Hub
+                 from huggingface_hub import hf_hub_download
+                 def load_from_hf(repo_id, filename, subfolder=None):
+                     cache_file = hf_hub_download(
+                         repo_id=repo_id,
+                         filename=filename,
+                         subfolder=subfolder)
+                     return torch.load(cache_file, map_location='cpu')
+                 non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
+             non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
+             if any(k.startswith('model.model.') for k in non_lora_trainables):
+                 non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
+             model.load_state_dict(non_lora_trainables, strict=False)
+
+             from peft import PeftModel
+             print('Loading LoRA weights...')
+             model = PeftModel.from_pretrained(model, model_path, trust_remote_code=True)
+             print('Merging LoRA weights...')
+             model = model.merge_and_unload()
+             print('Model is loaded...')
+         elif model_base is not None:
+             # this may be mm projector only
+             print('Loading LLaVA/FERRET from base model...')
+             tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
+             cfg_pretrained = AutoConfig.from_pretrained(model_path)
+             model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs, trust_remote_code=True)
+
+             mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
+             mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
+             model.load_state_dict(mm_projector_weights, strict=False)
+         else:
+             tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
+             model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs, trust_remote_code=True)
+     else:
+         # Load language model
+         if model_base is not None:
+             # PEFT model
+             from peft import PeftModel
+             tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
+             model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto", trust_remote_code=True)
+             print(f"Loading LoRA weights from {model_path}")
+             model = PeftModel.from_pretrained(model, model_path, trust_remote_code=True)
+             print("Merging weights")
+             model = model.merge_and_unload()
+             print('Convert to FP16...')
+             model.to(torch.float16)
+         else:
+             use_fast = False
+             tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False, trust_remote_code=True)
+             model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs, trust_remote_code=True)
+
+     image_processor = None
+
+     if 'llava' in model_name.lower() or 'ferret' in model_name.lower():
+         mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
+         mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
+         mm_im_region_fea_token = getattr(model.config, "im_region_fea_token", None)
+         if mm_use_im_patch_token:
+             tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
+         if mm_im_region_fea_token is not None:
+             tokenizer.add_tokens([DEFAULT_REGION_FEA_TOKEN], special_tokens=True)
+         if mm_use_im_start_end:
+             tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
+         model.resize_token_embeddings(len(tokenizer))
+
+         vision_tower = model.get_vision_tower()
+         vision_tower_path = os.path.join(model_path, 'vision_tower')
+         if not vision_tower.is_loaded or os.path.exists(vision_tower_path):
+             if os.path.exists(vision_tower_path):
+                 print(f'Start Loading vision tower from {vision_tower_path}')
+                 vision_tower.load_model(vision_tower_path=vision_tower_path)
+                 print(f'Finish Loading vision tower from {vision_tower_path}')
+             else:
+                 vision_tower.load_model()
+
+         vision_tower.to(device=device, dtype=torch.float16)
+         image_processor = vision_tower.image_processor
+
+     if hasattr(model.config, "max_sequence_length"):
+         context_len = model.config.max_sequence_length
+     else:
+         context_len = 2048
+
+     return tokenizer, model, image_processor, context_len
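A minimal usage sketch for load_pretrained_model (the checkpoint id, model name, and CUDA availability are illustrative assumptions; any model name containing "ferret" or "llava" takes the vision-tower path):

from builder import load_pretrained_model

tokenizer, model, image_processor, context_len = load_pretrained_model(
    model_path="jadechoghari/Ferret-UI-Gemma2b",  # assumed HF repo id
    model_base=None,
    model_name="ferret_gemma",  # must contain "ferret"/"llava" to load the vision tower
    load_4bit=True,             # optional NF4 quantization via BitsAndBytesConfig
)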
cli.py ADDED
@@ -0,0 +1,130 @@
+ import argparse
+ import torch
+
+ from constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+ from conversation import conv_templates, SeparatorStyle
+ from builder import load_pretrained_model
+ from utils import disable_torch_init
+ from mm_utils import process_images, tokenizer_image_token, get_model_name_from_path
+
+ from PIL import Image
+
+ import requests
+ from PIL import Image
+ from io import BytesIO
+ from transformers import TextStreamer
+
+
+ def load_image(image_file):
+     if image_file.startswith('http://') or image_file.startswith('https://'):
+         response = requests.get(image_file)
+         image = Image.open(BytesIO(response.content)).convert('RGB')
+     else:
+         image = Image.open(image_file).convert('RGB')
+     return image
+
+
+ def main(args):
+     # Model
+     disable_torch_init()
+
+     model_name = get_model_name_from_path(args.model_path)
+     tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
+
+     if "llama-2" in model_name.lower():
+         conv_mode = "llava_llama_2"
+     elif "mistral" in model_name.lower():
+         conv_mode = "mistral_instruct"
+     elif "v1.6-34b" in model_name.lower():
+         conv_mode = "chatml_direct"
+     elif "v1" in model_name.lower():
+         conv_mode = "llava_v1"
+     elif "mpt" in model_name.lower():
+         conv_mode = "mpt"
+     elif "gemma" in model_name.lower():
+         conv_mode = "ferret_gemma_instruct"
+     elif "llama" in model_name.lower():
+         conv_mode = "ferret_llama_3"
+     else:
+         conv_mode = "llava_v0"
+
+     if args.conv_mode is not None and conv_mode != args.conv_mode:
+         print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
+     else:
+         args.conv_mode = conv_mode
+
+     conv = conv_templates[args.conv_mode].copy()
+     if "mpt" in model_name.lower():
+         roles = ('user', 'assistant')
+     else:
+         roles = conv.roles
+
+     image = load_image(args.image_file)
+     image_size = image.size
+     # Similar operation in model_worker.py
+     image_tensor = process_images([image], image_processor, model.config)
+     if type(image_tensor) is list:
+         image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor]
+     else:
+         image_tensor = image_tensor.to(model.device, dtype=torch.float16)
+
+     while True:
+         try:
+             inp = input(f"{roles[0]}: ")
+         except EOFError:
+             inp = ""
+         if not inp:
+             print("exit...")
+             break
+
+         print(f"{roles[1]}: ", end="")
+
+         if image is not None:
+             # first message
+             if model.config.mm_use_im_start_end:
+                 inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
+             else:
+                 inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
+             image = None
+
+         conv.append_message(conv.roles[0], inp)
+         conv.append_message(conv.roles[1], None)
+         prompt = conv.get_prompt()
+
+         input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
+         stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
+         keywords = [stop_str]
+         streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+         with torch.inference_mode():
+             output_ids = model.generate(
+                 input_ids,
+                 images=image_tensor,
+                 image_sizes=[image_size],
+                 do_sample=True if args.temperature > 0 else False,
+                 temperature=args.temperature,
+                 max_new_tokens=args.max_new_tokens,
+                 streamer=streamer,
+                 use_cache=True)
+
+         outputs = tokenizer.decode(output_ids[0]).strip()
+         conv.messages[-1][-1] = outputs
+
+         if args.debug:
+             print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+     parser.add_argument("--model-base", type=str, default=None)
+     parser.add_argument("--image-file", type=str, required=True)
+     parser.add_argument("--device", type=str, default="cuda")
+     parser.add_argument("--conv-mode", type=str, default=None)
+     parser.add_argument("--temperature", type=float, default=0.2)
+     parser.add_argument("--max-new-tokens", type=int, default=512)
+     parser.add_argument("--load-8bit", action="store_true")
+     parser.add_argument("--load-4bit", action="store_true")
+     parser.add_argument("--debug", action="store_true")
+     args = parser.parse_args()
+     main(args)
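A sketch of driving main() programmatically rather than through the argument parser; the checkpoint id and image path are hypothetical placeholders:

from argparse import Namespace
from cli import main

main(Namespace(
    model_path="jadechoghari/Ferret-UI-Gemma2b",  # hypothetical checkpoint
    model_base=None,
    image_file="screenshot.png",                  # hypothetical local image
    device="cuda",
    conv_mode=None,      # lets main() infer "ferret_gemma_instruct" from the name
    temperature=0.2,
    max_new_tokens=512,
    load_8bit=False,
    load_4bit=False,
    debug=False,
))  # then chat at the interactive role prompt; an empty line exits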
constants.py ADDED
@@ -0,0 +1,32 @@
+ CONTROLLER_HEART_BEAT_EXPIRATION = 30
+ WORKER_HEART_BEAT_INTERVAL = 15
+
+ LOGDIR = "."
+
+ # Model Constants
+ IGNORE_INDEX = -100
+ IMAGE_TOKEN_INDEX = -200
+ DEFAULT_IMAGE_TOKEN = "<image>"
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
+ DEFAULT_IM_START_TOKEN = "<im_start>"
+ DEFAULT_IM_END_TOKEN = "<im_end>"
+ IMAGE_PLACEHOLDER = "<image-placeholder>"
+
+ # Added by Ferret
+ DEFAULT_REGION_FEA_TOKEN = "<region_fea>"
+ VOCAB_IMAGE_W = 1000
+ VOCAB_IMAGE_H = 1000
+
+ # GROUNDING PROMPTS
+ GROUNDING_TEMPLATES = [
+     '\nProvide the bounding boxes of the mentioned objects.',
+     '\nInclude the coordinates for each mentioned object.',
+     '\nLocate the objects with their coordinates.',
+     '\nAnswer in [x1, y1, x2, y2] format.',
+     '\nMention the objects and their locations using the format [x1, y1, x2, y2].',
+     '\nDraw boxes around the mentioned objects.',
+     '\nUse boxes to show where each thing is.',
+     '\nTell me where the objects are with coordinates.',
+     '\nList where each object is with boxes.',
+     '\nShow me the regions with boxes.'
+ ]
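These templates are meant to be appended to a question so the model answers with boxes on the 1000x1000 vocabulary grid; a small sketch (the query and the rescaling step are illustrative assumptions):

import random
from constants import GROUNDING_TEMPLATES, VOCAB_IMAGE_W, VOCAB_IMAGE_H

question = "Where is the Submit button?"  # hypothetical query
prompt = question + random.choice(GROUNDING_TEMPLATES)
# Returned [x1, y1, x2, y2] coordinates live on a VOCAB_IMAGE_W x VOCAB_IMAGE_H
# grid and must be rescaled to pixels, e.g. x_px = x * image_width / VOCAB_IMAGE_W.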
controller.py ADDED
@@ -0,0 +1,298 @@
+ """
+ A controller manages distributed workers.
+ It sends worker addresses to clients.
+ """
+ import argparse
+ import asyncio
+ import dataclasses
+ from enum import Enum, auto
+ import json
+ import logging
+ import time
+ from typing import List, Union
+ import threading
+
+ from fastapi import FastAPI, Request
+ from fastapi.responses import StreamingResponse
+ import numpy as np
+ import requests
+ import uvicorn
+
+ CONTROLLER_HEART_BEAT_EXPIRATION = 30
+ from utils import build_logger, server_error_msg
+
+
+ logger = build_logger("controller", "controller.log")
+
+
+ class DispatchMethod(Enum):
+     LOTTERY = auto()
+     SHORTEST_QUEUE = auto()
+
+     @classmethod
+     def from_str(cls, name):
+         if name == "lottery":
+             return cls.LOTTERY
+         elif name == "shortest_queue":
+             return cls.SHORTEST_QUEUE
+         else:
+             raise ValueError(f"Invalid dispatch method: {name}")
+
+
+ @dataclasses.dataclass
+ class WorkerInfo:
+     model_names: List[str]
+     speed: int
+     queue_length: int
+     check_heart_beat: bool
+     last_heart_beat: float
+
+
+ def heart_beat_controller(controller):
+     while True:
+         time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
+         controller.remove_stale_workers_by_expiration()
+
+
+ class Controller:
+     def __init__(self, dispatch_method: str):
+         # Dict[str -> WorkerInfo]
+         self.worker_info = {}
+         self.dispatch_method = DispatchMethod.from_str(dispatch_method)
+
+         self.heart_beat_thread = threading.Thread(
+             target=heart_beat_controller, args=(self,), daemon=True)
+         self.heart_beat_thread.start()
+
+         logger.info("Init controller")
+
+     def register_worker(self, worker_name: str, check_heart_beat: bool,
+                         worker_status: dict):
+         if worker_name not in self.worker_info:
+             logger.info(f"Register a new worker: {worker_name}")
+         else:
+             logger.info(f"Register an existing worker: {worker_name}")
+
+         if not worker_status:
+             worker_status = self.get_worker_status(worker_name)
+         if not worker_status:
+             return False
+
+         self.worker_info[worker_name] = WorkerInfo(
+             worker_status["model_names"], worker_status["speed"], worker_status["queue_length"],
+             check_heart_beat, time.time())
+
+         logger.info(f"Register done: {worker_name}, {worker_status}")
+         return True
+
+     def get_worker_status(self, worker_name: str):
+         try:
+             r = requests.post(worker_name + "/worker_get_status", timeout=5)
+         except requests.exceptions.RequestException as e:
+             logger.error(f"Get status fails: {worker_name}, {e}")
+             return None
+
+         if r.status_code != 200:
+             logger.error(f"Get status fails: {worker_name}, {r}")
+             return None
+
+         return r.json()
+
+     def remove_worker(self, worker_name: str):
+         del self.worker_info[worker_name]
+
+     def refresh_all_workers(self):
+         old_info = dict(self.worker_info)
+         self.worker_info = {}
+
+         for w_name, w_info in old_info.items():
+             if not self.register_worker(w_name, w_info.check_heart_beat, None):
+                 logger.info(f"Remove stale worker: {w_name}")
+
+     def list_models(self):
+         model_names = set()
+
+         for w_name, w_info in self.worker_info.items():
+             model_names.update(w_info.model_names)
+
+         return list(model_names)
+
+     def get_worker_address(self, model_name: str):
+         if self.dispatch_method == DispatchMethod.LOTTERY:
+             worker_names = []
+             worker_speeds = []
+             for w_name, w_info in self.worker_info.items():
+                 if model_name in w_info.model_names:
+                     worker_names.append(w_name)
+                     worker_speeds.append(w_info.speed)
+             worker_speeds = np.array(worker_speeds, dtype=np.float32)
+             norm = np.sum(worker_speeds)
+             if norm < 1e-4:
+                 return ""
+             worker_speeds = worker_speeds / norm
+             if True:  # Directly return address
+                 pt = np.random.choice(np.arange(len(worker_names)),
+                                       p=worker_speeds)
+                 worker_name = worker_names[pt]
+                 return worker_name
+
+             # Check status before returning
+             while True:
+                 pt = np.random.choice(np.arange(len(worker_names)),
+                                       p=worker_speeds)
+                 worker_name = worker_names[pt]
+
+                 if self.get_worker_status(worker_name):
+                     break
+                 else:
+                     self.remove_worker(worker_name)
+                     worker_speeds[pt] = 0
+                     norm = np.sum(worker_speeds)
+                     if norm < 1e-4:
+                         return ""
+                     worker_speeds = worker_speeds / norm
+                     continue
+             return worker_name
+         elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
+             worker_names = []
+             worker_qlen = []
+             for w_name, w_info in self.worker_info.items():
+                 if model_name in w_info.model_names:
+                     worker_names.append(w_name)
+                     worker_qlen.append(w_info.queue_length / w_info.speed)
+             if len(worker_names) == 0:
+                 return ""
+             min_index = np.argmin(worker_qlen)
+             w_name = worker_names[min_index]
+             self.worker_info[w_name].queue_length += 1
+             logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
+             return w_name
+         else:
+             raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
+
+     def receive_heart_beat(self, worker_name: str, queue_length: int):
+         if worker_name not in self.worker_info:
+             logger.info(f"Receive unknown heart beat. {worker_name}")
+             return False
+
+         self.worker_info[worker_name].queue_length = queue_length
+         self.worker_info[worker_name].last_heart_beat = time.time()
+         logger.info(f"Receive heart beat. {worker_name}")
+         return True
+
+     def remove_stale_workers_by_expiration(self):
+         expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
+         to_delete = []
+         for worker_name, w_info in self.worker_info.items():
+             if w_info.check_heart_beat and w_info.last_heart_beat < expire:
+                 to_delete.append(worker_name)
+
+         for worker_name in to_delete:
+             self.remove_worker(worker_name)
+
+     def worker_api_generate_stream(self, params):
+         worker_addr = self.get_worker_address(params["model"])
+         if not worker_addr:
+             logger.info(f"no worker: {params['model']}")
+             ret = {
+                 "text": server_error_msg,
+                 "error_code": 2,
+             }
+             yield json.dumps(ret).encode() + b"\0"
+
+         try:
+             response = requests.post(worker_addr + "/worker_generate_stream",
+                                      json=params, stream=True, timeout=5)
+             for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
+                 if chunk:
+                     yield chunk + b"\0"
+         except requests.exceptions.RequestException as e:
+             logger.info(f"worker timeout: {worker_addr}")
+             ret = {
+                 "text": server_error_msg,
+                 "error_code": 3,
+             }
+             yield json.dumps(ret).encode() + b"\0"
+
+
+     # Let the controller act as a worker to achieve hierarchical
+     # management. This can be used to connect isolated sub networks.
+     def worker_api_get_status(self):
+         model_names = set()
+         speed = 0
+         queue_length = 0
+
+         for w_name in self.worker_info:
+             worker_status = self.get_worker_status(w_name)
+             if worker_status is not None:
+                 model_names.update(worker_status["model_names"])
+                 speed += worker_status["speed"]
+                 queue_length += worker_status["queue_length"]
+
+         return {
+             "model_names": list(model_names),
+             "speed": speed,
+             "queue_length": queue_length,
+         }
+
+
+ app = FastAPI()
+
+
+ @app.post("/register_worker")
+ async def register_worker(request: Request):
+     data = await request.json()
+     controller.register_worker(
+         data["worker_name"], data["check_heart_beat"],
+         data.get("worker_status", None))
+
+
+ @app.post("/refresh_all_workers")
+ async def refresh_all_workers():
+     models = controller.refresh_all_workers()
+
+
+ @app.post("/list_models")
+ async def list_models():
+     models = controller.list_models()
+     return {"models": models}
+
+
+ @app.post("/get_worker_address")
+ async def get_worker_address(request: Request):
+     data = await request.json()
+     addr = controller.get_worker_address(data["model"])
+     return {"address": addr}
+
+
+ @app.post("/receive_heart_beat")
+ async def receive_heart_beat(request: Request):
+     data = await request.json()
+     exist = controller.receive_heart_beat(
+         data["worker_name"], data["queue_length"])
+     return {"exist": exist}
+
+
+ @app.post("/worker_generate_stream")
+ async def worker_api_generate_stream(request: Request):
+     params = await request.json()
+     generator = controller.worker_api_generate_stream(params)
+     return StreamingResponse(generator)
+
+
+ @app.post("/worker_get_status")
+ async def worker_api_get_status(request: Request):
+     return controller.worker_api_get_status()
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--host", type=str, default="localhost")
+     parser.add_argument("--port", type=int, default=21001)
+     parser.add_argument("--dispatch-method", type=str, choices=[
+         "lottery", "shortest_queue"], default="shortest_queue")
+     args = parser.parse_args()
+     logger.info(f"args: {args}")
+
+     controller = Controller(args.dispatch_method)
+     uvicorn.run(app, host=args.host, port=args.port, log_level="info")
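A sketch of a client round-trip against the controller's HTTP API above, assuming it is running on the default host/port with at least one registered worker:

import requests

base = "http://localhost:21001"
requests.post(base + "/refresh_all_workers")                      # re-check workers
models = requests.post(base + "/list_models").json()["models"]
addr = requests.post(base + "/get_worker_address",
                     json={"model": models[0]}).json()["address"]
print(models, addr)  # addr is "" when no live worker serves the model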
conversation.py ADDED
@@ -0,0 +1,526 @@
+ import dataclasses
+ from enum import auto, Enum
+ from typing import List, Tuple
+ import base64
+ from io import BytesIO
+ from PIL import Image
+
+ VOCAB_IMAGE_W = 1000  # 224
+ VOCAB_IMAGE_H = 1000  # 224
+
+
+ class SeparatorStyle(Enum):
+     """Different separator style."""
+     SINGLE = auto()
+     TWO = auto()
+     MPT = auto()
+     PLAIN = auto()
+     LLAMA_2 = auto()
+     GEMMA = auto()
+
+
+ @dataclasses.dataclass
+ class Conversation:
+     """A class that keeps all conversation history."""
+     system: str
+     roles: List[str]
+     messages: List[List[str]]
+     offset: int
+     sep_style: SeparatorStyle = SeparatorStyle.SINGLE
+     sep: str = "###"
+     sep2: str = None
+     version: str = "Unknown"
+
+     skip_next: bool = False
+
+     def get_prompt(self):
+         messages = self.messages
+         if len(messages) > 0 and type(messages[0][1]) is tuple:
+             messages = self.messages.copy()
+             init_role, init_msg = messages[0].copy()
+             init_msg = init_msg[0].replace("<image>", "").strip()
+             if 'mmtag' in self.version:
+                 messages[0] = (init_role, init_msg)
+                 messages.insert(0, (self.roles[0], "<Image><image></Image>"))
+                 messages.insert(1, (self.roles[1], "Received."))
+             else:
+                 messages[0] = (init_role, "<image>\n" + init_msg)
+
+         if self.sep_style == SeparatorStyle.SINGLE:
+             ret = self.system + self.sep
+             for role, message in messages:
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     ret += role + ": " + message + self.sep
+                 else:
+                     ret += role + ":"
+         elif self.sep_style == SeparatorStyle.TWO:
+             seps = [self.sep, self.sep2]
+             ret = self.system + seps[0]
+             for i, (role, message) in enumerate(messages):
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     ret += role + ": " + message + seps[i % 2]
+                 else:
+                     ret += role + ":"
+         elif self.sep_style == SeparatorStyle.MPT:
+             ret = self.system + self.sep
+             for role, message in messages:
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     ret += role + message + self.sep
+                 else:
+                     ret += role
+         elif self.sep_style == SeparatorStyle.LLAMA_2:
+             wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n" if len(msg) > 0 else msg
+             wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
+             ret = ""
+
+             for i, (role, message) in enumerate(messages):
+                 if i == 0:
+                     assert message, "first message should not be none"
+                     assert role == self.roles[0], "first message should come from user"
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     if i == 0: message = wrap_sys(self.system) + message
+                     if i % 2 == 0:
+                         message = wrap_inst(message)
+                         ret += self.sep + message
+                     else:
+                         ret += " " + message + " " + self.sep2
+                 else:
+                     ret += ""
+             ret = ret.lstrip(self.sep)
+         elif self.sep_style == SeparatorStyle.GEMMA:
+             seps = [self.sep, self.sep2]
+             ret = self.system + seps[0]
+             for i, (role, message) in enumerate(messages):
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     ret += "<start_of_turn>" + role + "\n" + message + "<end_of_turn>\n" + seps[i % 2]
+                 else:
+                     ret += "<start_of_turn>" + role + "\n"
+         elif self.sep_style == SeparatorStyle.PLAIN:
+             seps = [self.sep, self.sep2]
+             ret = self.system
+             for i, (role, message) in enumerate(messages):
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     ret += message + seps[i % 2]
+                 else:
+                     ret += ""
+         else:
+             raise ValueError(f"Invalid style: {self.sep_style}")
+
+         return ret
+
+     def append_message(self, role, message):
+         self.messages.append([role, message])
+
+     def get_images(self, return_pil=False):
+         images = []
+         for i, (role, msg) in enumerate(self.messages[self.offset:]):
+             if i % 2 == 0:
+                 if type(msg) is tuple:
+                     import base64
+                     from io import BytesIO
+                     from PIL import Image
+                     msg, image, image_process_mode = msg
+                     if image_process_mode == "Pad":
+                         def expand2square(pil_img, background_color=(122, 116, 104)):
+                             width, height = pil_img.size
+                             if width == height:
+                                 return pil_img
+                             elif width > height:
+                                 result = Image.new(pil_img.mode, (width, width), background_color)
+                                 result.paste(pil_img, (0, (width - height) // 2))
+                                 return result
+                             else:
+                                 result = Image.new(pil_img.mode, (height, height), background_color)
+                                 result.paste(pil_img, ((height - width) // 2, 0))
+                                 return result
+                         image = expand2square(image)
+                     elif image_process_mode in ["Default", "Crop"]:
+                         pass
+                     elif image_process_mode == "Raw+Processor":
+                         pass
+                     elif image_process_mode == "Resize":
+                         image = image.resize((336, 336))
+                     else:
+                         raise ValueError(f"Invalid image_process_mode: {image_process_mode}")
+
+                     if image_process_mode != "Raw+Processor":
+                         max_hw, min_hw = max(image.size), min(image.size)
+                         aspect_ratio = max_hw / min_hw
+                         max_len, min_len = 800, 400
+                         shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
+                         longest_edge = int(shortest_edge * aspect_ratio)
+                         W, H = image.size
+                         if H > W:
+                             H, W = longest_edge, shortest_edge
+                         else:
+                             H, W = shortest_edge, longest_edge
+                         image = image.resize((W, H))
+                     print('Input Image Size:{}'.format(image.size))
+
+                     if return_pil:
+                         images.append(image)
+                     else:
+                         buffered = BytesIO()
+                         image.save(buffered, format="PNG")
+                         img_b64_str = base64.b64encode(buffered.getvalue()).decode()
+                         images.append(img_b64_str)
+         return images
+
+     def to_gradio_chatbot(self):
+         ret = []
+         for i, (role, msg) in enumerate(self.messages[self.offset:]):
+             if i % 2 == 0:
+                 if type(msg) is tuple:
+                     import base64
+                     from io import BytesIO
+                     msg, image, image_process_mode = msg
+                     if image_process_mode != "Raw+Processor":
+                         max_hw, min_hw = max(image.size), min(image.size)
+                         aspect_ratio = max_hw / min_hw
+                         max_len, min_len = 800, 400
+                         shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
+                         longest_edge = int(shortest_edge * aspect_ratio)
+                         W, H = image.size
+                         if H > W:
+                             H, W = longest_edge, shortest_edge
+                         else:
+                             H, W = shortest_edge, longest_edge
+                         image = image.resize((W, H))
+                     buffered = BytesIO()
+                     image.save(buffered, format="JPEG")
+                     img_b64_str = base64.b64encode(buffered.getvalue()).decode()
+                     img_str = f'<img src="data:image/jpeg;base64,{img_b64_str}" alt="user upload image" />'
+                     ret.append([img_str, None])
+                     msg = msg.replace('<image>', '').strip()
+                     if len(msg) > 0:
+                         ret.append([msg, None])
+                 else:
+                     ret.append([msg, None])
+             else:
+                 ret[-1][-1] = msg
+         return ret
+
+     def copy(self):
+         return Conversation(
+             system=self.system,
+             roles=self.roles,
+             messages=[[x, y] for x, y in self.messages],
+             offset=self.offset,
+             sep_style=self.sep_style,
+             sep=self.sep,
+             sep2=self.sep2,
+             version=self.version)
+
+     def dict(self):
+         if len(self.get_images()) > 0:
+             return {
+                 "system": self.system,
+                 "roles": self.roles,
+                 "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
+                 "offset": self.offset,
+                 "sep": self.sep,
+                 "sep2": self.sep2,
+             }
+         return {
+             "system": self.system,
+             "roles": self.roles,
+             "messages": self.messages,
+             "offset": self.offset,
+             "sep": self.sep,
+             "sep2": self.sep2,
+         }
+
+
+ conv_vicuna_v0 = Conversation(
+     system="A chat between a curious human and an artificial intelligence assistant. "
+            "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+     roles=("Human", "Assistant"),
+     messages=(
+         ("Human", "What are the key differences between renewable and non-renewable energy sources?"),
+         ("Assistant",
+          "Renewable energy sources are those that can be replenished naturally in a relatively "
+          "short amount of time, such as solar, wind, hydro, geothermal, and biomass. "
+          "Non-renewable energy sources, on the other hand, are finite and will eventually be "
+          "depleted, such as coal, oil, and natural gas. Here are some key differences between "
+          "renewable and non-renewable energy sources:\n"
+          "1. Availability: Renewable energy sources are virtually inexhaustible, while non-renewable "
+          "energy sources are finite and will eventually run out.\n"
+          "2. Environmental impact: Renewable energy sources have a much lower environmental impact "
+          "than non-renewable sources, which can lead to air and water pollution, greenhouse gas emissions, "
+          "and other negative effects.\n"
+          "3. Cost: Renewable energy sources can be more expensive to initially set up, but they typically "
+          "have lower operational costs than non-renewable sources.\n"
+          "4. Reliability: Renewable energy sources are often more reliable and can be used in more remote "
+          "locations than non-renewable sources.\n"
+          "5. Flexibility: Renewable energy sources are often more flexible and can be adapted to different "
+          "situations and needs, while non-renewable sources are more rigid and inflexible.\n"
+          "6. Sustainability: Renewable energy sources are more sustainable over the long term, while "
+          "non-renewable sources are not, and their depletion can lead to economic and social instability.\n")
+     ),
+     offset=2,
+     sep_style=SeparatorStyle.SINGLE,
+     sep="###",
+ )
+
+ conv_vicuna_v1 = Conversation(
+     system="A chat between a curious user and an artificial intelligence assistant. "
+            "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+     roles=("USER", "ASSISTANT"),
+     version="v1",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.TWO,
+     sep=" ",
+     sep2="</s>",
+ )
+
+ conv_llama_2 = Conversation(
+     system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+
+ If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
+     roles=("USER", "ASSISTANT"),
+     version="llama_v2",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.LLAMA_2,
+     sep="<s>",
+     sep2="</s>",
+ )
+
+ conv_llava_llama_2 = Conversation(
+     system="You are a helpful language and vision assistant. "
+            "You are able to understand the visual content that the user provides, "
+            "and assist the user with a variety of tasks using natural language.",
+     roles=("USER", "ASSISTANT"),
+     version="llama_v2",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.LLAMA_2,
+     sep="<s>",
+     sep2="</s>",
+ )
+
+ conv_mpt = Conversation(
+     system="""<|im_start|>system
+ A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
+     roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+     version="mpt",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.MPT,
+     sep="<|im_end|>",
+ )
+
+ conv_llava_plain = Conversation(
+     system="",
+     roles=("", ""),
+     messages=(
+     ),
+     offset=0,
+     sep_style=SeparatorStyle.PLAIN,
+     sep="\n",
+ )
+
+ conv_llava_v0 = Conversation(
+     system="A chat between a curious human and an artificial intelligence assistant. "
+            "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+     roles=("Human", "Assistant"),
+     messages=(
+     ),
+     offset=0,
+     sep_style=SeparatorStyle.SINGLE,
+     sep="###",
+ )
+
+ conv_llava_v0_mmtag = Conversation(
+     system="A chat between a curious user and an artificial intelligence assistant. "
+            "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
+            "The visual content will be provided with the following format: <Image>visual content</Image>.",
+     roles=("Human", "Assistant"),
+     messages=(
+     ),
+     offset=0,
+     sep_style=SeparatorStyle.SINGLE,
+     sep="###",
+     version="v0_mmtag",
+ )
+
+ conv_llava_v1 = Conversation(
+     system="A chat between a curious human and an artificial intelligence assistant. "
+            "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+     roles=("USER", "ASSISTANT"),
+     version="v1",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.TWO,
+     sep=" ",
+     sep2="</s>",
+ )
+
+ conv_llava_v1_mmtag = Conversation(
+     system="A chat between a curious user and an artificial intelligence assistant. "
+            "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
+            "The visual content will be provided with the following format: <Image>visual content</Image>.",
+     roles=("USER", "ASSISTANT"),
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.TWO,
+     sep=" ",
+     sep2="</s>",
+     version="v1_mmtag",
+ )
+
+ conv_mistral_instruct = Conversation(
+     system="",
+     roles=("USER", "ASSISTANT"),
+     version="llama_v2",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.LLAMA_2,
+     sep="",
+     sep2="</s>",
+ )
+
+ conv_chatml_direct = Conversation(
+     system="""<|im_start|>system
+ Answer the questions.""",
+     roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+     version="mpt",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.MPT,
+     sep="<|im_end|>",
+ )
+
+ conv_llava_llama_3 = Conversation(
+     system="<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
+     roles=("<|start_header_id|>user<|end_header_id|>\n\n",
+            "<|start_header_id|>assistant<|end_header_id|>\n\n"),
+     version="llama3",
+     messages=[],
+     offset=0,
+     sep_style=SeparatorStyle.MPT,
+     sep="<|eot_id|>",
+ )
+
+ conv_ferret_llama_plain = Conversation(
+     system="",
+     roles=("", ""),
+     messages=(
+     ),
+     offset=0,
+     sep_style=SeparatorStyle.PLAIN,
+     sep="\n",
+ )
+
+ conv_ferret_vicuna_v1 = Conversation(
+     system="A chat between a human and an AI that understands visuals. "
+            "In images, [x, y] denotes points: top-left [0, 0], bottom-right [width-1, height-1]. "
+            "Increasing x moves right; y moves down. "
+            f"Bounding box: [x1, y1, x2, y2]. Image size: {VOCAB_IMAGE_W}x{VOCAB_IMAGE_H}. "
+            "Follow instructions.",
+     roles=("USER", "ASSISTANT"),
+     version="v1",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.TWO,
+     sep=" ",
+     sep2="</s>",
+ )
+
+ conv_ferret_llama_3 = Conversation(
+     system="<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nA chat between a human and an AI that understands visuals. "
+            "In images, [x, y] denotes points: top-left [0, 0], bottom-right [width-1, height-1]. "
+            "Increasing x moves right; y moves down. "
+            f"Bounding box: [x1, y1, x2, y2]. Image size: {VOCAB_IMAGE_W}x{VOCAB_IMAGE_H}. "
+            "Follow instructions.",
+     roles=("<|start_header_id|>user<|end_header_id|>\n\n",
+            "<|start_header_id|>assistant<|end_header_id|>\n\n"),
+     version="llama3",
+     messages=[],
+     offset=0,
+     sep_style=SeparatorStyle.MPT,
+     sep="<|eot_id|>",
+ )
+
+ conv_ferret_gemma_plain = Conversation(
+     system="",
+     roles=("user", "model"),
+     version="gemma",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.GEMMA,
+     sep="",
+     sep2="<eos>",
+ )
+
+ conv_ferret_gemma_instruct = Conversation(
+     system="A chat between a human and an AI that understands visuals. "
+            "In images, [x, y] denotes points: top-left [0, 0], bottom-right [width-1, height-1]. "
+            "Increasing x moves right; y moves down. "
+            f"Bounding box: [x1, y1, x2, y2]. Image size: {VOCAB_IMAGE_W}x{VOCAB_IMAGE_H}. "
+            "Follow instructions.",
+     roles=("user", "model"),
+     version="gemma",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.GEMMA,
+     sep="",
+     sep2="<eos>",
+ )
+
+ conv_ferret_phi3_instruct = Conversation(
+     system="""<|system|>\nYou are a helpful AI assistant.""",
+     roles=("\n<|user|>\n", "\n<|assistant|>\n"),
+     version="phi3",
+     messages=(),
+     offset=0,
+     sep_style=SeparatorStyle.MPT,
+     sep="<|end|>",
+ )
+
+ default_conversation = conv_ferret_gemma_instruct
+ conv_templates = {
+     "default": conv_vicuna_v0,
+     "v0": conv_vicuna_v0,
+     "v1": conv_vicuna_v1,
+     "vicuna_v1": conv_vicuna_v1,
+     "llama_2": conv_llama_2,
+     "mistral_instruct": conv_mistral_instruct,
+     "chatml_direct": conv_chatml_direct,
+     "mistral_direct": conv_chatml_direct,
+
+     "plain": conv_llava_plain,
+     "v0_plain": conv_llava_plain,
+     "llava_v0": conv_llava_v0,
+     "v0_mmtag": conv_llava_v0_mmtag,
+     "llava_v1": conv_llava_v1,
+     "v1_mmtag": conv_llava_v1_mmtag,
+     "llava_llama_2": conv_llava_llama_2,
+     "llava_llama_3": conv_llava_llama_3,
+
+     "ferret_llama_plain": conv_ferret_llama_plain,
+     "ferret_vicuna_v1": conv_ferret_vicuna_v1,
+     "ferret_llama_3": conv_ferret_llama_3,
+     "ferret_gemma_plain": conv_ferret_gemma_plain,
+     "ferret_gemma_instruct": conv_ferret_gemma_instruct,
+     "ferret_phi3_instruct": conv_ferret_phi3_instruct,
+
+     "mpt": conv_mpt,
+ }
+
+
+ if __name__ == "__main__":
+     print(default_conversation.get_prompt())
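A short sketch of how these templates are consumed, mirroring what cli.py does with the default ferret_gemma_instruct template (the user message is a made-up example):

from conversation import conv_templates

conv = conv_templates["ferret_gemma_instruct"].copy()
conv.append_message(conv.roles[0], "<image>\nWhat does this screen show?")
conv.append_message(conv.roles[1], None)  # open slot for the model's reply
print(conv.get_prompt())
# GEMMA style: system text, then a "<start_of_turn>user ... <end_of_turn>" block,
# then an unterminated "<start_of_turn>model" turn left open for generation.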
gradio_css.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ code_highlight_css = (
2
+ """
3
+ #chatbot .hll { background-color: #ffffcc }
4
+ #chatbot .c { color: #408080; font-style: italic }
5
+ #chatbot .err { border: 1px solid #FF0000 }
6
+ #chatbot .k { color: #008000; font-weight: bold }
7
+ #chatbot .o { color: #666666 }
8
+ #chatbot .ch { color: #408080; font-style: italic }
9
+ #chatbot .cm { color: #408080; font-style: italic }
10
+ #chatbot .cp { color: #BC7A00 }
11
+ #chatbot .cpf { color: #408080; font-style: italic }
12
+ #chatbot .c1 { color: #408080; font-style: italic }
13
+ #chatbot .cs { color: #408080; font-style: italic }
14
+ #chatbot .gd { color: #A00000 }
15
+ #chatbot .ge { font-style: italic }
16
+ #chatbot .gr { color: #FF0000 }
17
+ #chatbot .gh { color: #000080; font-weight: bold }
18
+ #chatbot .gi { color: #00A000 }
19
+ #chatbot .go { color: #888888 }
20
+ #chatbot .gp { color: #000080; font-weight: bold }
21
+ #chatbot .gs { font-weight: bold }
22
+ #chatbot .gu { color: #800080; font-weight: bold }
23
+ #chatbot .gt { color: #0044DD }
24
+ #chatbot .kc { color: #008000; font-weight: bold }
25
+ #chatbot .kd { color: #008000; font-weight: bold }
26
+ #chatbot .kn { color: #008000; font-weight: bold }
27
+ #chatbot .kp { color: #008000 }
28
+ #chatbot .kr { color: #008000; font-weight: bold }
29
+ #chatbot .kt { color: #B00040 }
30
+ #chatbot .m { color: #666666 }
31
+ #chatbot .s { color: #BA2121 }
32
+ #chatbot .na { color: #7D9029 }
33
+ #chatbot .nb { color: #008000 }
34
+ #chatbot .nc { color: #0000FF; font-weight: bold }
35
+ #chatbot .no { color: #880000 }
36
+ #chatbot .nd { color: #AA22FF }
37
+ #chatbot .ni { color: #999999; font-weight: bold }
38
+ #chatbot .ne { color: #D2413A; font-weight: bold }
39
+ #chatbot .nf { color: #0000FF }
40
+ #chatbot .nl { color: #A0A000 }
41
+ #chatbot .nn { color: #0000FF; font-weight: bold }
42
+ #chatbot .nt { color: #008000; font-weight: bold }
43
+ #chatbot .nv { color: #19177C }
44
+ #chatbot .ow { color: #AA22FF; font-weight: bold }
45
+ #chatbot .w { color: #bbbbbb }
46
+ #chatbot .mb { color: #666666 }
47
+ #chatbot .mf { color: #666666 }
48
+ #chatbot .mh { color: #666666 }
49
+ #chatbot .mi { color: #666666 }
50
+ #chatbot .mo { color: #666666 }
51
+ #chatbot .sa { color: #BA2121 }
52
+ #chatbot .sb { color: #BA2121 }
53
+ #chatbot .sc { color: #BA2121 }
54
+ #chatbot .dl { color: #BA2121 }
55
+ #chatbot .sd { color: #BA2121; font-style: italic }
56
+ #chatbot .s2 { color: #BA2121 }
57
+ #chatbot .se { color: #BB6622; font-weight: bold }
58
+ #chatbot .sh { color: #BA2121 }
59
+ #chatbot .si { color: #BB6688; font-weight: bold }
60
+ #chatbot .sx { color: #008000 }
61
+ #chatbot .sr { color: #BB6688 }
62
+ #chatbot .s1 { color: #BA2121 }
63
+ #chatbot .ss { color: #19177C }
64
+ #chatbot .bp { color: #008000 }
65
+ #chatbot .fm { color: #0000FF }
66
+ #chatbot .vc { color: #19177C }
67
+ #chatbot .vg { color: #19177C }
68
+ #chatbot .vi { color: #19177C }
69
+ #chatbot .vm { color: #19177C }
70
+ #chatbot .il { color: #666666 }
71
+ """)
72
+ #.highlight { background: #f8f8f8; }
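+ # Usage sketch: this stylesheet can be appended to the app CSS when building
+ # the demo, e.g. gr.Blocks(css=block_css + code_highlight_css) in
+ # gradio_web_server.py (which currently passes only its own block_css).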
gradio_web_server.py ADDED
@@ -0,0 +1,509 @@
1
+ import argparse
2
+ import datetime
3
+ import json
4
+ import os
5
+ import time
6
+ import spaces
7
+ import gradio as gr
8
+ import requests
9
+
10
+ from conversation import (default_conversation, conv_templates,
11
+ SeparatorStyle)
12
+
13
+ LOGDIR = "."
14
+ from utils import (build_logger, server_error_msg,
15
+ violates_moderation, moderation_msg)
16
+ import hashlib
17
+
18
+
19
+ logger = build_logger("gradio_web_server", "gradio_web_server.log")
20
+
21
+ headers = {"User-Agent": "LLaVA Client"}
22
+
23
+ no_change_btn = gr.Button()
24
+ enable_btn = gr.Button(interactive=True)
25
+ disable_btn = gr.Button(interactive=False)
26
+
27
+ priority = {
28
+ "vicuna-13b": "aaaaaaa",
29
+ "koala-13b": "aaaaaab",
30
+ }
31
+
32
+
33
+ def get_conv_log_filename():
34
+ t = datetime.datetime.now()
35
+ name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
36
+ return name
37
+
38
+
39
+ def get_model_list():
40
+ ret = requests.post(args.controller_url + "/refresh_all_workers")
41
+ assert ret.status_code == 200
42
+ ret = requests.post(args.controller_url + "/list_models")
43
+ models = ret.json()["models"]
44
+ models.sort(key=lambda x: priority.get(x, x))
45
+ logger.info(f"Models: {models}")
46
+ return models
47
+
48
+
49
+ get_window_url_params = """
50
+ function() {
51
+ const params = new URLSearchParams(window.location.search);
52
+ url_params = Object.fromEntries(params);
53
+ console.log(url_params);
54
+ return url_params;
55
+ }
56
+ """
57
+
58
+
59
+ def load_demo(url_params, request: gr.Request):
60
+ logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")
61
+
62
+ dropdown_update = gr.Dropdown(visible=True)
63
+ if "model" in url_params:
64
+ model = url_params["model"]
65
+ if model in models:
66
+ dropdown_update = gr.Dropdown(value=model, visible=True)
67
+
68
+ state = default_conversation.copy()
69
+ return state, dropdown_update
70
+
71
+
72
+ def load_demo_refresh_model_list(request: gr.Request):
73
+ logger.info(f"load_demo. ip: {request.client.host}")
74
+ # models = get_model_list()
75
+ models = [
76
+ "jadechoghari/Ferret-UI-Gemma2b"
77
+ ]
78
+ state = default_conversation.copy()
79
+ dropdown_update = gr.Dropdown(
80
+ choices=models,
81
+ value=models[0] if len(models) > 0 else ""
82
+ )
83
+ return state, dropdown_update
84
+
85
+
86
+ def vote_last_response(state, vote_type, model_selector, request: gr.Request):
87
+ with open(get_conv_log_filename(), "a") as fout:
88
+ data = {
89
+ "tstamp": round(time.time(), 4),
90
+ "type": vote_type,
91
+ "model": model_selector,
92
+ "state": state.dict(),
93
+ "ip": request.client.host,
94
+ }
95
+ fout.write(json.dumps(data) + "\n")
96
+
97
+
98
+ def upvote_last_response(state, model_selector, request: gr.Request):
99
+ logger.info(f"upvote. ip: {request.client.host}")
100
+ vote_last_response(state, "upvote", model_selector, request)
101
+ return ("",) + (disable_btn,) * 3
102
+
103
+
104
+ def downvote_last_response(state, model_selector, request: gr.Request):
105
+ logger.info(f"downvote. ip: {request.client.host}")
106
+ vote_last_response(state, "downvote", model_selector, request)
107
+ return ("",) + (disable_btn,) * 3
108
+
109
+
110
+ def flag_last_response(state, model_selector, request: gr.Request):
111
+ logger.info(f"flag. ip: {request.client.host}")
112
+ vote_last_response(state, "flag", model_selector, request)
113
+ return ("",) + (disable_btn,) * 3
114
+
115
+
116
+ def regenerate(state, image_process_mode, request: gr.Request):
117
+ logger.info(f"regenerate. ip: {request.client.host}")
118
+ state.messages[-1][-1] = None
119
+ prev_human_msg = state.messages[-2]
120
+ if type(prev_human_msg[1]) in (tuple, list):
121
+ prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
122
+ state.skip_next = False
123
+ return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
124
+
125
+
126
+ def clear_history(request: gr.Request):
127
+ logger.info(f"clear_history. ip: {request.client.host}")
128
+ state = default_conversation.copy()
129
+ return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
130
+
131
+
132
+ def add_text(state, text, image, image_process_mode, request: gr.Request):
133
+ logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
134
+ if len(text) <= 0 and image is None:
135
+ state.skip_next = True
136
+ return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5
137
+ if args.moderate:
138
+ flagged = violates_moderation(text)
139
+ if flagged:
140
+ state.skip_next = True
141
+ return (state, state.to_gradio_chatbot(), moderation_msg, None) + (
142
+ no_change_btn,) * 5
143
+
144
+ text = text[:1536] # Hard cut-off
145
+ if image is not None:
146
+ text = text[:1200] # Hard cut-off for images
147
+ if '<image>' not in text:
148
+ # text = '<Image><image></Image>' + text
149
+ text = text + '\n<image>'
150
+ text = (text, image, image_process_mode)
151
+ state = default_conversation.copy()  # note: starts a fresh conversation on every submission
152
+ state.append_message(state.roles[0], text)
153
+ state.append_message(state.roles[1], None)
154
+ state.skip_next = False
155
+ return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5
156
+
157
+ @spaces.GPU()
158
+ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request):
159
+ logger.info(f"http_bot. ip: {request.client.host}")
160
+ start_tstamp = time.time()
161
+ model_name = model_selector
162
+
163
+ if state.skip_next:
164
+ # This generate call is skipped due to invalid inputs
165
+ yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
166
+ return
167
+
168
+ if len(state.messages) == state.offset + 2:
169
+ # First round of conversation
170
+ if "llava" in model_name.lower():
171
+ if 'llama-2' in model_name.lower():
172
+ template_name = "llava_llama_2"
173
+ elif "mistral" in model_name.lower() or "mixtral" in model_name.lower():
174
+ if 'orca' in model_name.lower():
175
+ template_name = "mistral_orca"
176
+ elif 'hermes' in model_name.lower():
177
+ template_name = "chatml_direct"
178
+ else:
179
+ template_name = "mistral_instruct"
180
+ elif 'llava-v1.6-34b' in model_name.lower():
181
+ template_name = "chatml_direct"
182
+ elif "v1" in model_name.lower():
183
+ if 'mmtag' in model_name.lower():
184
+ template_name = "v1_mmtag"
185
+ elif 'plain' in model_name.lower() and 'finetune' not in model_name.lower():
186
+ template_name = "v1_mmtag"
187
+ else:
188
+ template_name = "llava_v1"
189
+ elif "mpt" in model_name.lower():
190
+ template_name = "mpt"
191
+ else:
192
+ if 'mmtag' in model_name.lower():
193
+ template_name = "v0_mmtag"
194
+ elif 'plain' in model_name.lower() and 'finetune' not in model_name.lower():
195
+ template_name = "v0_mmtag"
196
+ else:
197
+ template_name = "llava_v0"
198
+ elif "mpt" in model_name:
199
+ template_name = "mpt_text"
200
+ elif "llama-2" in model_name:
201
+ template_name = "llama_2"
202
+ elif "gemma" in model_name.lower():
203
+ template_name = "ferret_gemma_instruct"
204
+ print("conv mode to gemma")
205
+ else:
206
+ template_name = "vicuna_v1"
207
+ new_state = conv_templates[template_name].copy()
208
+ new_state.append_message(new_state.roles[0], state.messages[-2][1])
209
+ new_state.append_message(new_state.roles[1], None)
210
+ state = new_state
211
+
212
+ # Query worker address
213
+ controller_url = args.controller_url
214
+ ret = requests.post(controller_url + "/get_worker_address",
215
+ json={"model": model_name})
216
+ worker_addr = ret.json()["address"]
217
+ logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")
218
+
219
+ # No available worker
220
+ if worker_addr == "":
221
+ state.messages[-1][-1] = server_error_msg
222
+ yield (state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
223
+ return
224
+
225
+ # Construct prompt
226
+ prompt = state.get_prompt()
227
+
228
+ all_images = state.get_images(return_pil=True)
229
+ all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
230
+ for image, hash in zip(all_images, all_image_hash):
231
+ t = datetime.datetime.now()
232
+ filename = os.path.join(LOGDIR, "serve_images", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
233
+ if not os.path.isfile(filename):
234
+ os.makedirs(os.path.dirname(filename), exist_ok=True)
235
+ image.save(filename)
236
+
237
+ # Make requests
238
+ pload = {
239
+ "model": model_name,
240
+ "prompt": prompt,
241
+ "temperature": float(temperature),
242
+ "top_p": float(top_p),
243
+ "max_new_tokens": min(int(max_new_tokens), 1536),
244
+ "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
245
+ "images": f'List of {len(state.get_images())} images: {all_image_hash}',
246
+ }
247
+ logger.info(f"==== request ====\n{pload}")
248
+
249
+ pload['images'] = state.get_images()
250
+
251
+ state.messages[-1][-1] = "▌"
252
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
253
+
254
+ try:
255
+ # Stream output
256
+ response = requests.post(worker_addr + "/worker_generate_stream",
257
+ headers=headers, json=pload, stream=True, timeout=10)
258
+ for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
259
+ if chunk:
260
+ data = json.loads(chunk.decode())
261
+ if data["error_code"] == 0:
262
+ output = data["text"][len(prompt):].strip()
263
+ state.messages[-1][-1] = output + "▌"
264
+ yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
265
+ else:
266
+ output = data["text"] + f" (error_code: {data['error_code']})"
267
+ state.messages[-1][-1] = output
268
+ yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
269
+ return
270
+ time.sleep(0.03)
271
+ except requests.exceptions.RequestException as e:
272
+ state.messages[-1][-1] = server_error_msg
273
+ yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
274
+ return
275
+
276
+ state.messages[-1][-1] = state.messages[-1][-1][:-1]
277
+ yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
278
+
279
+ finish_tstamp = time.time()
280
+ logger.info(f"{output}")
281
+
282
+ with open(get_conv_log_filename(), "a") as fout:
283
+ data = {
284
+ "tstamp": round(finish_tstamp, 4),
285
+ "type": "chat",
286
+ "model": model_name,
287
+ "start": round(start_tstamp, 4),
288
+ "finish": round(finish_tstamp, 4),
289
+ "state": state.dict(),
290
+ "images": all_image_hash,
291
+ "ip": request.client.host,
292
+ }
293
+ fout.write(json.dumps(data) + "\n")
294
+
295
+ title_markdown = ("""
296
+ # 🌋 LLaVA: Large Language and Vision Assistant
297
+ [[Project Page](https://llava-vl.github.io)] [[Code](https://github.com/haotian-liu/LLaVA)] [[Model](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md)] | 📚 [[LLaVA](https://arxiv.org/abs/2304.08485)] [[LLaVA-v1.5](https://arxiv.org/abs/2310.03744)] [[LLaVA-v1.6](https://llava-vl.github.io/blog/2024-01-30-llava-1-6/)]
298
+ """)
299
+
300
+ tos_markdown = ("""
301
+ ### Terms of use
302
+ By using this service, users are required to agree to the following terms:
303
+ The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
304
+ Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
305
+ For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
306
+ """)
307
+
308
+
309
+ learn_more_markdown = ("""
310
+ ### License
311
+ The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
312
+ """)
313
+
314
+ block_css = """
315
+
316
+ #buttons button {
317
+ min-width: min(120px,100%);
318
+ }
319
+
320
+ """
321
+
322
+ def build_demo(embed_mode, cur_dir=None, concurrency_count=10):
323
+ textbox = gr.Textbox(show_label=False, placeholder="Enter text and press ENTER", container=False)
324
+ with gr.Blocks(title="LLaVA", theme=gr.themes.Default(), css=block_css) as demo:
325
+ state = gr.State()
326
+
327
+ if not embed_mode:
328
+ gr.Markdown(title_markdown)
329
+
330
+ with gr.Row():
331
+ models = [
332
+ "jadechoghari/Ferret-UI-Gemma2b"
333
+ ]
334
+ with gr.Column(scale=3):
335
+ with gr.Row(elem_id="model_selector_row"):
336
+ model_selector = gr.Dropdown(
337
+ choices=models,
338
+ value=models[0] if len(models) > 0 else "",
339
+ interactive=True,
340
+ show_label=False,
341
+ container=False)
342
+
343
+ imagebox = gr.Image(type="pil")
344
+ image_process_mode = gr.Radio(
345
+ ["Crop", "Resize", "Pad", "Default"],
346
+ value="Default",
347
+ label="Preprocess for non-square image", visible=False)
348
+
349
+ if cur_dir is None:
350
+ cur_dir = os.path.dirname(os.path.abspath(__file__))
351
+ gr.Examples(examples=[
352
+ [f"{cur_dir}/examples/extreme_ironing.jpg", "What is unusual about this image?"],
353
+ [f"{cur_dir}/examples/waterview.jpg", "What are the things I should be cautious about when I visit here?"],
354
+ ], inputs=[imagebox, textbox])
355
+
356
+ with gr.Accordion("Parameters", open=False) as parameter_row:
357
+ temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
358
+ top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
359
+ max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
360
+
361
+ with gr.Column(scale=8):
362
+ chatbot = gr.Chatbot(
363
+ elem_id="chatbot",
364
+ label="LLaVA Chatbot",
365
+ height=650,
366
+ layout="panel",
367
+ )
368
+ with gr.Row():
369
+ with gr.Column(scale=8):
370
+ textbox.render()
371
+ with gr.Column(scale=1, min_width=50):
372
+ submit_btn = gr.Button(value="Send", variant="primary")
373
+ with gr.Row(elem_id="buttons") as button_row:
374
+ upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
375
+ downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
376
+ flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
377
+ #stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
378
+ regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
379
+ clear_btn = gr.Button(value="🗑️ Clear", interactive=False)
380
+
381
+ if not embed_mode:
382
+ gr.Markdown(tos_markdown)
383
+ gr.Markdown(learn_more_markdown)
384
+ url_params = gr.JSON(visible=False)
385
+
386
+ # Register listeners
387
+ btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
388
+ upvote_btn.click(
389
+ upvote_last_response,
390
+ [state, model_selector],
391
+ [textbox, upvote_btn, downvote_btn, flag_btn]
392
+ )
393
+ downvote_btn.click(
394
+ downvote_last_response,
395
+ [state, model_selector],
396
+ [textbox, upvote_btn, downvote_btn, flag_btn]
397
+ )
398
+ flag_btn.click(
399
+ flag_last_response,
400
+ [state, model_selector],
401
+ [textbox, upvote_btn, downvote_btn, flag_btn]
402
+ )
403
+
404
+ regenerate_btn.click(
405
+ regenerate,
406
+ [state, image_process_mode],
407
+ [state, chatbot, textbox, imagebox] + btn_list
408
+ ).then(
409
+ http_bot,
410
+ [state, model_selector, temperature, top_p, max_output_tokens],
411
+ [state, chatbot] + btn_list,
412
+ concurrency_limit=concurrency_count
413
+ )
414
+
415
+ clear_btn.click(
416
+ clear_history,
417
+ None,
418
+ [state, chatbot, textbox, imagebox] + btn_list,
419
+ queue=False
420
+ )
421
+
422
+ textbox.submit(
423
+ add_text,
424
+ [state, textbox, imagebox, image_process_mode],
425
+ [state, chatbot, textbox, imagebox] + btn_list,
426
+ queue=False
427
+ ).then(
428
+ http_bot,
429
+ [state, model_selector, temperature, top_p, max_output_tokens],
430
+ [state, chatbot] + btn_list,
431
+ concurrency_limit=concurrency_count
432
+ )
433
+
434
+ submit_btn.click(
435
+ add_text,
436
+ [state, textbox, imagebox, image_process_mode],
437
+ [state, chatbot, textbox, imagebox] + btn_list
438
+ ).then(
439
+ http_bot,
440
+ [state, model_selector, temperature, top_p, max_output_tokens],
441
+ [state, chatbot] + btn_list,
442
+ concurrency_limit=concurrency_count
443
+ )
444
+
445
+ if args.model_list_mode == "once":
446
+ demo.load(
447
+ load_demo,
448
+ [url_params],
449
+ [state, model_selector],
450
+ js=get_window_url_params
451
+ )
452
+ elif args.model_list_mode == "reload":
453
+ demo.load(
454
+ load_demo_refresh_model_list,
455
+ None,
456
+ [state, model_selector],
457
+ queue=False
458
+ )
459
+ else:
460
+ raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
461
+
462
+ return demo
463
+
464
+
465
+ # if __name__ == "__main__":
466
+ # parser = argparse.ArgumentParser()
467
+ # parser.add_argument("--port", type=int, default=7860) # You can still specify the port
468
+ # parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
469
+ # parser.add_argument("--concurrency-count", type=int, default=16)
470
+ # parser.add_argument("--model-list-mode", type=str, default="once", choices=["once", "reload"])
471
+ # parser.add_argument("--share", action="store_true")
472
+ # parser.add_argument("--moderate", action="store_true")
473
+ # parser.add_argument("--embed", action="store_true")
474
+ # args = parser.parse_args()
475
+ # # models = get_model_list()
476
+ # demo = build_demo(args.embed, concurrency_count=args.concurrency_count)
477
+ # demo.queue(api_open=False).launch(
478
+ # server_port=args.port, # Specify the port if needed
479
+ # share=True,
480
+ # debug=True # All other functionalities like sharing still work
481
+ # )
482
+ if __name__ == "__main__":
483
+ parser = argparse.ArgumentParser()
484
+ parser.add_argument("--host", type=str, default="0.0.0.0")
485
+ parser.add_argument("--port", type=int)
486
+ parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
487
+ parser.add_argument("--concurrency-count", type=int, default=16)
488
+ parser.add_argument("--model-list-mode", type=str, default="once",
489
+ choices=["once", "reload"])
490
+ parser.add_argument("--share", action="store_true")
491
+ parser.add_argument("--moderate", action="store_true")
492
+ parser.add_argument("--embed", action="store_true")
493
+ args = parser.parse_args()
494
+ logger.info(f"args: {args}")
495
+
496
+ models = [
497
+ "jadechoghari/Ferret-UI-Gemma2b"
498
+ ]
499
+
500
+
501
+ demo = build_demo(args.embed, concurrency_count=args.concurrency_count)
502
+ demo.queue(
503
+ api_open=False
504
+ ).launch(
505
+ server_name=args.host,
506
+ server_port=args.port or 8001,
507
+ share=True,
508
+ debug=True
509
+ )
inference.py ADDED
@@ -0,0 +1,101 @@
1
+ import os
2
+ import re
3
+ import json
4
+ import subprocess
5
+
6
+ from PIL import Image, ImageDraw
7
+
8
+
9
+ def process_inference_results(results, process_image=False):
10
+ """
11
+ Process the inference results by:
12
+ 1. Adding bounding boxes on the image based on the coordinates in 'text'.
13
+ 2. Extracting and returning the text prompt.
14
+
15
+ :param results: List of inference results with bounding boxes in 'text'.
16
+ :return: (processed_images, extracted_texts) if process_image is True, otherwise just extracted_texts.
17
+ """
18
+ processed_images = []
19
+ extracted_texts = []
20
+
21
+ for result in results:
22
+ image_path = result['image_path']
23
+ img = Image.open(image_path).convert("RGB")
24
+ draw = ImageDraw.Draw(img)
25
+
26
+ bbox_str = re.search(r'\[\[([0-9,\s]+)\]\]', result['text'])
27
+ if bbox_str:
28
+ bbox = [int(coord) for coord in bbox_str.group(1).split(',')]
29
+ x1, y1, x2, y2 = bbox
30
+
31
+ draw.rectangle([x1, y1, x2, y2], outline="red", width=3)
32
+
33
+ extracted_texts.append(result['text'])
34
+
35
+ processed_images.append(img)
36
+
37
+ if process_image:
38
+ return processed_images, extracted_texts
39
+
40
+ return extracted_texts
41
+
42
+ def inference_and_run(image_path, prompt, conv_mode="ferret_gemma_instruct", model_path="jadechoghari/Ferret-UI-Gemma2b", box=None, process_image=False):
43
+ """
44
+ Run the inference and capture the errors for debugging.
45
+ """
46
+ data_input = [{
47
+ "id": 0,
48
+ "image": os.path.basename(image_path),
49
+ "image_h": Image.open(image_path).height,
50
+ "image_w": Image.open(image_path).width,
51
+ "conversations": [{"from": "human", "value": f"<image>\n{prompt}"}]
52
+ }]
53
+
54
+ if box:
55
+ data_input[0]["box_x1y1x2y2"] = [[box]]
56
+
57
+ with open("eval.json", "w") as json_file:
58
+ json.dump(data_input, json_file)
59
+
60
+ print("eval.json file created successfully.")
61
+
62
+ cmd = [
63
+ "python", "-m", "model_UI",
64
+ "--model_path", model_path,
65
+ "--data_path", "eval.json",
66
+ "--image_path", ".",
67
+ "--answers_file", "eval_output.jsonl",
68
+ "--num_beam", "1",
69
+ "--max_new_tokens", "32",
70
+ "--conv_mode", conv_mode
71
+ ]
72
+
73
+ if box:
74
+ cmd.extend(["--region_format", "box", "--add_region_feature"])
75
+
76
+ try:
77
+ result = subprocess.run(cmd, check=True, capture_output=True, text=True)
78
+ print(f"Subprocess output:\n{result.stdout}")
79
+ print(f"Subprocess error (if any):\n{result.stderr}")
80
+ print(f"Inference completed. Output written to eval_output.jsonl")
81
+
82
+ output_folder = 'eval_output.jsonl'  # model_UI treats --answers_file as a folder and writes *.jsonl shards inside it
83
+ if os.path.exists(output_folder):
84
+ json_files = [f for f in os.listdir(output_folder) if f.endswith(".jsonl")]
85
+ if json_files:
86
+ output_file_path = os.path.join(output_folder, json_files[0])
87
+ with open(output_file_path, "r") as output_file:
88
+ results = [json.loads(line) for line in output_file]
89
+
90
+ return process_inference_results(results, process_image)
91
+ else:
92
+ print("No output JSONL files found.")
93
+ return None, None
94
+ else:
95
+ print("Output folder not found.")
96
+ return None, None
97
+
98
+ except subprocess.CalledProcessError as e:
99
+ print(f"Error occurred during inference:\n{e}")
100
+ print(f"Subprocess output:\n{e.output}")
101
+ return None, None
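+
+ # Usage sketch (paths, prompt, and box are illustrative):
+ # texts = inference_and_run("screenshot.png", "Describe the screen.")
+ # images, texts = inference_and_run("screenshot.png", "What is this widget?",
+ #                                   box=[10, 10, 200, 80], process_image=True)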
mm_utils.py ADDED
@@ -0,0 +1,260 @@
1
+ from PIL import Image
2
+ from io import BytesIO
3
+ import base64
4
+ import torch
5
+ import math
6
+ import ast
7
+ from typing import Optional, Callable
8
+
9
+ from transformers import StoppingCriteria
10
+ IMAGE_TOKEN_INDEX = -200
11
+
12
+
13
+ def select_best_resolution(original_size, possible_resolutions):
14
+ """
15
+ Selects the best resolution from a list of possible resolutions based on the original size.
16
+
17
+ Args:
18
+ original_size (tuple): The original size of the image in the format (width, height).
19
+ possible_resolutions (list): A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
20
+
21
+ Returns:
22
+ tuple: The best fit resolution in the format (width, height).
23
+ """
24
+ original_width, original_height = original_size
25
+ best_fit = None
26
+ max_effective_resolution = 0
27
+ min_wasted_resolution = float('inf')
28
+
29
+ for width, height in possible_resolutions:
30
+ scale = min(width / original_width, height / original_height)
31
+ downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
32
+ effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
33
+ wasted_resolution = (width * height) - effective_resolution
34
+
35
+ if effective_resolution > max_effective_resolution or (effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution):
36
+ max_effective_resolution = effective_resolution
37
+ min_wasted_resolution = wasted_resolution
38
+ best_fit = (width, height)
39
+
40
+ return best_fit
41
+
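+ # Worked example (candidate list is illustrative): for an 800x600 image and
+ # possible_resolutions [(336, 672), (672, 336), (672, 672)], scaling into
+ # 672x672 keeps the most effective pixels (672x504 after downscaling), so
+ # select_best_resolution((800, 600), [(336, 672), (672, 336), (672, 672)])
+ # returns (672, 672).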
42
+
43
+ def resize_and_pad_image(image, target_resolution, is_pad=False):
44
+ """
45
+ Resize an image to a target resolution; if is_pad is True, keep the aspect ratio and pad with black, otherwise stretch to fit.
46
+ Args:
47
+ image (PIL.Image.Image): The input image.
48
+ target_resolution (tuple): The target resolution (width, height) of the image.
49
+ Returns:
50
+ PIL.Image.Image: The resized and padded image.
51
+ """
52
+ original_width, original_height = image.size
53
+ target_width, target_height = target_resolution
54
+
55
+ if is_pad:
56
+ scale_w = target_width / original_width
57
+ scale_h = target_height / original_height
58
+
59
+ if scale_w < scale_h:
60
+ new_width = target_width
61
+ new_height = min(math.ceil(original_height * scale_w), target_height)
62
+ else:
63
+ new_height = target_height
64
+ new_width = min(math.ceil(original_width * scale_h), target_width)
65
+
66
+ # Resize the image
67
+ resized_image = image.resize((new_width, new_height))
68
+
69
+ new_image = Image.new('RGB', (target_width, target_height), (0, 0, 0))
70
+ paste_x = (target_width - new_width) // 2
71
+ paste_y = (target_height - new_height) // 2
72
+ new_image.paste(resized_image, (paste_x, paste_y))
73
+ else:
74
+ new_image = image.resize((target_width, target_height))
75
+
76
+ return new_image
77
+
78
+
79
+ def divide_to_patches(image, patch_size):
80
+ """
81
+ Divides an image into patches of a specified size.
82
+
83
+ Args:
84
+ image (PIL.Image.Image): The input image.
85
+ patch_size (int): The size of each patch.
86
+
87
+ Returns:
88
+ list: A list of PIL.Image.Image objects representing the patches.
89
+ """
90
+ patches = []
91
+ width, height = image.size
92
+ for i in range(0, height, patch_size):
93
+ for j in range(0, width, patch_size):
94
+ box = (j, i, j + patch_size, i + patch_size)
95
+ patch = image.crop(box)
96
+ patches.append(patch)
97
+
98
+ return patches
99
+
100
+
101
+ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
102
+ """
103
+ Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
104
+
105
+ Args:
106
+ image_size (tuple): The size of the input image in the format (width, height).
107
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
108
+ patch_size (int): The size of each image patch.
109
+
110
+ Returns:
111
+ tuple: The shape of the image patch grid in the format (width, height).
112
+ """
113
+ if type(grid_pinpoints) is list:
114
+ possible_resolutions = grid_pinpoints
115
+ else:
116
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
117
+ width, height = select_best_resolution(image_size, possible_resolutions)
118
+ return width // patch_size, height // patch_size
119
+
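+ # Worked example: with image_size (800, 600), the candidate list above and
+ # patch_size 336, the best resolution is (672, 672), so this returns (2, 2).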
120
+
121
+ def process_anyres_image(image, processor, grid_pinpoints, image_process_func: Optional[Callable] = None):
122
+ """
123
+ Process an image with variable resolutions.
124
+
125
+ Args:
126
+ image (PIL.Image.Image): The input image to be processed.
127
+ processor: The image processor object.
128
+ grid_pinpoints (str): A string representation of a list of possible resolutions.
129
+
130
+ Returns:
131
+ torch.Tensor: A tensor containing the processed image patches.
132
+ """
133
+ if type(grid_pinpoints) is list:
134
+ possible_resolutions = grid_pinpoints
135
+ else:
136
+ possible_resolutions = ast.literal_eval(grid_pinpoints)
137
+
138
+ best_resolution = select_best_resolution(image.size, possible_resolutions)
139
+
140
+ # FIXME: not sure if do_pad or undo_pad may affect the referring side
141
+ image_padded = resize_and_pad_image(image, best_resolution, is_pad=False)
142
+
143
+ patches = divide_to_patches(image_padded, processor.crop_size['height'])
144
+
145
+ if image_process_func:
146
+ resized_image_h, resized_image_w = image_process_func.keywords['size']
147
+ image_original_resize = image.resize((resized_image_w, resized_image_h))
148
+ image_patches = [image_original_resize] + patches
149
+ image_patches = [image_process_func(image_patch)['pixel_values'][0]
150
+ for image_patch in image_patches]
151
+ else:
152
+ image_original_resize = image.resize((processor.size['shortest_edge'], processor.size['shortest_edge']))
153
+ image_patches = [image_original_resize] + patches
154
+ image_patches = [processor.preprocess(image_patch, return_tensors='pt')['pixel_values'][0]
155
+ for image_patch in image_patches]
156
+
157
+ return torch.stack(image_patches, dim=0)
158
+
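+ # Shape sketch (assuming a CLIP-style processor with 336px crops): a
+ # (672, 672) best resolution yields 2x2 = 4 patches plus the resized
+ # original, i.e. a tensor of shape [5, 3, 336, 336].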
159
+
160
+ def load_image_from_base64(image):
161
+ return Image.open(BytesIO(base64.b64decode(image)))
162
+
163
+
164
+ def expand2square(pil_img, background_color):
165
+ width, height = pil_img.size
166
+ if width == height:
167
+ return pil_img
168
+ elif width > height:
169
+ result = Image.new(pil_img.mode, (width, width), background_color)
170
+ result.paste(pil_img, (0, (width - height) // 2))
171
+ return result
172
+ else:
173
+ result = Image.new(pil_img.mode, (height, height), background_color)
174
+ result.paste(pil_img, ((height - width) // 2, 0))
175
+ return result
176
+
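+ # Example: a 640x480 image becomes 640x640, with the original pasted at
+ # (0, 80) so it sits vertically centered on the background color.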
177
+
178
+ def process_images(images, image_processor, model_cfg, image_process_func: Optional[Callable] = None):
179
+ image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
180
+ new_images = []
181
+ if image_aspect_ratio == 'pad':
182
+ for image in images:
183
+ image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
184
+ image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
185
+ new_images.append(image)
186
+ elif image_aspect_ratio == "anyres":
187
+ # image_processor(images, return_tensors='pt', do_resize=True, do_center_crop=False, size=[image_h, image_w])['pixel_values']
188
+ for image in images:
189
+ image = process_anyres_image(image, image_processor, model_cfg.image_grid_pinpoints, image_process_func=image_process_func)
190
+ new_images.append(image)
191
+ else:
192
+ return image_processor(images, return_tensors='pt')['pixel_values']
193
+ if all(x.shape == new_images[0].shape for x in new_images):
194
+ new_images = torch.stack(new_images, dim=0)
195
+ return new_images
196
+
197
+
198
+ def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
199
+ prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
200
+
201
+ def insert_separator(X, sep):
202
+ return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
203
+
204
+ input_ids = []
205
+ offset = 0
206
+ if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
207
+ offset = 1
208
+ input_ids.append(prompt_chunks[0][0])
209
+
210
+ for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
211
+ input_ids.extend(x[offset:])
212
+
213
+ if return_tensors is not None:
214
+ if return_tensors == 'pt':
215
+ return torch.tensor(input_ids, dtype=torch.long)
216
+ raise ValueError(f'Unsupported tensor type: {return_tensors}')
217
+ return input_ids
218
+
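+ # Example: "A<image>B" becomes tokenize("A") + [IMAGE_TOKEN_INDEX] +
+ # tokenize("B"), with the duplicated BOS of the second chunk dropped; the
+ # model later splices visual features in at the -200 placeholder.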
219
+
220
+ def get_model_name_from_path(model_path):
221
+ model_path = model_path.strip("/")
222
+ model_paths = model_path.split("/")
223
+ if model_paths[-1].startswith('checkpoint-'):
224
+ return model_paths[-2] + "_" + model_paths[-1]
225
+ else:
226
+ return model_paths[-1]
227
+
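+ # Examples (paths illustrative): "org/llava-v1.5-7b" -> "llava-v1.5-7b";
+ # "ckpts/ferret-ui/checkpoint-3000" -> "ferret-ui_checkpoint-3000".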
228
+ class KeywordsStoppingCriteria(StoppingCriteria):
229
+ def __init__(self, keywords, tokenizer, input_ids):
230
+ self.keywords = keywords
231
+ self.keyword_ids = []
232
+ self.max_keyword_len = 0
233
+ for keyword in keywords:
234
+ cur_keyword_ids = tokenizer(keyword).input_ids
235
+ if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
236
+ cur_keyword_ids = cur_keyword_ids[1:]
237
+ if len(cur_keyword_ids) > self.max_keyword_len:
238
+ self.max_keyword_len = len(cur_keyword_ids)
239
+ self.keyword_ids.append(torch.tensor(cur_keyword_ids))
240
+ self.tokenizer = tokenizer
241
+ self.start_len = input_ids.shape[1]
242
+
243
+ def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
244
+ offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
245
+ self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
246
+ for keyword_id in self.keyword_ids:
247
+ truncated_output_ids = output_ids[0, -keyword_id.shape[0]:]
248
+ if torch.equal(truncated_output_ids, keyword_id):
249
+ return True
250
+ outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
251
+ for keyword in self.keywords:
252
+ if keyword in outputs:
253
+ return True
254
+ return False
255
+
256
+ def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
257
+ outputs = []
258
+ for i in range(output_ids.shape[0]):
259
+ outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores))
260
+ return all(outputs)
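+
+ # Usage sketch (assumes a loaded tokenizer/model and tokenized input_ids):
+ # stop = KeywordsStoppingCriteria(["</s>"], tokenizer, input_ids)
+ # output_ids = model.generate(input_ids, stopping_criteria=[stop], max_new_tokens=64)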
model_UI.py ADDED
@@ -0,0 +1,273 @@
1
+ import argparse
2
+ import torch
3
+ import os
4
+ import json
5
+ from tqdm import tqdm
6
+
7
+ IMAGE_TOKEN_INDEX = -200
8
+ DEFAULT_IMAGE_TOKEN = "<image>"
9
+ DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
10
+ DEFAULT_IM_START_TOKEN = "<im_start>"
11
+ DEFAULT_IM_END_TOKEN = "<im_end>"
12
+ IMAGE_PLACEHOLDER = "<image-placeholder>"
13
+
14
+ # Added by Ferret
15
+ DEFAULT_REGION_FEA_TOKEN = "<region_fea>"
16
+ VOCAB_IMAGE_W = 1000
17
+ VOCAB_IMAGE_H = 1000
18
+ from conversation import conv_templates, SeparatorStyle
19
+ from builder import load_pretrained_model
20
+
21
+ from mm_utils import tokenizer_image_token, process_images
22
+
23
+ from PIL import Image
24
+ import math
25
+ import pdb
26
+ import numpy as np
27
+ from copy import deepcopy
28
+ from functools import partial
29
+
30
+ def disable_torch_init():
31
+ """
32
+ Disable the redundant torch default initialization to accelerate model creation.
33
+ """
34
+ import torch
35
+ setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
36
+ setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
37
+
38
+ def split_list(lst, n):
39
+ """Split a list into n (roughly) equal-sized chunks"""
40
+ chunk_size = math.ceil(len(lst) / n) # ceiling division
41
+ return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
42
+
43
+ def get_chunk(lst, n, k):
44
+ chunks = split_list(lst, n)
45
+ return chunks[k]
46
+
47
+ def generate_mask_for_feature(coor, raw_w, raw_h, mask=None):
48
+ if mask is not None:
49
+ assert mask.shape[0] == raw_w and mask.shape[1] == raw_h
50
+ coor_mask = np.zeros((raw_w, raw_h))
51
+ # Assume it samples a point.
52
+ if len(coor) == 2:
53
+ # Define window size
54
+ span = 5
55
+ # Make sure the window does not exceed array bounds
56
+ x_min = max(0, coor[0] - span)
57
+ x_max = min(raw_w, coor[0] + span + 1)
58
+ y_min = max(0, coor[1] - span)
59
+ y_max = min(raw_h, coor[1] + span + 1)
60
+ coor_mask[int(x_min):int(x_max), int(y_min):int(y_max)] = 1
61
+ assert (coor_mask==1).any(), f"coor: {coor}, raw_w: {raw_w}, raw_h: {raw_h}"
62
+ elif len(coor) == 4:
63
+ # Box input or Sketch input.
64
+ coor_mask[coor[0]:coor[2]+1, coor[1]:coor[3]+1] = 1
65
+ if mask is not None:
66
+ coor_mask = coor_mask * mask
67
+ coor_mask = torch.from_numpy(coor_mask)
68
+ try:
69
+ assert len(coor_mask.nonzero()) != 0
70
+ except AssertionError:
71
+ pdb.set_trace()
72
+ return coor_mask
73
+
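+ # Example: a point [100, 150] on a 500x400 image marks an 11x11 window of
+ # ones (span 5 on each side); a box [10, 20, 60, 90] fills that rectangle
+ # (end coordinates inclusive).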
74
+ def get_task_from_file(file):
75
+ box_in_tasks = ['widgetcaptions', 'taperception', 'ocr', 'icon_recognition', 'widget_classification', 'example_0']
76
+ # box_out_tasks = ['widget_listing', 'find_text', 'find_icons', 'find_widget', 'conversation_interaction']
77
+ # no_box = ['screen2words', 'detailed_description', 'conversation_perception', 'gpt4']
78
+ if any(task in file for task in box_in_tasks):
79
+ return 'box_in'
80
+ else:
81
+ return 'no_box_in'
82
+ # elif any(task in file for task in box_out_tasks):
83
+ # return 'box_out'
84
+ # elif any(task in file for task in no_box):
85
+ # return 'no_box'
86
+
87
+ def get_bbox_coor(box, ratio_w, ratio_h):
88
+ return box[0] * ratio_w, box[1] * ratio_h, box[2] * ratio_w, box[3] * ratio_h
89
+
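+ # Example: for a 500x250 screenshot, ratio_w = 2.0 and ratio_h = 4.0, so a
+ # raw box [10, 20, 30, 40] maps to (20.0, 80.0, 60.0, 160.0) in the
+ # 1000x1000 text-coordinate vocabulary.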
90
+ def get_model_name_from_path(model_path):
91
+ if 'gemma' in model_path:
92
+ return 'ferret_gemma'
93
+ elif 'llama' in model_path or 'vicuna' in model_path:
94
+ return 'ferret_llama'
95
+ else:
96
+ raise ValueError(f"No model matched for {model_path}")
97
+
98
+ class UIData:
99
+ def __init__(self, data_path, image_path, args) -> None:
100
+ self.obj_list = json.load(open(data_path, 'r'))
101
+ self.image_path = image_path
102
+ self.args = args
103
+ self._ids = range(len(self.obj_list))
104
+ self.task = get_task_from_file(data_path)
105
+
106
+ @property
107
+ def ids(self):
108
+ return deepcopy(self._ids)
109
+
110
+ def __getitem__(self, idx):
111
+ i = self.obj_list[idx]
112
+
113
+ # image stuff
114
+ image_path_i = os.path.join(self.image_path, i['image'].split('/')[-1])
115
+ image = Image.open(image_path_i).convert('RGB')
116
+
117
+ q_turn = i['conversations'][0]['value']
118
+ if "<image>" in q_turn:
119
+ prompt = q_turn.split('\n')[1]
120
+ else:
121
+ prompt = q_turn
122
+ i['question'] = prompt
123
+ i['region_masks'] = None
124
+
125
+ if self.task == 'box_in':
126
+ ratio_w = VOCAB_IMAGE_W * 1.0 / i['image_w']
127
+ ratio_h = VOCAB_IMAGE_H * 1.0 / i['image_h']
128
+
129
+ box = i['box_x1y1x2y2'][0][0]
130
+ box_x1, box_y1, box_x2, box_y2 = box
131
+ box_x1_textvocab, box_y1_textvocab, box_x2_textvocab, box_y2_textvocab = get_bbox_coor(box=box, ratio_h=ratio_h, ratio_w=ratio_w)
132
+
133
+ if self.args.region_format == 'box':
134
+ region_coordinate_raw = [box_x1, box_y1, box_x2, box_y2]
135
+ if self.args.add_region_feature:
136
+ i['question'] = prompt.replace('<bbox_location0>', '[{}, {}, {}, {}] {}'.format(int(box_x1_textvocab), int(box_y1_textvocab), int(box_x2_textvocab), int(box_y2_textvocab), DEFAULT_REGION_FEA_TOKEN))
137
+ generated_mask = generate_mask_for_feature(region_coordinate_raw, raw_w=i['image_w'], raw_h=i['image_h'], mask=None)
138
+ i['region_masks'] = [generated_mask]
139
+ else:
140
+ i['question'] = prompt.replace('<bbox_location0>', '[{}, {}, {}, {}]'.format(int(box_x1_textvocab), int(box_y1_textvocab), int(box_x2_textvocab), int(box_y2_textvocab)))
141
+ else:
142
+ raise NotImplementedError(f'{self.args.region_format} is not supported.')
143
+
144
+ return image, i, image.size
145
+
146
+ def eval_model(args):
147
+ # Data
148
+ dataset = UIData(data_path=args.data_path, image_path=args.image_path, args=args)
149
+ data_ids = dataset.ids
150
+
151
+ # Model
152
+ disable_torch_init()
153
+ model_path = os.path.expanduser(args.model_path)
154
+ model_name = get_model_name_from_path(model_path)
155
+ tokenizer, model, image_processor, context_len = \
156
+ load_pretrained_model(model_path, args.model_base, model_name)
157
+
158
+ chunk_data_ids = get_chunk(data_ids, args.num_chunks, args.chunk_idx)
159
+ answers_folder = os.path.expanduser(args.answers_file)
160
+ os.makedirs(answers_folder, exist_ok=True)
161
+ answers_file = os.path.join(answers_folder, f'{args.chunk_idx}_of_{args.num_chunks}.jsonl')
162
+ ans_file = open(answers_file, "w")
163
+
164
+ for i, id in enumerate(tqdm(chunk_data_ids)):
165
+ img, ann, image_size = dataset[id]
166
+ image_path = ann['image']
167
+ qs = ann["question"]
168
+ cur_prompt = qs
169
+
170
+ if "<image>" in qs:
171
+ qs = qs.split('\n')[1]
172
+
173
+ if model.config.mm_use_im_start_end:
174
+ qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
175
+ else:
176
+ qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
177
+
178
+ conv = conv_templates[args.conv_mode].copy()
179
+ conv.append_message(conv.roles[0], qs)
180
+ conv.append_message(conv.roles[1], None)
181
+ prompt = conv.get_prompt()
182
+
183
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
184
+
185
+ if model.config.image_aspect_ratio == "square_nocrop":
186
+ image_tensor = image_processor.preprocess(img, return_tensors='pt', do_resize=True,
187
+ do_center_crop=False, size=[args.image_h, args.image_w])['pixel_values'][0]
188
+ elif model.config.image_aspect_ratio == "anyres":
189
+ image_process_func = partial(image_processor.preprocess, return_tensors='pt', do_resize=True, do_center_crop=False, size=[args.image_h, args.image_w])
190
+ image_tensor = process_images([img], image_processor, model.config, image_process_func=image_process_func)[0]
191
+ else:
192
+ image_tensor = process_images([img], image_processor, model.config)[0]
193
+
194
+ images = image_tensor.unsqueeze(0).to(args.data_type).cuda()
195
+
196
+ region_masks = ann['region_masks']
197
+
198
+ if region_masks is not None:
199
+ region_masks = [[region_mask_i.cuda().half() for region_mask_i in region_masks]]
200
+ else:
201
+ region_masks = None
202
+
203
+ with torch.inference_mode():
204
+ model.orig_forward = model.forward
205
+ model.forward = partial(
206
+ model.orig_forward,
207
+ region_masks=region_masks
208
+ )
209
+ output_ids = model.generate(
210
+ input_ids,
211
+ images=images,
212
+ region_masks=region_masks,
213
+ image_sizes=[image_size],
214
+ do_sample=True if args.temperature > 0 else False,
215
+ temperature=args.temperature,
216
+ top_p=args.top_p,
217
+ num_beams=args.num_beams,
218
+ max_new_tokens=args.max_new_tokens,
219
+ use_cache=True)
220
+ model.forward = model.orig_forward
221
+
222
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
223
+ outputs = outputs.strip()
224
+
225
+ if 'label' in ann:
226
+ label = ann['label']
227
+ elif len(ann['conversations']) > 1:
228
+ label = ann['conversations'][1]['value']
229
+ else:
230
+ label = None
231
+
232
+ ans_file.write(json.dumps({"id":ann['id'], # +1 offset
233
+ "image_path":image_path,
234
+ "prompt": cur_prompt,
235
+ "text": outputs,
236
+ "label": label,
237
+ }) + "\n")
238
+ ans_file.flush()
239
+ ans_file.close()
240
+
241
+
242
+ if __name__ == "__main__":
243
+ parser = argparse.ArgumentParser()
244
+ parser.add_argument("--model_path", type=str, default="facebook/opt-350m")
245
+ parser.add_argument("--vision_model_path", type=str, default=None)
246
+ parser.add_argument("--model_base", type=str, default=None)
247
+ parser.add_argument("--image_path", type=str, default="")
248
+ parser.add_argument("--data_path", type=str, default="")
249
+ parser.add_argument("--answers_file", type=str, default="")
250
+ parser.add_argument("--conv_mode", type=str, default="ferret_gemma_instruct",
251
+ help="[ferret_gemma_instruct,ferret_llama_3,ferret_vicuna_v1]")
252
+ parser.add_argument("--num_chunks", type=int, default=1)
253
+ parser.add_argument("--chunk_idx", type=int, default=0)
254
+ parser.add_argument("--image_w", type=int, default=336) # 224
255
+ parser.add_argument("--image_h", type=int, default=336) # 224
256
+ parser.add_argument("--add_region_feature", action="store_true")
257
+ parser.add_argument("--region_format", type=str, default="point", choices=["point", "box", "segment", "free_shape"])
258
+ parser.add_argument("--no_coor", action="store_true")
259
+ parser.add_argument("--temperature", type=float, default=0.001)
260
+ parser.add_argument("--top_p", type=float, default=None)
261
+ parser.add_argument("--num_beams", type=int, default=1)
262
+ parser.add_argument("--max_new_tokens", type=int, default=1024)
263
+ parser.add_argument("--data_type", type=str, default='fp16', choices=['fp16', 'bf16', 'fp32'])
264
+ args = parser.parse_args()
265
+
266
+ if args.data_type == 'fp16':
267
+ args.data_type = torch.float16
268
+ elif args.data_type == 'bf16':
269
+ args.data_type = torch.bfloat16
270
+ else:
271
+ args.data_type = torch.float32
272
+
273
+ eval_model(args)
model_worker.py ADDED
@@ -0,0 +1,288 @@
1
+ """
2
+ A model worker executes the model.
3
+ """
4
+ import argparse
5
+ import asyncio
6
+ import json
7
+ import time
8
+ import threading
9
+ import uuid
10
+
11
+ from fastapi import FastAPI, Request, BackgroundTasks
12
+ from fastapi.responses import StreamingResponse
13
+ import requests
14
+ import torch
15
+ import uvicorn
16
+ from functools import partial
17
+
18
+ from constants import WORKER_HEART_BEAT_INTERVAL
19
+ from utils import (build_logger, server_error_msg,
20
+ pretty_print_semaphore)
21
+ from builder import load_pretrained_model
22
+ from mm_utils import process_images, load_image_from_base64, tokenizer_image_token
23
+ from constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
24
+ from transformers import TextIteratorStreamer
25
+ from threading import Thread
26
+
27
+
28
+ GB = 1 << 30
29
+
30
+ worker_id = str(uuid.uuid4())[:6]
31
+ logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
32
+ global_counter = 0
33
+
34
+ model_semaphore = None
35
+
36
+
37
+ def heart_beat_worker(controller):
38
+
39
+ while True:
40
+ time.sleep(WORKER_HEART_BEAT_INTERVAL)
41
+ controller.send_heart_beat()
42
+
43
+
44
+ class ModelWorker:
45
+ def __init__(self, controller_addr, worker_addr,
46
+ worker_id, no_register,
47
+ model_path, model_base, model_name,
48
+ load_8bit, load_4bit, device, use_flash_attn=False):
49
+ self.controller_addr = controller_addr
50
+ self.worker_addr = worker_addr
51
+ self.worker_id = worker_id
52
+ if model_path.endswith("/"):
53
+ model_path = model_path[:-1]
54
+ if model_name is None:
55
+ model_paths = model_path.split("/")
56
+ if model_paths[-1].startswith('checkpoint-'):
57
+ self.model_name = model_paths[-2] + "_" + model_paths[-1]
58
+ else:
59
+ self.model_name = model_paths[-1]
60
+ else:
61
+ self.model_name = model_name
62
+
63
+ self.device = device
64
+ logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
65
+ self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model(
66
+ model_path, model_base, self.model_name, load_8bit, load_4bit)
67
+ self.is_multimodal = 'llava' in self.model_name.lower() or 'ferret' in self.model_name.lower()
68
+
69
+ if not no_register:
70
+ self.register_to_controller()
71
+ self.heart_beat_thread = threading.Thread(
72
+ target=heart_beat_worker, args=(self,), daemon=True)
73
+ self.heart_beat_thread.start()
74
+
75
+ def register_to_controller(self):
76
+ logger.info("Register to controller")
77
+
78
+ url = self.controller_addr + "/register_worker"
79
+ data = {
80
+ "worker_name": self.worker_addr,
81
+ "check_heart_beat": True,
82
+ "worker_status": self.get_status()
83
+ }
84
+ r = requests.post(url, json=data)
85
+ assert r.status_code == 200
86
+
87
+ def send_heart_beat(self):
88
+ logger.info(f"Send heart beat. Models: {[self.model_name]}. "
89
+ f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
90
+ f"global_counter: {global_counter}")
91
+
92
+ url = self.controller_addr + "/receive_heart_beat"
93
+
94
+ while True:
95
+ try:
96
+ ret = requests.post(url, json={
97
+ "worker_name": self.worker_addr,
98
+ "queue_length": self.get_queue_length()}, timeout=5)
99
+ exist = ret.json()["exist"]
100
+ break
101
+ except requests.exceptions.RequestException as e:
102
+ logger.error(f"heart beat error: {e}")
103
+ time.sleep(5)
104
+
105
+ if not exist:
106
+ self.register_to_controller()
107
+
108
+ def get_queue_length(self):
109
+ if model_semaphore is None:
110
+ return 0
111
+ else:
112
+ return args.limit_model_concurrency - model_semaphore._value + (len(
113
+ model_semaphore._waiters) if model_semaphore._waiters is not None else 0)
114
+
115
+ def get_status(self):
116
+ return {
117
+ "model_names": [self.model_name],
118
+ "speed": 1,
119
+ "queue_length": self.get_queue_length(),
120
+ }
121
+
122
+ @torch.inference_mode()
123
+ def generate_stream(self, params):
124
+ tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor
125
+
126
+ prompt = params["prompt"]
127
+ ori_prompt = prompt
128
+ images = params.get("images", None)
129
+ num_image_tokens = 0
130
+ if images is not None and len(images) > 0 and self.is_multimodal:
131
+ if len(images) > 0:
132
+ if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
133
+ raise ValueError("Number of images does not match number of <image> tokens in prompt")
134
+
135
+ images = [load_image_from_base64(image) for image in images]
136
+ image_sizes = [image.size for image in images]
137
+ images = process_images(images, image_processor, model.config)
138
+
139
+ if type(images) is list:
140
+ images = [image.to(self.model.device, dtype=torch.float16) for image in images]
141
+ else:
142
+ images = images.to(self.model.device, dtype=torch.float16)
143
+
144
+ replace_token = DEFAULT_IMAGE_TOKEN
145
+ if getattr(self.model.config, 'mm_use_im_start_end', False):
146
+ replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
147
+ prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
148
+
149
+ num_image_tokens = prompt.count(replace_token) * model.get_vision_tower().num_patches
150
+ else:
151
+ images = None
152
+ image_sizes = None
153
+ image_args = {"images": images, "image_sizes": image_sizes}
154
+ else:
155
+ images = None
156
+ image_args = {}
157
+
158
+ temperature = float(params.get("temperature", 1.0))
159
+ top_p = float(params.get("top_p", 1.0))
160
+ max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
161
+ max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
162
+ stop_str = params.get("stop", None)
163
+ do_sample = True if temperature > 0.001 else False
164
+
165
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(self.device)
166
+ keywords = [stop_str]
167
+ # stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
168
+ streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=15)
169
+
170
+ max_new_tokens = min(max_new_tokens, max_context_length - input_ids.shape[-1] - num_image_tokens)
171
+
172
+ if max_new_tokens < 1:
173
+ yield json.dumps({"text": ori_prompt + "Exceeds max token length. Please start a new conversation, thanks.", "error_code": 0}).encode() + b"\0"
174
+ return
175
+
176
+ thread = Thread(target=model.generate, kwargs=dict(
177
+ inputs=input_ids,
178
+ do_sample=do_sample,
179
+ temperature=temperature,
180
+ top_p=top_p,
181
+ max_new_tokens=max_new_tokens,
182
+ streamer=streamer,
183
+ use_cache=True,
184
+ **image_args
185
+ ))
186
+ thread.start()
187
+
188
+ generated_text = ori_prompt
189
+ for new_text in streamer:
190
+ generated_text += new_text
191
+ if generated_text.endswith(stop_str):
192
+ generated_text = generated_text[:-len(stop_str)]
193
+ yield json.dumps({"text": generated_text, "error_code": 0}).encode() + b"\0"
194
+
+    def generate_stream_gate(self, params):
+        try:
+            for x in self.generate_stream(params):
+                yield x
+        except ValueError as e:
+            print("Caught ValueError:", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+        except torch.cuda.CudaError as e:
+            print("Caught torch.cuda.CudaError:", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+        except Exception as e:
+            print("Caught Unknown Error", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+
+
+app = FastAPI()
+
+
+def release_model_semaphore(fn=None):
+    model_semaphore.release()
+    if fn is not None:
+        fn()
+
+
+@app.post("/worker_generate_stream")
+async def generate_stream(request: Request):
+    global model_semaphore, global_counter
+    global_counter += 1
+    params = await request.json()
+
+    if model_semaphore is None:
+        model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
+    await model_semaphore.acquire()
+    worker.send_heart_beat()
+    generator = worker.generate_stream_gate(params)
+    background_tasks = BackgroundTasks()
+    background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
+    return StreamingResponse(generator, background=background_tasks)
+
+
+@app.post("/worker_get_status")
+async def get_status(request: Request):
+    return worker.get_status()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--port", type=int, default=21002)
+    parser.add_argument("--worker-address", type=str,
+                        default="http://localhost:21002")
+    parser.add_argument("--controller-address", type=str,
+                        default="http://localhost:21001")
+    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
+    parser.add_argument("--model-base", type=str, default=None)
+    parser.add_argument("--model-name", type=str)
+    parser.add_argument("--device", type=str, default="cuda")
+    parser.add_argument("--multi-modal", action="store_true", help="Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.")
+    parser.add_argument("--limit-model-concurrency", type=int, default=5)
+    parser.add_argument("--stream-interval", type=int, default=1)
+    parser.add_argument("--no-register", action="store_true")
+    parser.add_argument("--load-8bit", action="store_true")
+    parser.add_argument("--load-4bit", action="store_true")
+    parser.add_argument("--use-flash-attn", action="store_true")
+    args = parser.parse_args()
+    logger.info(f"args: {args}")
+
+    if args.multi_modal:
+        logger.warning("Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.")
+
+    worker = ModelWorker(args.controller_address,
+                         args.worker_address,
+                         worker_id,
+                         args.no_register,
+                         args.model_path,
+                         args.model_base,
+                         args.model_name,
+                         args.load_8bit,
+                         args.load_4bit,
+                         args.device,
+                         use_flash_attn=args.use_flash_attn)
+    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
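A minimal client sketch for the endpoint above (not part of the commit): it assumes a worker is already serving on localhost:21002 and uses a placeholder prompt and stop string for illustration; real clients build both via ferretui.conversation, as test_message.py below does. Responses arrive as NUL-delimited JSON chunks, each carrying the full text generated so far.

```python
import json

import requests

worker_addr = "http://localhost:21002"  # matches the --port default above
pload = {
    "prompt": "Describe the main button on this screen.",  # placeholder prompt
    "max_new_tokens": 64,
    "temperature": 0.2,
    "top_p": 0.7,
    "stop": "</s>",  # assumed separator; normally conv.sep
}
response = requests.post(worker_addr + "/worker_generate_stream",
                         json=pload, stream=True)

# Each chunk is a JSON object terminated by b"\0"; later chunks extend earlier ones.
for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
    if chunk:
        data = json.loads(chunk.decode("utf-8"))
        if data["error_code"] == 0:
            print(data["text"], end="\r")
print()
```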
register_worker.py ADDED
@@ -0,0 +1,26 @@
+"""
+Manually register workers.
+
+Usage:
+python3 register_worker.py --controller-address http://localhost:21001 --worker-name http://localhost:21002
+"""
+
+import argparse
+
+import requests
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--controller-address", type=str)
+    parser.add_argument("--worker-name", type=str)
+    parser.add_argument("--check-heart-beat", action="store_true")
+    args = parser.parse_args()
+
+    url = args.controller_address + "/register_worker"
+    data = {
+        "worker_name": args.worker_name,
+        "check_heart_beat": args.check_heart_beat,
+        "worker_status": None,
+    }
+    r = requests.post(url, json=data)
+    assert r.status_code == 200
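To confirm a registration took effect, you can query the controller directly; a small sketch against the same endpoints test_message.py uses (/refresh_all_workers and /list_models), assuming the controller runs at its default address:

```python
import requests

controller_addr = "http://localhost:21001"  # default controller address

# Ask the controller to re-poll its workers, then list the models it now serves.
requests.post(controller_addr + "/refresh_all_workers")
models = requests.post(controller_addr + "/list_models").json()["models"]
print(f"Registered models: {sorted(models)}")
```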
sglang_worker.py ADDED
@@ -0,0 +1,244 @@
+"""
+A model worker executes the model.
+"""
+import argparse
+import asyncio
+from concurrent.futures import ThreadPoolExecutor
+import json
+import time
+import threading
+import uuid
+
+from fastapi import FastAPI, Request, BackgroundTasks
+from fastapi.responses import StreamingResponse
+import requests
+import re
+import uvicorn
+from functools import partial
+
+from ferretui.constants import WORKER_HEART_BEAT_INTERVAL
+from ferretui.utils import (build_logger, server_error_msg,
+                            pretty_print_semaphore)
+from ferretui.mm_utils import process_images, load_image_from_base64, tokenizer_image_token, expand2square
+from ferretui.constants import DEFAULT_IMAGE_TOKEN
+
+import sglang as sgl
+from sglang.backend.runtime_endpoint import RuntimeEndpoint
+
+
+GB = 1 << 30
+
+worker_id = str(uuid.uuid4())[:6]
+logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
+global_counter = 0
+
+model_semaphore = None
+
+
+def heart_beat_worker(controller):
+    while True:
+        time.sleep(WORKER_HEART_BEAT_INTERVAL)
+        controller.send_heart_beat()
+
+
+@sgl.function
+def pipeline(s, prompt, max_tokens):
+    for p in prompt:
+        if type(p) is str:
+            s += p
+        else:
+            s += sgl.image(p)
+    s += sgl.gen("response", max_tokens=max_tokens)
+
+
+class ModelWorker:
+    def __init__(self, controller_addr, worker_addr, sgl_endpoint,
+                 worker_id, no_register, model_name):
+        self.controller_addr = controller_addr
+        self.worker_addr = worker_addr
+        self.worker_id = worker_id
+
+        # Select backend
+        backend = RuntimeEndpoint(sgl_endpoint)
+        sgl.set_default_backend(backend)
+        model_path = backend.model_info["model_path"]
+
+        if model_path.endswith("/"):
+            model_path = model_path[:-1]
+        if model_name is None:
+            model_paths = model_path.split("/")
+            if model_paths[-1].startswith('checkpoint-'):
+                self.model_name = model_paths[-2] + "_" + model_paths[-1]
+            else:
+                self.model_name = model_paths[-1]
+        else:
+            self.model_name = model_name
+
+        logger.info(f"Loading the SGLANG model {self.model_name} on worker {worker_id} ...")
+
+        if not no_register:
+            self.register_to_controller()
+            self.heart_beat_thread = threading.Thread(
+                target=heart_beat_worker, args=(self,), daemon=True)
+            self.heart_beat_thread.start()
+
+    def register_to_controller(self):
+        logger.info("Register to controller")
+
+        url = self.controller_addr + "/register_worker"
+        data = {
+            "worker_name": self.worker_addr,
+            "check_heart_beat": True,
+            "worker_status": self.get_status()
+        }
+        r = requests.post(url, json=data)
+        assert r.status_code == 200
+
+    def send_heart_beat(self):
+        logger.info(f"Send heart beat. Models: {[self.model_name]}. "
+                    f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
+                    f"global_counter: {global_counter}")
+
+        url = self.controller_addr + "/receive_heart_beat"
+
+        while True:
+            try:
+                ret = requests.post(url, json={
+                    "worker_name": self.worker_addr,
+                    "queue_length": self.get_queue_length()}, timeout=5)
+                exist = ret.json()["exist"]
+                break
+            except requests.exceptions.RequestException as e:
+                logger.error(f"heart beat error: {e}")
+            time.sleep(5)
+
+        if not exist:
+            self.register_to_controller()
+
+    def get_queue_length(self):
+        if model_semaphore is None:
+            return 0
+        else:
+            return args.limit_model_concurrency - model_semaphore._value + (len(
+                model_semaphore._waiters) if model_semaphore._waiters is not None else 0)
+
+    def get_status(self):
+        return {
+            "model_names": [self.model_name],
+            "speed": 1,
+            "queue_length": self.get_queue_length(),
+        }
+
+    async def generate_stream(self, params):
+        ori_prompt = prompt = params["prompt"]
+        images = params.get("images", None)
+        if images is not None and len(images) > 0:
+            if len(images) != prompt.count(DEFAULT_IMAGE_TOKEN):
+                raise ValueError("Number of images does not match number of <image> tokens in prompt")
+
+            images = [load_image_from_base64(image) for image in images]
+
+            # FIXME: for image-start/end token
+            # replace_token = DEFAULT_IMAGE_TOKEN
+            # if getattr(self.model.config, 'mm_use_im_start_end', False):
+            #     replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
+            # prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
+            prompt = prompt.replace(' ' + DEFAULT_IMAGE_TOKEN + '\n', DEFAULT_IMAGE_TOKEN)
+            # Split the prompt on <image> tokens and weave the decoded images back in,
+            # producing the mixed text/image list that the sgl pipeline expects.
+            prompt_split = prompt.split(DEFAULT_IMAGE_TOKEN)
+            prompt = []
+            for i in range(len(prompt_split)):
+                prompt.append(prompt_split[i])
+                if i < len(images):
+                    prompt.append(images[i])
+        else:
+            prompt = [prompt]
+
+        temperature = float(params.get("temperature", 1.0))
+        top_p = float(params.get("top_p", 1.0))
+        # max_context_length = getattr(model.config, 'max_position_embeddings', 2048)
+        max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
+        stop_str = params.get("stop", None)
+        stop_str = [stop_str] if stop_str is not None else None
+
+        print({'prompt': prompt, 'max_new_tokens': max_new_tokens, 'temperature': temperature, 'top_p': top_p})
+        state = pipeline.run(prompt, max_new_tokens, temperature=temperature, top_p=top_p, stream=True)
+
+        generated_text = ori_prompt
+        async for text_outputs in state.text_async_iter(var_name="response"):
+            generated_text += text_outputs
+            yield json.dumps({"text": generated_text, "error_code": 0}).encode() + b"\0"
+
+    async def generate_stream_gate(self, params):
+        try:
+            async for x in self.generate_stream(params):
+                yield x
+        except ValueError as e:
+            print("Caught ValueError:", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+        except Exception as e:
+            print("Caught Unknown Error", e)
+            ret = {
+                "text": server_error_msg,
+                "error_code": 1,
+            }
+            yield json.dumps(ret).encode() + b"\0"
+
+
+app = FastAPI()
+
+
+def release_model_semaphore(fn=None):
+    model_semaphore.release()
+    if fn is not None:
+        fn()
+
+
+@app.post("/worker_generate_stream")
+async def generate_stream(request: Request):
+    global model_semaphore, global_counter
+    global_counter += 1
+    params = await request.json()
+
+    if model_semaphore is None:
+        model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
+    await model_semaphore.acquire()
+    worker.send_heart_beat()
+    generator = worker.generate_stream_gate(params)
+    background_tasks = BackgroundTasks()
+    background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
+    return StreamingResponse(generator, background=background_tasks)
+
+
+@app.post("/worker_get_status")
+async def get_status(request: Request):
+    return worker.get_status()
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--port", type=int, default=21002)
+    parser.add_argument("--worker-address", type=str,
+                        default="http://localhost:21002")
+    parser.add_argument("--controller-address", type=str,
+                        default="http://localhost:21001")
+    parser.add_argument("--model-name", type=str)
+    parser.add_argument("--sgl-endpoint", type=str)
+    parser.add_argument("--limit-model-concurrency", type=int, default=5)
+    parser.add_argument("--stream-interval", type=int, default=1)
+    parser.add_argument("--no-register", action="store_true")
+    args = parser.parse_args()
+    logger.info(f"args: {args}")
+
+    worker = ModelWorker(args.controller_address,
+                         args.worker_address,
+                         args.sgl_endpoint,
+                         worker_id,
+                         args.no_register,
+                         args.model_name)
+    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
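The sgl pipeline above takes a single list interleaving text segments and images. A standalone sketch of the splitting logic generate_stream applies (strings stand in for the PIL images that load_image_from_base64 would return):

```python
DEFAULT_IMAGE_TOKEN = "<image>"  # mirrors ferretui.constants

def interleave(prompt, images):
    # Split on <image> and weave the images back in, yielding
    # [text, image, text, image, ...] for pipeline.run().
    parts = prompt.split(DEFAULT_IMAGE_TOKEN)
    mixed = []
    for i, part in enumerate(parts):
        mixed.append(part)
        if i < len(images):
            mixed.append(images[i])
    return mixed

print(interleave("What is here?<image>And here?<image>", ["img_a", "img_b"]))
# ['What is here?', 'img_a', 'And here?', 'img_b', '']
```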
test_message.py ADDED
@@ -0,0 +1,62 @@
+import argparse
+import json
+
+import requests
+
+from ferretui.conversation import default_conversation
+
+
+def main():
+    if args.worker_address:
+        worker_addr = args.worker_address
+    else:
+        controller_addr = args.controller_address
+        ret = requests.post(controller_addr + "/refresh_all_workers")
+        ret = requests.post(controller_addr + "/list_models")
+        models = ret.json()["models"]
+        models.sort()
+        print(f"Models: {models}")
+
+        ret = requests.post(controller_addr + "/get_worker_address",
+                            json={"model": args.model_name})
+        worker_addr = ret.json()["address"]
+        print(f"worker_addr: {worker_addr}")
+
+    if worker_addr == "":
+        return
+
+    conv = default_conversation.copy()
+    conv.append_message(conv.roles[0], args.message)
+    prompt = conv.get_prompt()
+
+    headers = {"User-Agent": "LLaVA Client"}
+    pload = {
+        "model": args.model_name,
+        "prompt": prompt,
+        "max_new_tokens": args.max_new_tokens,
+        "temperature": 0.7,
+        "stop": conv.sep,
+    }
+    response = requests.post(worker_addr + "/worker_generate_stream", headers=headers,
+                             json=pload, stream=True)
+
+    print(prompt.replace(conv.sep, "\n"), end="")
+    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
+        if chunk:
+            data = json.loads(chunk.decode("utf-8"))
+            output = data["text"].split(conv.sep)[-1]
+            print(output, end="\r")
+    print("")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
+    parser.add_argument("--worker-address", type=str)
+    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
+    parser.add_argument("--max-new-tokens", type=int, default=32)
+    parser.add_argument("--message", type=str, default=
+        "Tell me a story with more than 1000 words.")
+    args = parser.parse_args()
+
+    main()
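Both workers also expose /worker_get_status alongside the streaming endpoint test_message.py exercises; a quick health-check sketch (assumes a worker at the default port) that prints the fields returned by ModelWorker.get_status():

```python
import requests

# The endpoint takes no body and returns model_names, speed, and queue_length.
status = requests.post("http://localhost:21002/worker_get_status", timeout=5).json()
print(status["model_names"], status["speed"], status["queue_length"])
```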
untitled ADDED
File without changes
utils.py ADDED
@@ -0,0 +1,126 @@
+import datetime
+import json
+import logging
+import logging.handlers
+import os
+import sys
+
+import requests
+
+LOGDIR = "."
+
+server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
+moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN."
+
+handler = None
+
+
+def build_logger(logger_name, logger_filename):
+    global handler
+
+    formatter = logging.Formatter(
+        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+    )
+
+    # Set the format of root handlers
+    if not logging.getLogger().handlers:
+        logging.basicConfig(level=logging.INFO)
+    logging.getLogger().handlers[0].setFormatter(formatter)
+
+    # Redirect stdout and stderr to loggers
+    stdout_logger = logging.getLogger("stdout")
+    stdout_logger.setLevel(logging.INFO)
+    sl = StreamToLogger(stdout_logger, logging.INFO)
+    sys.stdout = sl
+
+    stderr_logger = logging.getLogger("stderr")
+    stderr_logger.setLevel(logging.ERROR)
+    sl = StreamToLogger(stderr_logger, logging.ERROR)
+    sys.stderr = sl
+
+    # Get logger
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(logging.INFO)
+
+    # Add a file handler for all loggers
+    if handler is None:
+        os.makedirs(LOGDIR, exist_ok=True)
+        filename = os.path.join(LOGDIR, logger_filename)
+        handler = logging.handlers.TimedRotatingFileHandler(
+            filename, when='D', utc=True, encoding='UTF-8')
+        handler.setFormatter(formatter)
+
+        for name, item in logging.root.manager.loggerDict.items():
+            if isinstance(item, logging.Logger):
+                item.addHandler(handler)
+
+    return logger
+
+
+class StreamToLogger(object):
+    """
+    Fake file-like stream object that redirects writes to a logger instance.
+    """
+    def __init__(self, logger, log_level=logging.INFO):
+        self.terminal = sys.stdout
+        self.logger = logger
+        self.log_level = log_level
+        self.linebuf = ''
+
+    def __getattr__(self, attr):
+        return getattr(self.terminal, attr)
+
+    def write(self, buf):
+        temp_linebuf = self.linebuf + buf
+        self.linebuf = ''
+        for line in temp_linebuf.splitlines(True):
+            # From the io.TextIOWrapper docs:
+            #   On output, if newline is None, any '\n' characters written
+            #   are translated to the system default line separator.
+            # By default sys.stdout.write() expects '\n' newlines and then
+            # translates them so this is still cross platform.
+            if line[-1] == '\n':
+                self.logger.log(self.log_level, line.rstrip())
+            else:
+                self.linebuf += line
+
+    def flush(self):
+        if self.linebuf != '':
+            self.logger.log(self.log_level, self.linebuf.rstrip())
+        self.linebuf = ''
+
+
+def disable_torch_init():
+    """
+    Disable the redundant torch default initialization to accelerate model creation.
+    """
+    import torch
+    setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
+    setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
+
+
+def violates_moderation(text):
+    """
+    Check whether the text violates OpenAI moderation API.
+    """
+    url = "https://api.openai.com/v1/moderations"
+    headers = {"Content-Type": "application/json",
+               "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
+    text = text.replace("\n", "")
+    # Serialize with json.dumps so quotes and other special characters
+    # in the text are escaped correctly.
+    data = json.dumps({"input": text}).encode("utf-8")
+    try:
+        ret = requests.post(url, headers=headers, data=data, timeout=5)
+        flagged = ret.json()["results"][0]["flagged"]
+    except requests.exceptions.RequestException:
+        flagged = False
+    except KeyError:
+        flagged = False
+
+    return flagged
+
+
+def pretty_print_semaphore(semaphore):
+    if semaphore is None:
+        return "None"
+    return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"