shigeru saito committed
Commit 168b94f · 1 Parent(s): 728fc97

Switched to function calling (only outputs the JSON for now; no image yet)

Files changed (2)
  1. app.py +112 -24
  2. requirements.txt +2 -1
app.py CHANGED
@@ -5,16 +5,17 @@ import os
 import fileinput
 from dotenv import load_dotenv
 import io
+import json
 from PIL import Image
 from stability_sdk import client
 import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
 
-title="ochyAI recipe generator"
+title="najimino AI recipe generator"
 inputs_label="どんな料理か教えてくれれば,新しいレシピを考えます"
-outputs_label="ochyAIが返信をします"
+outputs_label="najimino AIが返信をします"
 visual_outputs_label="料理のイメージ"
 description="""
-- ※入出力の文字数は最大1000文字程度までを目安に入力してください。解答に120秒くらいかかります.エラーが出た場合はログを開いてエラーメッセージを送ってくれるとochyAIが喜びます
+- ※入出力の文字数は最大1000文字程度までを目安に入力してください。回答に20秒くらいかかります.
 """
 
 article = """
@@ -26,10 +27,13 @@ os.environ['STABILITY_HOST'] = 'grpc.stability.ai:443'
 stability_api = client.StabilityInference(
     key=os.getenv('STABILITY_KEY'),
     verbose=True,
-    engine="stable-diffusion-xl-beta-v2-2-2",
+    engine="stable-diffusion-512-v2-1",
+    # Available engines: stable-diffusion-v1 stable-diffusion-v1-5 stable-diffusion-512-v2-0 stable-diffusion-768-v2-0
+    # stable-diffusion-512-v2-1 stable-diffusion-768-v2-1 stable-diffusion-xl-beta-v2-2-2 stable-inpainting-v1-0 stable-inpainting-512-v2-0
 )
 # MODEL = "gpt-4"
-MODEL = "gpt-3.5-turbo-16k"
+# MODEL = "gpt-3.5-turbo-16k"
+MODEL = "gpt-3.5-turbo-0613"
 
 def get_filetext(filename, cache={}):
     if filename in cache:
@@ -55,14 +59,14 @@ class OpenAI:
         data = {
             "model": MODEL,
             "messages": [
-                {"role": "system", "content": constraints}
+                {"role": "system", "content": constraints}
                 ,{"role": "system", "content": template}
                 ,{"role": "assistant", "content": "Sure!"}
-                ,{"role": "user", "content": prompt}
+                ,{"role": "user", "content": prompt}
                 ,{"role": "assistant", "content": start_with}
-            ],
-        }
-
+            ],
+        }
+
         # ChatCompletion APIを呼び出す
         response = requests.post(
             "https://api.openai.com/v1/chat/completions",
@@ -71,7 +75,7 @@ class OpenAI:
                 "Authorization": f"Bearer {openai.api_key}"
             },
             json=data
-        )
+        )
 
         # ChatCompletion APIから返された結果を取得する
         result = response.json()
@@ -80,7 +84,7 @@ class OpenAI:
        content = result["choices"][0]["message"]["content"].strip()
 
        visualize_prompt = content.split("### Prompt for Visual Expression\n\n")[1]
-
+
        #print("split_content:"+split_content)
 
        #if len(split_content) > 1:
@@ -89,18 +93,27 @@ class OpenAI:
        # visualize_prompt = "vacant dish"
 
        #print("visualize_prompt:"+visualize_prompt)
-
+
        answers = stability_api.generate(
            prompt=visualize_prompt,
        )
+
+    @classmethod
+    def chat_completion_with_function(cls, prompt, messages, functions):
+        print("prompt:"+prompt)
+
+        response = openai.ChatCompletion.create(
+            model=MODEL,
+            messages=messages,
+            functions=functions,
+            function_call="auto"
+        )
 
-        for resp in answers:
-            for artifact in resp.artifacts:
-                if artifact.finish_reason == generation.FILTER:
-                    print("NSFW")
-                if artifact.type == generation.ARTIFACT_IMAGE:
-                    img = Image.open(io.BytesIO(artifact.binary))
-        return [content, img]
+        # ChatCompletion APIから返された結果を取得する
+        content = json.dumps(response.choices[0].message, indent=2)
+        print(content)
+
+        return [content, None]
 
 class NajiminoAI:
 
@@ -118,9 +131,84 @@ class NajiminoAI:
 
     @classmethod
     def generate_emo(cls, user_message):
-        prompt = NajiminoAI.generate_emo_prompt(user_message);
-        start_with = ""
-        result = OpenAI.chat_completion(prompt=prompt, start_with=start_with)
+        # prompt = NajiminoAI.generate_emo_prompt(user_message);
+        # start_with = ""
+        # result = OpenAI.chat_completion(prompt=prompt, start_with=start_with)
+        # result = OpenAI.chat_completion(prompt=user_message, start_with=start_with)
+
+        constraints = get_filetext(filename = "constraints.md")
+
+        messages = [
+            {"role": "system", "content": constraints}
+            ,{"role": "user", "content": user_message}
+        ]
+        functions = [
+            {
+                "name": "set_recipe",
+                "description": "どんな料理か教えてくれれば,新しいレシピを考えます",
+                "parameters": {
+                    "type": "object",
+                    "default": {},
+                    "title": "The Schema of new recipe",
+                    "required": [
+                        "lang",
+                        "title",
+                        "description",
+                        "instruction",
+                        "comment_feelings_taste",
+                        # "explanation_to_blind_person",
+                        "explanation_to_blind_person"
+                        # "prompt_for_visual_expression_in_en"
+                    ],
+                    "properties": {
+                        "lang": {
+                            "type": "string",
+                            "default": "ja",
+                            "title": "Language Schema",
+                            "examples": [
+                                "ja"
+                            ]
+                        },
+                        "title": {
+                            "type": "string",
+                            "default": "",
+                            "title": "The title Schema",
+                            "examples": [
+                                "グルテンフリーの香ばしいサバのお好み焼き"
+                            ]
+                        },
+                        "description": {
+                            "type": "string",
+                            "default": "",
+                            "title": "The description Schema"
+                        },
+                        "instruction": {
+                            "type": "string",
+                            "default": "",
+                            "title": "The instruction Schema"
+                        },
+                        "comment_feelings_taste": {
+                            "type": "string",
+                            "default": "",
+                            "title": "The Schema of comment feelings taste Schema"
+                        },
+                        "explanation_to_blind_person": {
+                            "type": "string",
+                            "default": "",
+                            "title": "The Schema of Explanation to blind person"
+                        },
+                        "prompt_for_visual_expression_in_en": {
+                            "type": "string",
+                            "default": "",
+                            "title": "The Schema of prompt for visual expression in English"
+                        }
+                    }
+                }
+            }
+        ]
+
+        result = OpenAI.chat_completion_with_function(prompt=user_message, messages=messages, functions=functions)
+
        return result
 
 def main():
@@ -136,4 +224,4 @@ def main():
     iface.launch()
 
 if __name__ == '__main__':
-    main()
+    main()
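As the commit message notes, this revision only returns the raw function-call JSON; the image half of the pipeline is not wired up yet. Below is a minimal, hypothetical sketch (not part of this commit) of how that remaining step could consume the set_recipe arguments and reuse the Stability SDK loop from the previous revision. render_recipe_image and its parameters are assumed names.

# Hypothetical follow-up: turn the function-call JSON back into an image.
import io
import json

from PIL import Image
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation

def render_recipe_image(message, stability_api):
    # With function_call="auto", the returned message may carry a "function_call"
    # whose "arguments" field is a JSON string matching the set_recipe schema.
    function_call = message.get("function_call")
    if function_call is None:
        return None  # the model answered in plain text; nothing to visualize

    args = json.loads(function_call["arguments"])
    visualize_prompt = args.get("prompt_for_visual_expression_in_en", "")
    if not visualize_prompt:
        return None

    # Same generate/artifact loop the previous revision ran after chat_completion.
    for resp in stability_api.generate(prompt=visualize_prompt):
        for artifact in resp.artifacts:
            if artifact.finish_reason == generation.FILTER:
                print("NSFW")
            if artifact.type == generation.ARTIFACT_IMAGE:
                return Image.open(io.BytesIO(artifact.binary))
    return None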
requirements.txt CHANGED
@@ -1,3 +1,4 @@
 openai>=0.27.0
 python-dotenv
-stability-sdk
+stability-sdk
+gradio
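requirements.txt now lists gradio explicitly, matching the iface.launch() call in main(). The actual main() is outside the hunks shown above, so the following is only an assumed sketch of how the labels defined at the top of app.py could be wired into a two-output Gradio interface (text answer plus the still-empty image slot).

# Assumed wiring only; title, *_label, description, article and NajiminoAI come from app.py.
import gradio as gr

def build_interface():
    return gr.Interface(
        fn=NajiminoAI.generate_emo,               # returns [content, image-or-None]
        inputs=gr.Textbox(label=inputs_label),
        outputs=[
            gr.Textbox(label=outputs_label),      # the function-call JSON for now
            gr.Image(label=visual_outputs_label), # stays empty until images are re-enabled
        ],
        title=title,
        description=description,
        article=article,
    )

# iface = build_interface()
# iface.launch()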