Upload app.py
app.py
ADDED
@@ -0,0 +1,57 @@
import gradio as gr
import requests
import os

# Voiceflow API key, read from the Space's environment secrets
api_key = os.environ.get("api_key")


def generate_text_descriptions(concept):
    # Unique ID used to track conversation state
    user_id = "user_123"
    body = {"action": {"type": "text", "payload": concept}}

    # Start a conversation with the Voiceflow Dialog Manager
    response = requests.post(
        f"https://general-runtime.voiceflow.com/state/user/{user_id}/interact",
        json=body,
        headers={"Authorization": api_key},
    )

    # The second trace in the response is expected to carry the text reply
    return response.json()[1]['payload']['message'].strip("\n")


def generate_shots(prompt):
    persona = "You are an award winning cinematographer"
    prompt_shot = f"{persona} create an interesting shot using different camera angles for the concept enclosed in three backticks ```{prompt}```."
    prompt_lighting = f"{persona} create Lighting Design for the concept enclosed in three backticks ```{prompt}``` and provide description for your choice."
    prompt_color_grading = f"{persona} Create Color Grading for the concept enclosed in three backticks ```{prompt}``` and provide description for your choice."

    shot_description = generate_text_descriptions(prompt_shot)
    lighting_description = generate_text_descriptions(prompt_lighting)
    color_grading_description = generate_text_descriptions(prompt_color_grading)

    return shot_description, lighting_description, color_grading_description


def generate_output(prompt):
    shots, lighting, color_grading = generate_shots(prompt)
    return shots, lighting, color_grading


input_text = gr.Textbox(
    lines=5,
    label="Please provide the concept of your film to generate shot compositions, lighting designs, and color grading styles.",
)

iface = gr.Interface(
    fn=generate_output,
    inputs=input_text,
    outputs=["text", "text", "text"],
    title="CinemAI",
    description="Your AI assistant that generates shot compositions, lighting designs, and color grading styles.",
)

iface.launch()