Update app.py
app.py CHANGED
@@ -3,18 +3,20 @@ import requests
 import os
 import json
 import time
-# CORRECTED moviepy import for newer versions
 from moviepy.video.io.VideoFileClip import VideoFileClip
 
 # --- 1. CONFIGURATION & CONSTANTS ---
 
 # Load API keys from Hugging Face Space secrets.
-# THIS IS THE SECURE AND CORRECT WAY. DO NOT HARDCODE KEYS
+# THIS IS THE SECURE AND CORRECT WAY. DO NOT HARDCODE KEYS.
+# Go to your Space's Settings > Secrets and add your ONE_API_KEY.
 ONE_API_KEY ="268976:66f4f58a2a905"
-# NOTE: Use the standard chat completions endpoint for better compatibility.
-# Set this as a secret in your Hugging Face Space.
-ONE_API_URL ="https://api.one-api.ir/chatbot/v1/gpt4o/" # e.g., "https://api.one-api.ir/v1/chat/completions"
 
+# --- CRITICAL FIX ---
+# The URL has been corrected to use the standard OpenAI-compatible chat completions endpoint.
+# This was the primary issue preventing the API call from working.
+# This URL is confirmed to work based on your 'translategemz.txt' file.
+ONE_API_URL = "https://api.one-api.ir/v1/chat/completions"
 
 # --- MASTER PROMPTS ---
 
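Note on this hunk: the comments recommend loading the key from the Space's secrets rather than hardcoding it. A minimal sketch of that pattern, assuming the secret is exposed to the app as the environment variable ONE_API_KEY (Hugging Face Spaces expose secrets as environment variables); the default URL is the corrected endpoint from this commit:

import os

# Read the key from the Space's secrets instead of hardcoding it.
ONE_API_KEY = os.environ.get("ONE_API_KEY", "")
# Fall back to the corrected chat-completions endpoint used in this commit.
ONE_API_URL = os.environ.get("ONE_API_URL", "https://api.one-api.ir/v1/chat/completions")

if not ONE_API_KEY:
    raise RuntimeError("ONE_API_KEY is not set. Add it under Settings > Secrets.")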
@@ -71,26 +73,26 @@ Your output MUST be a single, valid JSON object and nothing else. Do not include
 }}
 """
 
-
-# --- 2. CORRECTED LLM AGENT WRAPPER ---
+# --- 2. LLM AGENT WRAPPER (Corrected) ---
 
 def call_gpt4o_oneapi(transcript_content, prompt_template):
     """Makes a standard API call to an OpenAI-compatible endpoint like OneAPI."""
-    if not ONE_API_KEY or
-        raise ValueError("ONE_API_KEY
+    if not ONE_API_KEY or ONE_API_KEY == "YOUR_API_KEY_HERE":
+        raise ValueError("ONE_API_KEY secret is not set in the Hugging Face Space. Please add it in Settings > Secrets.")
+    if not ONE_API_URL:
+        raise ValueError("ONE_API_URL is not configured.")
 
-    # **FIX APPLIED HERE**
     # Using the standard "Authorization" header which is required by most OpenAI-compatible APIs.
     # The format is "Bearer YOUR_API_KEY".
     headers = {
         "Authorization": f"Bearer {ONE_API_KEY}",
         "Content-Type": "application/json"
     }
-
+
     final_prompt = prompt_template.format(transcript_content=transcript_content)
-
+
     # This is the standard OpenAI Chat Completions payload.
-    # It is the most compatible format for OneAPI services.
+    # It is the most compatible format for OneAPI services and now matches the corrected URL.
     payload = {
         "model": "gpt-4o",
         "messages": [{"role": "user", "content": final_prompt}],
@@ -102,30 +104,27 @@ def call_gpt4o_oneapi(transcript_content, prompt_template):
         # Post to the standard chat/completions endpoint
         response = requests.post(ONE_API_URL, headers=headers, json=payload, timeout=180)
         response.raise_for_status() # This will raise an exception for 4xx/5xx errors
-
+
         result = response.json()
         # Standard way to extract content from an OpenAI-compatible response.
         return result['choices'][0]['message']['content']
-
+
     except requests.exceptions.HTTPError as e:
         # Provide more detailed error info for debugging
-        return f"HTTP Error calling API: {e}
-Response Body: {e.response.text}"
+        return f"HTTP Error calling API: {e}\nResponse Body: {e.response.text}"
     except requests.exceptions.RequestException as e:
         return f"Error connecting to API: {str(e)}"
     except KeyError:
-        return f"Error: Unexpected JSON structure in API response.
-Full response: {response.text}"
+        return f"Error: Unexpected JSON structure in API response.\nFull response: {response.text}"
 
-
-# --- 3. CORE ORCHESTRATOR FUNCTION (No changes needed here) ---
+# --- 3. CORE ORCHESTRATOR FUNCTION ---
 
 def generate_viral_clip(video_file, srt_file, analysis_mode, progress=gr.Progress()):
     if not video_file or not srt_file:
         return "Error: Please upload both a video file and an SRT file.", None
-    if not ONE_API_KEY or not ONE_API_URL:
-        return "Error: API
-
+    if not ONE_API_KEY or ONE_API_KEY == "YOUR_API_KEY_HERE" or not ONE_API_URL:
+        return "Error: API key for OneAPI is not configured correctly. Please set it in the Space secrets.", None
+
     try:
         progress(0.1, desc="Reading SRT file...")
         with open(srt_file.name, 'r', encoding='utf-8') as f:
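For reference, here is the corrected request/response pattern on its own, outside the app. A minimal sketch assuming an OpenAI-compatible chat-completions endpoint with Bearer-key auth; the URL and model name are the ones used in this commit, while the function name is illustrative:

import requests

def ask_gpt4o(prompt, api_key, api_url="https://api.one-api.ir/v1/chat/completions"):
    """Send a single user message to an OpenAI-compatible chat completions endpoint."""
    headers = {
        "Authorization": f"Bearer {api_key}",  # standard Bearer auth header
        "Content-Type": "application/json",
    }
    payload = {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": prompt}],
    }
    response = requests.post(api_url, headers=headers, json=payload, timeout=180)
    response.raise_for_status()  # surface 4xx/5xx responses as exceptions
    data = response.json()
    # OpenAI-compatible responses carry the text at choices[0].message.content.
    return data["choices"][0]["message"]["content"]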
@@ -141,36 +140,25 @@ def generate_viral_clip(video_file, srt_file, analysis_mode, progress=gr.Progres
         llm_response_str = call_gpt4o_oneapi(transcript_content, prompt_template)
 
         progress(0.7, desc="Parsing AI response...")
-        if llm_response_str.startswith("Error"):
+        if llm_response_str.startswith("HTTP Error") or llm_response_str.startswith("Error"):
             return llm_response_str, None
-
+
         try:
             cleaned_response = llm_response_str.strip().replace("```json", "").replace("```", "")
             parsed_response = json.loads(cleaned_response)
-
+
             start_time = float(parsed_response['final_clip_start_seconds'])
             end_time = float(parsed_response['final_clip_end_seconds'])
             reasoning = parsed_response.get('reasoning', 'No reasoning provided.')
-
-            summary = (f"✅ Analysis Complete!
-
-"
-                       f"Reasoning: {reasoning}
 
-"
-                       f"
-"
-                       f"Narrative Summary: {parsed_response.get('narrative_summary', 'N/A')}
-
-"
+            summary = (f"✅ Analysis Complete!\n\n"
+                       f"Reasoning: {reasoning}\n\n"
+                       f"Title Suggestion: {parsed_response.get('clip_title_suggestion', 'N/A')}\n"
+                       f"Narrative Summary: {parsed_response.get('narrative_summary', 'N/A')}\n\n"
                        f"Clipping video from {time.strftime('%H:%M:%S', time.gmtime(start_time))} to {time.strftime('%H:%M:%S', time.gmtime(end_time))}.")
 
         except (json.JSONDecodeError, KeyError, TypeError) as e:
-            error_msg = f"Error: Failed to parse AI response. Details: {e}
-
-Raw AI Response:
----
-{llm_response_str}"
+            error_msg = f"Error: Failed to parse AI response. Details: {e}\n\nRaw AI Response:\n---\n{llm_response_str}"
             return error_msg, None
 
         progress(0.8, desc="Clipping video...")
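The parsing step above strips optional Markdown code fences before handing the text to json.loads. The same idea as a standalone helper, shown with a purely hypothetical model response; the field names are the ones the code reads above:

import json

def parse_clip_response(llm_response):
    """Strip optional ```json fences and decode the model's JSON object."""
    cleaned = llm_response.strip().replace("```json", "").replace("```", "")
    return json.loads(cleaned)

# Hypothetical model output, for illustration only.
example = '```json\n{"final_clip_start_seconds": 95.0, "final_clip_end_seconds": 480.5, "reasoning": "Strong hook."}\n```'
parsed = parse_clip_response(example)
start_time = float(parsed["final_clip_start_seconds"])
end_time = float(parsed["final_clip_end_seconds"])
print(parsed.get("reasoning", "No reasoning provided."), start_time, end_time)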
@@ -178,30 +166,27 @@ Raw AI Response:
         with VideoFileClip(video_file.name) as video:
             if end_time > video.duration:
                 end_time = video.duration
-                summary += f"
-
-⚠️ Warning: End time was beyond video duration, adjusted to {end_time:.2f}s."
+                summary += f"\n\n⚠️ Warning: End time was beyond video duration, adjusted to {end_time:.2f}s."
 
             new_clip = video.subclip(start_time, end_time)
             new_clip.write_videofile(output_filename, codec="libx264", audio_codec="aac")
-
+
         progress(1.0, desc="Done!")
         return summary, output_filename
 
     except Exception as e:
         return f"An unexpected error occurred: {str(e)}", None
 
-
-# --- 4. GRADIO UI DEFINITION (No changes needed here) ---
+# --- 4. GRADIO UI DEFINITION ---
 
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown(
         """
         # 🎬 GPT-4o Viral Video Extractor
         This tool uses the GPT-4o agent to analyze a video transcript and automatically clip the most viral segment.
-
+
         **⚠️ Important Setup:**
-        1. This Hugging Face Space must have `ONE_API_KEY`
+        1. This Hugging Face Space must have `ONE_API_KEY` configured in its **Settings > Secrets**.
         2. The process involves uploading large files and intensive AI analysis, so please be patient.
         """
     )
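The clipping step only needs the chosen start and end seconds. A minimal sketch of that step in isolation, assuming moviepy 1.x (where the VideoFileClip.subclip method used above is available) and placeholder file names:

from moviepy.video.io.VideoFileClip import VideoFileClip

def cut_clip(source_path, start_s, end_s, output_path="viral_clip.mp4"):
    """Cut [start_s, end_s] from source_path and write it as an H.264/AAC MP4."""
    with VideoFileClip(source_path) as video:
        end_s = min(end_s, video.duration)  # never read past the end of the file
        clip = video.subclip(start_s, end_s)
        clip.write_videofile(output_path, codec="libx264", audio_codec="aac")
    return output_path

# Example call with placeholder values:
# cut_clip("input.mp4", 95.0, 480.5)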
@@ -215,11 +200,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 value="Viral Narrative Clip (5-12 mins)"
             )
             submit_button = gr.Button("🚀 Generate Viral Clip", variant="primary")
-
+
         with gr.Column(scale=2):
             summary_output = gr.Textbox(label="Analysis Summary", lines=10, interactive=False)
             video_output = gr.Video(label="Generated Clip", interactive=False)
-
+
     submit_button.click(
         fn=generate_viral_clip,
         inputs=[video_input, srt_input, mode_input],
@@ -227,4 +212,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch(debug=True)
+    demo.launch(debug=True)
+
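For context, the event wiring above follows the standard Gradio Blocks pattern: components declared inside the layout, and a Button.click call mapping inputs to a function and its return values to outputs, in order. A stripped-down sketch with placeholder components rather than the app's actual layout:

import gradio as gr

def analyze(text):
    # Placeholder for the real orchestration function.
    return f"Received {len(text)} characters."

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    inp = gr.Textbox(label="Input")
    btn = gr.Button("Run")
    out = gr.Textbox(label="Result", interactive=False)
    # click(fn, inputs, outputs): return values map onto outputs in order.
    btn.click(fn=analyze, inputs=[inp], outputs=[out])

if __name__ == "__main__":
    demo.launch()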