Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
-
-from
-
-from youtube_transcript_api.formatters import TextFormatter
 import openai
 import os
 import gradio as gr
@@ -25,20 +24,36 @@ def get_yt_video_id(url):
     else:
         raise ValueError("Not a valid YouTube link.")

-#
-def
-
-

-#
-

-
-
-    text = text.replace("\n", " ")
-    return text

-
 def textToSummary(text):
     openai.api_key = os.getenv("OPENAI_API_KEY")  # read the OpenAI API key from the environment variable
     response = openai.Completion.create(
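Most of the deleted helper's body is not visible in the hunk above. Judging from the removed imports (youtube_transcript_api and its TextFormatter) and the two surviving body lines, the old version presumably fetched the video's caption track and flattened it to plain text. The sketch below is a hypothetical reconstruction, not the committed code; the function name get_transcript_text and the language list are assumptions:

    from youtube_transcript_api import YouTubeTranscriptApi
    from youtube_transcript_api.formatters import TextFormatter

    def get_transcript_text(video_id):
        # fetch the caption track and format it as plain text
        transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=["ko", "en"])
        text = TextFormatter().format_transcript(transcript)
        text = text.replace("\n", " ")  # surviving line from the removed code
        return text                     # surviving line from the removed code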
@@ -55,16 +70,18 @@ def textToSummary(text):
 # Function that runs the whole summarization pipeline
 def summarize(url):
     try:
-
-
-
-
         return summary
     except Exception as e:
         return f"Summarization failed: {str(e)}"

 # Gradio interface setup
-description = "

 gr.Interface(fn=summarize,
              inputs="text",
+from pytube import YouTube
+from google.cloud import speech_v1p1beta1 as speech
+import io
 import openai
 import os
 import gradio as gr
     else:
         raise ValueError("Not a valid YouTube link.")

+# Audio extraction function
+def download_audio(youtube_url):
+    yt = YouTube(youtube_url)
+    stream = yt.streams.filter(only_audio=True).first()
+    audio_path = stream.download(filename="audio.mp4")
+    return audio_path
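filter(only_audio=True).first() takes whichever audio-only stream pytube lists first, and every request writes to the same audio.mp4. A hedged variant, not part of the commit (download_audio_best and the per-video filename are assumptions), would prefer the highest-bitrate stream and a collision-free filename:

    from pytube import YouTube

    def download_audio_best(youtube_url):
        yt = YouTube(youtube_url)
        # sort audio-only streams by average bitrate and take the best one
        stream = yt.streams.filter(only_audio=True).order_by("abr").desc().first()
        # name the file after the video id so concurrent requests do not collide
        return stream.download(filename=f"{yt.video_id}.mp4")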

+# Convert the audio to text with the Google Speech-to-Text API
+def speech_to_text(audio_path):
+    client = speech.SpeechClient()

+    with io.open(audio_path, "rb") as audio_file:
+        content = audio_file.read()

+    audio = speech.RecognitionAudio(content=content)
+    config = speech.RecognitionConfig(
+        encoding=speech.RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED,
+        sample_rate_hertz=16000,
+        language_code="ko-KR"  # Korean speech recognition
+    )
+
+    response = client.recognize(config=config, audio=audio)
+
+    transcript = ""
+    for result in response.results:
+        transcript += result.alternatives[0].transcript + " "
+
+    return transcript.strip()
+
+# Function that summarizes the text (using the OpenAI API)
 def textToSummary(text):
     openai.api_key = os.getenv("OPENAI_API_KEY")  # read the OpenAI API key from the environment variable
     response = openai.Completion.create(
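The remaining arguments of the Completion.create call are outside the hunk. Purely as a hedged reference, a legacy Completions request of this shape typically passes a model, a prompt built from the transcript, and a token limit; the values below are assumptions, not the committed ones:

    # hypothetical parameters for the legacy (pre-1.0) openai client
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=f"Summarize the following transcript:\n{text}",
        max_tokens=500,
        temperature=0.3,
    )
    summary = response["choices"][0]["text"].strip()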
 # Function that runs the whole summarization pipeline
 def summarize(url):
     try:
+        # Download the YouTube audio
+        audio_path = download_audio(url)
+        # Convert the speech to text
+        transcript = speech_to_text(audio_path)
+        # Summarize the text
+        summary = textToSummary(transcript)
         return summary
     except Exception as e:
         return f"Summarization failed: {str(e)}"

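One thing the commit does not do: the downloaded audio.mp4 stays on disk after every request. A hedged tweak inside summarize could remove the file once the transcript exists, for example:

    import os

    # hypothetical cleanup step, not in the committed code
    audio_path = download_audio(url)
    try:
        transcript = speech_to_text(audio_path)
    finally:
        os.remove(audio_path)  # free the Space's storage after transcription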
 # Gradio interface setup
+description = "Summarizes YouTube videos with speech recognition, even when the video has no subtitles."

 gr.Interface(fn=summarize,
              inputs="text",
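The hunk ends at inputs="text", so the rest of the gr.Interface call is not shown. A typical completion that actually uses the new description string might look like this; the outputs and title values and the launch() call are assumptions:

    gr.Interface(fn=summarize,
                 inputs="text",
                 outputs="text",
                 title="YouTube Summarizer",  # assumed title
                 description=description).launch()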