import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip, AudioFileClip
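# NOTE: the `moviepy.editor` import above targets moviepy 1.x; moviepy >= 2.0 removed that
# module (the equivalent import there is `from moviepy import VideoFileClip, AudioFileClip`)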
import librosa
import librosa.display
import soundfile as sf
import gradio as gr
import tempfile

# Function for displaying progress
def display_progress(percent, message, progress=gr.Progress()):
    progress(percent, desc=message)

# Function for extracting audio from video
def extract_audio(video_path, progress):
    display_progress(0.1, "Extracting audio from video", progress)
    video = VideoFileClip(video_path)
    audio_path = "extracted_audio.wav"
    video.audio.write_audiofile(audio_path)
    display_progress(0.2, "Audio extracted", progress)
    return audio_path

# Function for splitting the video into frames
def extract_frames(video_path, progress):
    display_progress(0.3, "Extracting frames from video", progress)
    video = cv2.VideoCapture(video_path)
    frames = []
    success, frame = video.read()
    while success:
        frames.append(frame)
        success, frame = video.read()
    video.release()
    display_progress(0.4, "Frames extracted", progress)
    return frames

# Convert a video frame to audio by treating the frame as a magnitude spectrogram
def frame_to_spectrogram(frame, sr=22050):
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Scale pixel values to [0, 100] and flip vertically so the top of the image
    # maps to the highest frequency bins
    S = np.flipud(gray_frame.astype(np.float32) / 255.0 * 100.0)
    # Griffin-Lim estimates the missing phase and reconstructs a time-domain waveform
    y = librosa.griffinlim(S)
    return y

# Save reconstructed audio to a WAV file (helper; not called in the pipeline below)
def save_audio(y, sr=22050):
    audio_path = 'output_frame_audio.wav'
    sf.write(audio_path, y, sr)
    return audio_path

# Saving frame spectrogram
def save_spectrogram_image(S, frame_number, temp_dir):
    plt.figure(figsize=(10, 4))
    librosa.display.specshow(S)
    plt.tight_layout()
    image_path = os.path.join(temp_dir, f'spectrogram_frame_{frame_number}.png')
    plt.savefig(image_path)
    plt.close()
    return image_path

# Processing all video frames
def process_video_frames(frames, sr=22050, temp_dir=None, progress=gr.Progress()):
    processed_frames = []
    total_frames = len(frames)
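    # For each frame: reinterpret the grayscale image as a magnitude spectrogram,
    # invert it to audio with Griffin-Lim, compute a mel spectrogram of that audio,
    # render it to a PNG, and read the PNG back in as the new video frame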
    for i, frame in enumerate(frames):
        y = frame_to_spectrogram(frame, sr)
        S = librosa.feature.melspectrogram(y=y, sr=sr)
        image_path = save_spectrogram_image(S, i, temp_dir)
        processed_frame = cv2.imread(image_path)
        processed_frames.append(processed_frame)
        display_progress(0.5 + (i + 1) / total_frames * 0.3, f"Processing frame {i + 1}/{total_frames}", progress)
    display_progress(0.8, "All frames processed", progress)
    return processed_frames

# Saving video from frames
def save_video_from_frames(frames, output_path, fps=30):
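    # NOTE: fps defaults to 30; if the source video uses a different frame rate,
    # the rebuilt video's duration will not match the extracted audio track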
    height, width, layers = frames[0].shape
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    for frame in frames:
        video.write(frame)
    video.release()

# Adding audio back to video
def add_audio_to_video(video_path, audio_path, output_path, progress):
    display_progress(0.9, "Adding audio back to video", progress)
    video = VideoFileClip(video_path)
    audio = AudioFileClip(audio_path)
    final_video = video.set_audio(audio)
    final_video.write_videofile(output_path, codec='libx264', audio_codec='aac')
    display_progress(1, "Video is ready", progress)

# Full processing pipeline (invoked from the Gradio interface below)
def process_video(video_path, progress=gr.Progress()):
    audio_path = extract_audio(video_path, progress)
    frames = extract_frames(video_path, progress)

    # Creating a temporary folder for saving frames
    with tempfile.TemporaryDirectory() as temp_dir:
        processed_frames = process_video_frames(frames, temp_dir=temp_dir, progress=progress)
        temp_video_path = os.path.join(temp_dir, 'processed_video.mp4')
        save_video_from_frames(processed_frames, temp_video_path)
        output_video_path = 'output_video_with_audio.mp4'
        add_audio_to_video(temp_video_path, audio_path, output_video_path, progress)
        return output_video_path

with gr.Blocks(title='Video from Spectrogram', theme=gr.themes.Soft(primary_hue="green", secondary_hue="green", spacing_size="sm", radius_size="lg")) as iface:
    
    with gr.Group():
        with gr.Row(variant='panel'):
            with gr.Column():
                gr.HTML("<center><h2><a href='https://t.me/pol1trees'>Telegram Channel</a></h2></center>")
            with gr.Column():
                gr.HTML("<center><h2><a href='https://t.me/+GMTP7hZqY0E4OGRi'>Telegram Chat</a></h2></center>")
            with gr.Column():
                gr.HTML("<center><h2><a href='https://www.youtube.com/channel/UCHb3fZEVxUisnqLqCrEM8ZA'>YouTube</a></h2></center>")
            with gr.Column():
                gr.HTML("<center><h2><a href='https://github.com/Bebra777228/Audio-Steganography'>GitHub</a></h2></center>")

    with gr.Column(variant='panel'):
        video_input = gr.Video(label="Upload video")
    with gr.Column(variant='panel'):
        generate_button = gr.Button("Generate")
    with gr.Column(variant='panel'):
        video_output = gr.Video(label="VideoSpectrogram")

        def gradio_video_process_fn(video_input, progress=gr.Progress()):
            return process_video(video_input, progress)

        generate_button.click(
            gradio_video_process_fn,
            inputs=[video_input],
            outputs=[video_output]
        )

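# share=True asks Gradio to create a temporary public link in addition to the local URL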
iface.launch(share=True)