import os
import gettext

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import librosa
import librosa.display
import gradio as gr
import soundfile as sf
# Prefer DejaVu Sans Bold; fall back to Liberation Sans Bold if it is missing.
font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
if not os.path.exists(font_path):
    font_path = "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf"  # Fallback font
# Handle missing translation files
locales_dir = 'locales'
try:
    lang = gettext.translation('base', localedir=locales_dir, languages=['id'])
    lang.install()
    _ = lang.gettext
except FileNotFoundError:
    print("Translation file not found, using default language.")
    _ = lambda s: s  # Fall back to the original string if no translation is available
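# Note: gettext looks for a compiled catalog at locales/id/LC_MESSAGES/base.mo.
# A minimal sketch of how such a catalog could be produced from a .po source
# (the locales/ layout is an assumption about this project, not something it ships):
#   msgfmt locales/id/LC_MESSAGES/base.po -o locales/id/LC_MESSAGES/base.mo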
# Function for creating a spectrogram image with text
def text_to_spectrogram_image(text, base_width=512, height=256, max_font_size=80, margin=10, letter_spacing=5):
    # Use the module-level font path (which already handles the Liberation fallback);
    # if neither font exists, fall back to PIL's built-in bitmap font.
    if os.path.exists(font_path):
        font = ImageFont.truetype(font_path, max_font_size)
    else:
        font = ImageFont.load_default()

    # Measure the total text width character by character, including letter spacing.
    image = Image.new('L', (base_width, height), 'black')
    draw = ImageDraw.Draw(image)
    text_width = 0
    for char in text:
        text_bbox = draw.textbbox((0, 0), char, font=font)
        text_width += text_bbox[2] - text_bbox[0] + letter_spacing
    text_width -= letter_spacing

    # Widen the canvas if the text plus margins does not fit into the base width.
    if text_width + margin * 2 > base_width:
        width = text_width + margin * 2
    else:
        width = base_width

    # Draw the text centred on a fresh black canvas, one character at a time.
    image = Image.new('L', (width, height), 'black')
    draw = ImageDraw.Draw(image)
    text_x = (width - text_width) // 2
    text_y = (height - (text_bbox[3] - text_bbox[1])) // 2
    for char in text:
        draw.text((text_x, text_y), char, font=font, fill='white')
        char_bbox = draw.textbbox((0, 0), char, font=font)
        text_x += char_bbox[2] - char_bbox[0] + letter_spacing

    # Binarize: every lit pixel becomes pure white.
    image = np.array(image)
    image = np.where(image > 0, 255, image)
    return image
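# Rough usage sketch (the values are just the defaults above, not project requirements):
#   img = text_to_spectrogram_image("HELLO")
#   # -> 2-D grayscale array, 256 rows tall, ready for spectrogram_image_to_audio()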
# Converting an image to audio
def spectrogram_image_to_audio(image, sr=22050):
    # Flip vertically so the top of the image lands in the high frequencies
    # (spectrogram viewers draw low frequencies at the bottom).
    flipped_image = np.flipud(image)
    # Treat pixel brightness as spectral magnitude and invert it with Griffin-Lim.
    S = flipped_image.astype(np.float32) / 255.0 * 100.0
    y = librosa.griffinlim(S)
    return y
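# Note: librosa.griffinlim infers the FFT size from the number of image rows
# (n_fft = 2 * (rows - 1)), so a 256-pixel-tall image implies n_fft = 510.
# The `sr` parameter is not used by Griffin-Lim itself; callers pass the sample
# rate separately when writing the resulting waveform to disk.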
# Function for creating an audio file and spectrogram from text
def create_audio_with_spectrogram(text, base_width, height, max_font_size, margin, letter_spacing):
    spec_image = text_to_spectrogram_image(text, base_width, height, max_font_size, margin, letter_spacing)
    y = spectrogram_image_to_audio(spec_image)

    audio_path = 'output.wav'
    sf.write(audio_path, y, 22050)

    image_path = 'spectrogram.png'
    plt.imsave(image_path, spec_image, cmap='gray')

    return audio_path, image_path
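# Standalone usage sketch, bypassing the Gradio UI (argument values are just the
# interface defaults used below, not anything this function requires):
#   audio_path, image_path = create_audio_with_spectrogram("HELLO", 512, 256, 80, 10, 5)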
# Function for displaying the spectrogram of an audio file
def display_audio_spectrogram(audio_path):
    y, sr = librosa.load(audio_path)
    S = librosa.feature.melspectrogram(y=y, sr=sr)
    S_dB = librosa.power_to_db(S, ref=np.max)

    plt.figure(figsize=(10, 4))
    librosa.display.specshow(S_dB)
    plt.tight_layout()

    spectrogram_path = 'uploaded_spectrogram.png'
    plt.savefig(spectrogram_path)
    plt.close()
    return spectrogram_path
# Converting an uploaded image into audio whose spectrogram reproduces the image
def image_to_spectrogram_audio(image_path, sr=22050):
    image = Image.open(image_path).convert('L')  # grayscale
    image = np.array(image)
    y = spectrogram_image_to_audio(image, sr)

    img2audio_path = 'image_to_audio_output.wav'
    sf.write(img2audio_path, y, sr)
    return img2audio_path
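# Note: the uploaded image is used at its original resolution, so its height sets
# the implied FFT size (see the note on spectrogram_image_to_audio) and very tall
# or very wide images can make Griffin-Lim noticeably slow.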
information = _("""
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Steganography Information</title>
</head>
<body>
    <h1>Please read this first.</h1>
    <p>Before using this interface, read about what steganography is.</p>

    <h2>What is STEGANOGRAPHY?</h2>
    <p>
        Steganography is a method of hiding information inside other information or a physical object
        in such a way that it cannot be detected. Using steganography, you can hide almost any digital
        content, including text, images, audio, and video files.
    </p>
    <p>
        In this interface, steganography is used to hide text or an image in the spectrogram of a sound.
    </p>

    <img src="https://github.com/user-attachments/assets/972b9e72-d8dc-43f7-a57a-a09a44aa5419" alt="Hidden Image 1">
    <img src="https://github.com/user-attachments/assets/3ceec1ff-afce-4b4a-a387-2b6e589234f7" alt="Hidden Image 2">
</body>
</html>
""")
# Gradio interface
with gr.Blocks(
    title=_('Audio Steganography'),
    theme="Hev832/Applio",
) as iface:
    gr.Markdown(_("# Audio Steganography"))

    # Header row with project links
    with gr.Group():
        with gr.Row(variant='panel'):
            with gr.Column():
                gr.HTML(_("<center><h2><a href='https://t.me/pol1trees'>Telegram Channel</a></h2></center>"))
            with gr.Column():
                gr.HTML(_("<center><h2><a href='https://t.me/+GMTP7hZqY0E4OGRi'>Telegram Chat</a></h2></center>"))
            with gr.Column():
                gr.HTML(_("<center><h2><a href='https://www.youtube.com/channel/UCHb3fZEVxUisnqLqCrEM8ZA'>YouTube</a></h2></center>"))
            with gr.Column():
                gr.HTML(_("<center><h2><a href='https://github.com/Bebra777228/Audio-Steganography'>GitHub</a></h2></center>"))

    with gr.Tab(_("INFO")):
        gr.HTML(information)
with gr.Tab(_("Text to Spectrogram")): | |
gr.HTML(_("<center><h2>Oh my god people, learn to read. Go to the “INFO” tab, it says what this interface is and what it is for, don't be idiots.</h2></center>")) | |
with gr.Group(): | |
text = gr.Textbox(lines=2, placeholder=_("Enter your text:"), label=_("Text")) | |
with gr.Row(variant='panel'): | |
base_width = gr.Slider(value=512, label=_("Image Width"), visible=False) | |
height = gr.Slider(value=256, label=_("Image Height"), visible=False) | |
max_font_size = gr.Slider(minimum=10, maximum=130, step=5, value=80, label=_("Font size")) | |
margin = gr.Slider(minimum=0, maximum=50, step=1, value=10, label=_("Indent")) | |
letter_spacing = gr.Slider(minimum=0, maximum=50, step=1, value=5, label=_("Letter spacing")) | |
generate_button = gr.Button(_("Generate")) | |
with gr.Column(variant='panel'): | |
with gr.Group(): | |
output_audio = gr.Audio(type="filepath", label=_("Generated audio")) | |
output_image = gr.Image(type="filepath", label=_("Spectrogram")) | |
def gradio_interface_fn(text, base_width, height, max_font_size, margin, letter_spacing): | |
print("\n", text) | |
return create_audio_with_spectrogram(text, base_width, height, max_font_size, margin, letter_spacing) | |
generate_button.click( | |
gradio_interface_fn, | |
inputs=[text, base_width, height, max_font_size, margin, letter_spacing], | |
outputs=[output_audio, output_image] | |
) | |
with gr.Tab(_("Image to Spectrogram")): | |
gr.HTML(_("<center><h2>Oh my god people, learn to read. Go to the “INFO” tab, it says what this interface is and what it is for, don't be idiots.</h2></center>")) | |
with gr.Group(): | |
with gr.Row(variant='panel'): | |
upload_image = gr.Image(type="filepath", label=_("Upload image")) | |
convert_button = gr.Button(_("Convert to audio")) | |
with gr.Column(variant='panel'): | |
output_audio_from_image = gr.Audio(type="filepath", label=_("Generated audio")) | |
def gradio_image_to_audio_fn(upload_image): | |
return image_to_spectrogram_audio(upload_image) | |
convert_button.click( | |
gradio_image_to_audio_fn, | |
inputs=[upload_image], | |
outputs=[output_audio_from_image] | |
) | |
with gr.Tab(_("Audio Spectrogram")): | |
with gr.Group(): | |
with gr.Row(variant='panel'): | |
upload_audio = gr.Audio(type="filepath", label=_("Upload audio"), scale=3) | |
decode_button = gr.Button(_("Show spectrogram"), scale=2) | |
with gr.Column(variant='panel'): | |
decoded_image = gr.Image(type="filepath", label=_("Audio Spectrogram")) | |
def gradio_decode_fn(upload_audio): | |
return display_audio_spectrogram(upload_audio) | |
decode_button.click( | |
gradio_decode_fn, | |
inputs=[upload_audio], | |
outputs=[decoded_image] | |
) | |
iface.launch(share=True, debug=True)