{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"Rfsio4paNmF-"},"outputs":[],"source":["%%capture\n","# %pip (not !pip) installs into the kernel's own environment.\n","%pip install datasets==1.4.1\n","%pip install transformers\n","%pip install torchaudio\n","%pip install librosa\n","%pip install jiwer\n","%pip install pydub==0.25.1"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"UW_rtVdGdoEt"},"outputs":[],"source":["import os, sys\n","import torch\n","import torchaudio\n","import librosa\n","import numpy as np\n","from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC\n","from pydub import AudioSegment"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"WXr3fpZqXdw4"},"outputs":[],"source":["# Mount Google Drive (Colab) so the fine-tuned checkpoint is reachable.\n","from google.colab import drive\n","drive.mount('/content/drive/')"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"iNIfh73xWXAm"},"outputs":[],"source":["# Load the fine-tuned wav2vec2-xlsr checkpoint and its processor from Drive.\n","# Inference is run on CPU here.\n","use_device = torch.device(\"cpu\")\n","model = Wav2Vec2ForCTC.from_pretrained(\"/content/drive/MyDrive/machine-learning/speech-to-text/huggingface/pytorch/finetuning/dataset-comparison/wav2vec2-xlsr/checkpoint-141200\").to(use_device)\n","processor = Wav2Vec2Processor.from_pretrained(\"/content/drive/MyDrive/machine-learning/speech-to-text/huggingface/pytorch/finetuning/dataset-comparison/wav2vec2-xlsr/processor\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"TDqul7nYWUvb"},"outputs":[],"source":["audio_path = \"/content/drive/MyDrive/machine-learning/speech-to-text/data/predict/id/testing_1.wav\"\n","filename = os.path.basename(audio_path)\n","# os.path.splitext is robust to filenames that contain extra dots\n","# (the original split(\".\") picked the wrong segment in that case).\n","audio_filename, audio_ext = os.path.splitext(filename)\n","audio_format = audio_ext.lstrip(\".\")\n","audio = AudioSegment.from_file_using_temporary_files(audio_path, format=audio_format)\n","audio_filename = \"/content/\" + audio_filename + \".mp3\"\n","# Export as mono 16 kHz, the sampling rate the model was fine-tuned on.\n","audio.export(audio_filename, format=\"mp3\", parameters=[\"-ac\", \"1\", \"-ar\", \"16000\"])"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"LuPSy0CVYBXU"},"outputs":[],"source":["def resample(speech_array, sampling_rate, target_rate=16_000):\n","    \"\"\"Resample a 1-D waveform to ``target_rate`` Hz.\n","\n","    Generalizes the original 48k/44.1k/32k special cases: any rate other\n","    than ``target_rate`` is resampled (e.g. 22 050 Hz was silently passed\n","    through unresampled before). Keyword arguments are used because\n","    ``orig_sr``/``target_sr`` are keyword-only in librosa >= 0.10; they\n","    also work as keywords on older librosa versions.\n","    \"\"\"\n","    if sampling_rate != target_rate:\n","        speech_array = librosa.resample(np.asarray(speech_array), orig_sr=sampling_rate, target_sr=target_rate)\n","    return speech_array"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"7jzbDTW0YGun"},"outputs":[],"source":["speech_array, sampling_rate = torchaudio.load(audio_filename)\n","speech_array = resample(speech_array[0].numpy(), sampling_rate)\n","# Single processor call; the original ran the feature extractor twice,\n","# feeding already-normalized values back in and normalizing them again.\n","input_dict = processor(speech_array, sampling_rate=16_000, return_tensors=\"pt\", padding=True)"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"EM3SNKTQYJtW"},"outputs":[],"source":["with torch.no_grad():  # inference only -- skip autograd bookkeeping\n","    logits = model(input_dict.input_values.to(use_device)).logits\n","pred_ids = torch.argmax(logits, dim=-1)[0]\n","text = processor.decode(pred_ids)\n","print(text)"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"p_zvrwOfN-9Y"},"outputs":[],"source":["# Self-contained example: transcribe an mp3 with a fine-tuned wav2vec2 model.\n","import numpy as np\n","import torch\n","from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\n","from torchaudio.transforms import Resample\n","from IPython.display import Audio\n","from pydub import AudioSegment\n","\n","# Replace with the path to your fine-tuned model\n","fine_tuned_model_path = \"path_to_your_fine_tuned_model\"\n","\n","# Load the processor and fine-tuned model\n","processor = Wav2Vec2Processor.from_pretrained(fine_tuned_model_path)\n","model = Wav2Vec2ForCTC.from_pretrained(fine_tuned_model_path)\n","\n","# Load the audio file in mp3 format; force mono so the sample array is 1-D\n","mp3_audio_file_path = \"path_to_your_audio_file.mp3\"\n","audio = AudioSegment.from_file(mp3_audio_file_path, format=\"mp3\").set_channels(1)\n","\n","# BUG FIX: audio.raw_data is a bytes object -- torch.tensor() on it does not\n","# decode PCM samples. Decode explicitly and normalize to [-1, 1] using the\n","# sample width (e.g. 2 bytes -> divide by 2**15).\n","samples = np.array(audio.get_array_of_samples()).astype(np.float32)\n","samples /= float(1 << (8 * audio.sample_width - 1))\n","waveform, sample_rate = torch.from_numpy(samples), audio.frame_rate\n","if sample_rate != processor.feature_extractor.sampling_rate:\n","    resampler = Resample(orig_freq=sample_rate, new_freq=processor.feature_extractor.sampling_rate)\n","    waveform = resampler(waveform)\n","\n","# Tokenize and transcribe (no_grad: inference only)\n","input_values = processor(waveform.numpy(), sampling_rate=processor.feature_extractor.sampling_rate, return_tensors=\"pt\", padding=\"longest\").input_values\n","with torch.no_grad():\n","    logits = model(input_values).logits\n","    predicted_ids = torch.argmax(logits, dim=-1)\n","\n","# Decode the predicted transcription\n","transcription = processor.batch_decode(predicted_ids)[0]\n","print(\"Predicted Transcription:\", transcription)\n","\n","# Play the original mp3 audio\n","Audio(mp3_audio_file_path)\n"]}],"metadata":{"colab":{"provenance":[],"authorship_tag":"ABX9TyMj/sdpm6gj8SN2GzFFGg8D"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}