{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "d714ec09",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "de0bc82a07684c66aa442c58852e4c97",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Button(button_style='success', description='✔ Success', style=ButtonStyle())"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "!pip install gradio\n",
    "clear_output()\n",
    "Button(description=\"\\u2714 Success\", button_style=\"success\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e3527ddf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/haive/voice_clone_rvc\n"
     ]
    }
   ],
   "source": [
    "cd /home/haive/voice_clone_rvc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "97f77d88",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[0m\u001b[01;34mdataset\u001b[0m/  RVC_Clone.ipynb  \u001b[01;34mrvc_implementation\u001b[0m/  \u001b[01;34mVoiceConversionWebUI\u001b[0m/\r\n"
     ]
    }
   ],
   "source": [
    "ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "33b91808",
   "metadata": {},
   "outputs": [],
   "source": [
    "from IPython.display import clear_output\n",
    "from ipywidgets import Button\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "dab76631",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--2023-11-16 08:07:36--  https://huggingface.co/Rejekts/project/resolve/main/project-main.zip\n",
      "Resolving huggingface.co (huggingface.co)... 18.238.49.10, 18.238.49.70, 18.238.49.112, ...\n",
      "Connecting to huggingface.co (huggingface.co)|18.238.49.10|:443... connected.\n",
      "HTTP request sent, awaiting response... 302 Found\n",
      "Location: https://cdn-lfs.huggingface.co/repos/75/c8/75c8d4b069a1863fc7f85ffabaff4b3755f0b320f4254aad27abf080b56d6f7c/dcfb833d0514f8d3558abb5fca9975f9093c75ff7b316fd395e2b80e358a3769?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27project-main.zip%3B+filename%3D%22project-main.zip%22%3B&response-content-type=application%2Fzip&Expires=1700381256&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwMDM4MTI1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy83NS9jOC83NWM4ZDRiMDY5YTE4NjNmYzdmODVmZmFiYWZmNGIzNzU1ZjBiMzIwZjQyNTRhYWQyN2FiZjA4MGI1NmQ2ZjdjL2RjZmI4MzNkMDUxNGY4ZDM1NThhYmI1ZmNhOTk3NWY5MDkzYzc1ZmY3YjMxNmZkMzk1ZTJiODBlMzU4YTM3Njk%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=Cm3Upo7Zibh5fREHF0nlLLA%7ERCalOW3PNa-xHM1%7EaPFAFb939GyZVIbFhTGT2ZDHa2pdxxfW4BCIX-umB78-oZWECCthqa7n4IEnKjNj2dPwdNGiv6PrtsbazKvW10DeLT18tR9xYKoj5HJpC37eLqrRIlc7ZM6WcDEUD03XcpecoyX0IWCYSnnIoG9RhbYZ2D0ovU%7EYHJd3HE1EFBZZECekHdu-C46RHbuz-cnrD9LYf5-VeVlp1rKmVd92dgz72-65Q39Yfa1c%7EP1vQST8jC1x0AA3ogcaVLsFEXacQ71hlWi70-mDiCNn2DF1XXRoTDuCBVJJFYWwKdZ3AEGUYw__&Key-Pair-Id=KVTP0A1DKRTAX [following]\n",
      "--2023-11-16 08:07:36--  https://cdn-lfs.huggingface.co/repos/75/c8/75c8d4b069a1863fc7f85ffabaff4b3755f0b320f4254aad27abf080b56d6f7c/dcfb833d0514f8d3558abb5fca9975f9093c75ff7b316fd395e2b80e358a3769?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27project-main.zip%3B+filename%3D%22project-main.zip%22%3B&response-content-type=application%2Fzip&Expires=1700381256&Policy=eyJTdGF0ZW1lbnQiOlt7IkNvbmRpdGlvbiI6eyJEYXRlTGVzc1RoYW4iOnsiQVdTOkVwb2NoVGltZSI6MTcwMDM4MTI1Nn19LCJSZXNvdXJjZSI6Imh0dHBzOi8vY2RuLWxmcy5odWdnaW5nZmFjZS5jby9yZXBvcy83NS9jOC83NWM4ZDRiMDY5YTE4NjNmYzdmODVmZmFiYWZmNGIzNzU1ZjBiMzIwZjQyNTRhYWQyN2FiZjA4MGI1NmQ2ZjdjL2RjZmI4MzNkMDUxNGY4ZDM1NThhYmI1ZmNhOTk3NWY5MDkzYzc1ZmY3YjMxNmZkMzk1ZTJiODBlMzU4YTM3Njk%7EcmVzcG9uc2UtY29udGVudC1kaXNwb3NpdGlvbj0qJnJlc3BvbnNlLWNvbnRlbnQtdHlwZT0qIn1dfQ__&Signature=Cm3Upo7Zibh5fREHF0nlLLA%7ERCalOW3PNa-xHM1%7EaPFAFb939GyZVIbFhTGT2ZDHa2pdxxfW4BCIX-umB78-oZWECCthqa7n4IEnKjNj2dPwdNGiv6PrtsbazKvW10DeLT18tR9xYKoj5HJpC37eLqrRIlc7ZM6WcDEUD03XcpecoyX0IWCYSnnIoG9RhbYZ2D0ovU%7EYHJd3HE1EFBZZECekHdu-C46RHbuz-cnrD9LYf5-VeVlp1rKmVd92dgz72-65Q39Yfa1c%7EP1vQST8jC1x0AA3ogcaVLsFEXacQ71hlWi70-mDiCNn2DF1XXRoTDuCBVJJFYWwKdZ3AEGUYw__&Key-Pair-Id=KVTP0A1DKRTAX\n",
      "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 18.164.116.15, 18.164.116.111, 18.164.116.106, ...\n",
      "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|18.164.116.15|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 1510847 (1.4M) [application/zip]\n",
      "Saving to: ‘/home/haive/voice_clone_rvc/project-main.zip’\n",
      "\n",
      "/home/haive/voice_c 100%[===================>]   1.44M  --.-KB/s    in 0.04s   \n",
      "\n",
      "2023-11-16 08:07:36 (34.8 MB/s) - ‘/home/haive/voice_clone_rvc/project-main.zip’ saved [1510847/1510847]\n",
      "\n",
      "Archive:  project-main.zip\n",
      "1938c86022d19218de9a37c2af53423ea111b80e\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.env  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/PULL_REQUEST_TEMPLATE.md  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/workflows/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/workflows/docker.yml  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/workflows/genlocale.yml  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/workflows/pull_format.yml  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/workflows/push_format.yml  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.github/workflows/unitest.yml  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/.gitignore  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/Dockerfile  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/GUI.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/LICENSE  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/MIT协议暨相关引用库协议  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/README.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/Retrieval_based_Voice_Conversion_WebUI.ipynb  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/a.png  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/app.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/hubert/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/hubert/.gitignore  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/pretrained/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/pretrained/.gitignore  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/pretrained_v2/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/pretrained_v2/.gitignore  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/rmvpe/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/rmvpe/.gitignore  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/uvr5_weights/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/uvr5_weights/.gitignore  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/weights/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/assets/weights/.gitignore  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/audios/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/audios/somegirl.mp3  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/audios/someguy.mp3  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/audios/unachica.mp3  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/audios/unchico.mp3  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/config.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/config.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v1/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v1/32k.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v1/40k.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v1/48k.json  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v2/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v2/32k.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/configs/v2/48k.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docker-compose.yml  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/cn/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/cn/Changelog_CN.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/cn/faq.md  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/en/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/en/Changelog_EN.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/en/README.en.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/en/faiss_tips_en.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/en/faq_en.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/en/training_tips_en.md  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/fr/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/fr/Changelog_FR.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/fr/README.fr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/fr/faiss_tips_fr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/fr/faq_fr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/fr/training_tips_fr.md  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/jp/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/jp/README.ja.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/jp/faiss_tips_ja.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/jp/training_tips_ja.md  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/kr/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/kr/Changelog_KO.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/kr/README.ko.han.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/kr/README.ko.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/kr/faiss_tips_ko.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/kr/training_tips_ko.md  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/tr/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/tr/Changelog_TR.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/tr/README.tr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/tr/faiss_tips_tr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/tr/faq_tr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/tr/training_tips_tr.md  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/docs/小白简易教程.doc  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/download_files.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/environment_dml.yaml  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/go-realtime-gui-dml.bat  \n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/go-realtime-gui.bat  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/go-web-dml.bat  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/go-web.bat  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/gui_v1.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/i18n.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/en_US.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/es_ES.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/fr_FR.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/it_IT.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/ja_JP.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/ru_RU.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/tr_TR.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/zh_CN.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/zh_HK.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/zh_SG.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale/zh_TW.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/locale_diff.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/i18n/scan_i18n.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer-web.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/audio.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/attentions.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/commons.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/models.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/models_onnx.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/F0Predictor/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py  \n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/modules/F0Predictor/__init__.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/onnx_inference.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/infer_pack/transforms.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/rmvpe.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/slicer2.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/train/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/train/data_utils.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/train/losses.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/train/mel_processing.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/train/process_ckpt.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/train/utils.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/dataset.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers_123812KB .py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers_33966KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers_537238KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/layers_new.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/model_param_init.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr16000_hl512.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr32000_hl512.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr33075_hl384.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl1024.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl256.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/1band_sr44100_hl512_cut.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/2band_32000.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/2band_44100_lofi.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/2band_48000.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/3band_44100.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/3band_44100_mid.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/3band_44100_msb2.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_44100.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_44100_mid.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_44100_msb2.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_44100_reverse.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_44100_sw.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2_sn.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/modelparams/ensemble.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_123812KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_123821KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_33966KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_537227KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_537238KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_61968KB.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/nets_new.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/lib_v5/spec_utils.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/name_params.json  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/lib/uvr5_pack/utils.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/ipex/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/ipex/__init__.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/ipex/attention.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/ipex/gradscaler.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/ipex/hijacks.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/onnx/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/onnx/export.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/extract/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/extract/extract_f0_print.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/extract/extract_f0_rmvpe.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/extract/extract_f0_rmvpe_dml.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/extract_feature_print.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/preprocess.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/train/train.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/uvr5/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/uvr5/mdxnet.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/uvr5/modules.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/uvr5/preprocess.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/vc/\n",
      " extracting: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/vc/__init__.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/vc/modules.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/vc/pipeline.py  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/vc/utils.py  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/0_gt_wavs/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/0_gt_wavs/mute32k.wav  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/0_gt_wavs/mute40k.wav  \n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/0_gt_wavs/mute48k.wav  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/1_16k_wavs/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/1_16k_wavs/mute.wav  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/2a_f0/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/2a_f0/mute.wav.npy  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/2b-f0nsf/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/2b-f0nsf/mute.wav.npy  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/3_feature256/\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/3_feature256/mute.npy  \n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/3_feature768/\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/logs/mute/3_feature768/mute.npy  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/poetry.lock  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/pyproject.toml  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/requirements-dml.txt  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/requirements-ipex.txt  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/requirements-safe.txt  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/requirements-win-for-realtime_vc_gui-dml.txt  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/requirements-win-for-realtime_vc_gui.txt  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/requirements.txt  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/run.sh  \r\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/\r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/app.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/calc_rvc_model_similarity.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/dlmodels.bat  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/dlmodels.sh  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/export_onnx.py  \r\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer/\r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer/infer-pm-index256.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer/train-index-v2.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer/train-index.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer/trans_weights.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer_batch_rvc.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/infer_cli.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/onnx_inference_demo.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/rvc_for_realtime.py  \r\n",
      "   creating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/torchgate/\r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/torchgate/__init__.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/torchgate/torchgate.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/tools/torchgate/utils.py  \r\n",
      "  inflating: /home/haive/voice_clone_rvc/rvc_implementation/project-main/venv.sh  \r\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists('/home/haive/voice_clone_rvc'):\n",
    "    print(\"Folder Not Available. Creating New Folder.\")\n",
    "    os.makedirs('/home/haive/voice_clone_rvc')\n",
    "source = \"Rejekts\"\n",
    "!wget https://huggingface.co/{source}/project/resolve/main/project-main.zip -O '/home/haive/voice_clone_rvc/project-main.zip' && unzip -n 'project-main.zip' -d /home/haive/voice_clone_rvc/rvc_implementation/\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "1bbc6f88",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ed6ee79a9645425f827960d9b7ab81e1",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Button(button_style='success', description='✔ Success', style=ButtonStyle())"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "!cd '/home/haive/voice_clone_rvc/rvc_implementation/project-main' && python download_files.py && pip install -r 'requirements-safe.txt'\n",
    "!pip install pyngrok\n",
    "!rm /home/haive/voice_clone_rvc/project-main.zip\n",
    "!rm -r /home/haive/voice_clone_rvc/sample_data\n",
    "!mkdir -p /home/haive/voice_clone_rvc/dataset\n",
    "clear_output()\n",
    "Button(description=\"\\u2714 Success\", button_style=\"success\")"
   ]
  },
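  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f0a7c21",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check (not part of the original flow): confirm the pretrained\n",
    "# checkpoints that the training cell further below expects were fetched by\n",
    "# download_files.py. The pretrained_v2 paths are taken from that cell; the\n",
    "# hubert/rmvpe file names are assumptions based on the standard RVC layout.\n",
    "import os\n",
    "base = '/home/haive/voice_clone_rvc/rvc_implementation/project-main'\n",
    "expected = [\n",
    "    'assets/pretrained_v2/f0G40k.pth',\n",
    "    'assets/pretrained_v2/f0D40k.pth',\n",
    "    'assets/hubert/hubert_base.pt',\n",
    "    'assets/rmvpe/rmvpe.pt',\n",
    "]\n",
    "for rel in expected:\n",
    "    status = 'OK  ' if os.path.exists(os.path.join(base, rel)) else 'MISS'\n",
    "    print(status, rel)"
   ]
  },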
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "692fd3d2",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3699bafea0cd40e19b709fefd9b4fddc",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Button(button_style='success', description='✔ Success', style=ButtonStyle())"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "#@title 1.Preprocess Data\n",
    "%cd /home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
    "model_name = 'test2' #@param {type:\"string\"}\n",
    "#@markdown <small> Enter the path to your dataset folder (a folder with audios of the vocals you will train on), or if you want just upload the audios using the File Manager into the 'dataset' folder.\n",
    "dataset_folder = '/home/haive/voice_clone_rvc/dataset/test2' #@param {type:\"string\"}\n",
    "while len(os.listdir(dataset_folder)) < 1:\n",
    "    input(\"Your dataset folder is empty.\")\n",
    "!mkdir -p ./logs/{model_name}\n",
    "with open(f'./logs/{model_name}/preprocess.log','w') as f:\n",
    "    print(\"Starting...\")\n",
    "!python infer/modules/train/preprocess.py {dataset_folder} 40000 2 ./logs/{model_name} False 3.0 > /dev/null 2>&1\n",
    "with open(f'./logs/{model_name}/preprocess.log','r') as f:\n",
    "    if 'end preprocess' in f.read():\n",
    "        clear_output()\n",
    "        display(Button(description=\"\\u2714 Success\", button_style=\"success\"))\n",
    "    else:\n",
    "        print(\"Error preprocessing data... Make sure your dataset folder is correct.\")"
   ]
  },
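  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b8d2e47",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional check (not part of the original notebook): after preprocessing,\n",
    "# ./logs/<model_name> should hold the sliced training audio. The directory\n",
    "# names mirror the logs/mute layout shipped with the repo (0_gt_wavs, 1_16k_wavs).\n",
    "import os\n",
    "log_dir = f'./logs/{model_name}'\n",
    "for sub in ('0_gt_wavs', '1_16k_wavs'):\n",
    "    path = os.path.join(log_dir, sub)\n",
    "    count = len(os.listdir(path)) if os.path.isdir(path) else 0\n",
    "    print(f'{sub}: {count} files')"
   ]
  },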
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "1699772d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e6eeaffb9c6c4252a2bb8a3d828a2d02",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Button(button_style='success', description='✔ Success', style=ButtonStyle())"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "#@title 2.Extract Features\n",
    "f0method = \"rmvpe_gpu\" # @param [\"pm\", \"harvest\", \"rmvpe\", \"rmvpe_gpu\"]\n",
    "%cd /home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
    "with open(f'./logs/{model_name}/extract_f0_feature.log','w') as f:\n",
    "    print(\"Starting...\")\n",
    "if f0method != \"rmvpe_gpu\":\n",
    "    !python infer/modules/train/extract/extract_f0_print.py ./logs/{model_name} 2 {f0method}\n",
    "else:\n",
    "    !python infer/modules/train/extract/extract_f0_rmvpe.py 1 0 0 ./logs/{model_name} True\n",
    "!python infer/modules/train/extract_feature_print.py cuda:0 1 0 0 ./logs/{model_name} v2\n",
    "with open(f'./logs/{model_name}/extract_f0_feature.log','r') as f:\n",
    "    if 'all-feature-done' in f.read():\n",
    "        clear_output()\n",
    "        display(Button(description=\"\\u2714 Success\", button_style=\"success\"))\n",
    "    else:\n",
    "        print(\"Error preprocessing data... Make sure your data was preprocessed.\")"
   ]
  },
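  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c1e9f03",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional check (not part of the original notebook): load one extracted f0\n",
    "# curve and one feature matrix to confirm the extraction step produced sane\n",
    "# arrays. Directory names follow the logs/mute layout (2a_f0, 2b-f0nsf, and\n",
    "# 3_feature768 for v2 models).\n",
    "import os\n",
    "import numpy as np\n",
    "log_dir = f'./logs/{model_name}'\n",
    "for sub in ('2a_f0', '2b-f0nsf', '3_feature768'):\n",
    "    path = os.path.join(log_dir, sub)\n",
    "    files = sorted(os.listdir(path)) if os.path.isdir(path) else []\n",
    "    if files:\n",
    "        arr = np.load(os.path.join(path, files[0]))\n",
    "        print(f'{sub}: {len(files)} files, first array shape {arr.shape}')\n",
    "    else:\n",
    "        print(f'{sub}: empty or missing')"
   ]
  },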
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "fbe7950c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e93dd320bea0408cb7bb2974b3f5ecf4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Button(button_style='success', description='✔ Success', style=ButtonStyle())"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "#@title 3.Train Index\n",
    "import numpy as np\n",
    "import faiss\n",
    "%cd /home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
    "def train_index(exp_dir1, version19):\n",
    "    exp_dir = \"logs/%s\" % (exp_dir1)\n",
    "    os.makedirs(exp_dir, exist_ok=True)\n",
    "    feature_dir = (\n",
    "        \"%s/3_feature256\" % (exp_dir)\n",
    "        if version19 == \"v1\"\n",
    "        else \"%s/3_feature768\" % (exp_dir)\n",
    "    )\n",
    "    if not os.path.exists(feature_dir):\n",
    "        return \"feature_dir path doesn't exist\"\n",
    "    listdir_res = list(os.listdir(feature_dir))\n",
    "    if len(listdir_res) == 0:\n",
    "        return \"feature_dir path doesn't contain any data\"\n",
    "    infos = []\n",
    "    npys = []\n",
    "    for name in sorted(listdir_res):\n",
    "        phone = np.load(\"%s/%s\" % (feature_dir, name))\n",
    "        npys.append(phone)\n",
    "    big_npy = np.concatenate(npys, 0)\n",
    "    big_npy_idx = np.arange(big_npy.shape[0])\n",
    "    np.random.shuffle(big_npy_idx)\n",
    "    big_npy = big_npy[big_npy_idx]\n",
    "    if big_npy.shape[0] > 2e5:\n",
    "        infos.append(\"Trying doing kmeans %s shape to 10k centers.\" % big_npy.shape[0])\n",
    "        yield \"\\n\".join(infos)\n",
    "        try:\n",
    "            big_npy = (\n",
    "                MiniBatchKMeans(\n",
    "                    n_clusters=10000,\n",
    "                    verbose=True,\n",
    "                    batch_size=256 * config.n_cpu,\n",
    "                    compute_labels=False,\n",
    "                    init=\"random\",\n",
    "                )\n",
    "                .fit(big_npy)\n",
    "                .cluster_centers_\n",
    "            )\n",
    "        except:\n",
    "            info = traceback.format_exc()\n",
    "            logger.info(info)\n",
    "            infos.append(info)\n",
    "            yield \"\\n\".join(infos)\n",
    "\n",
    "    np.save(\"%s/total_fea.npy\" % exp_dir, big_npy)\n",
    "    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)\n",
    "    infos.append(\"%s,%s\" % (big_npy.shape, n_ivf))\n",
    "    yield \"\\n\".join(infos)\n",
    "    index = faiss.index_factory(256 if version19 == \"v1\" else 768, \"IVF%s,Flat\" % n_ivf)\n",
    "    infos.append(\"training\")\n",
    "    yield \"\\n\".join(infos)\n",
    "    index_ivf = faiss.extract_index_ivf(index)  #\n",
    "    index_ivf.nprobe = 1\n",
    "    index.train(big_npy)\n",
    "    faiss.write_index(\n",
    "        index,\n",
    "        \"%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index\"\n",
    "        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),\n",
    "    )\n",
    "\n",
    "    infos.append(\"adding\")\n",
    "    yield \"\\n\".join(infos)\n",
    "    batch_size_add = 8192\n",
    "    for i in range(0, big_npy.shape[0], batch_size_add):\n",
    "        index.add(big_npy[i : i + batch_size_add])\n",
    "    faiss.write_index(\n",
    "        index,\n",
    "        \"%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index\"\n",
    "        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),\n",
    "    )\n",
    "    infos.append(\n",
    "        \"成功构建索引,added_IVF%s_Flat_nprobe_%s_%s_%s.index\"\n",
    "        % (n_ivf, index_ivf.nprobe, exp_dir1, version19)\n",
    "    )\n",
    "\n",
    "training_log = train_index(model_name, 'v2')\n",
    "\n",
    "for line in training_log:\n",
    "    print(line)\n",
    "    if 'adding' in line:\n",
    "        clear_output()\n",
    "        display(Button(description=\"\\u2714 Success\", button_style=\"success\"))"
   ]
  },
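  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d4b6a85",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sanity check (not part of the original notebook): reload the\n",
    "# added_*.index file written above and run a 1-nearest-neighbour query with a\n",
    "# feature vector taken from 3_feature768, just to confirm the FAISS index is usable.\n",
    "import glob\n",
    "import numpy as np\n",
    "import faiss\n",
    "index_paths = sorted(glob.glob(f'logs/{model_name}/added_*.index'))\n",
    "if index_paths:\n",
    "    index = faiss.read_index(index_paths[-1])\n",
    "    feat_files = sorted(glob.glob(f'logs/{model_name}/3_feature768/*.npy'))\n",
    "    query = np.load(feat_files[0])[:1].astype('float32')\n",
    "    distances, ids = index.search(query, 1)\n",
    "    print(index_paths[-1], '| ntotal =', index.ntotal,\n",
    "          '| nearest id =', int(ids[0][0]), '| distance =', float(distances[0][0]))\n",
    "else:\n",
    "    print('No added_*.index file found yet.')"
   ]
  },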
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "837f2efe",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Authtoken saved to configuration file: /home/haive/.ngrok2/ngrok.yml\n",
      "/home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
      "The tensorboard extension is already loaded. To reload it, use:\n",
      "  %reload_ext tensorboard\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Reusing TensorBoard on port 8888 (pid 2720241), started 4:48:04 ago. (Use '!kill 2720241' to kill it.)"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "\n",
       "      <iframe id=\"tensorboard-frame-36254e2cd81499cd\" width=\"100%\" height=\"800\" frameborder=\"0\">\n",
       "      </iframe>\n",
       "      <script>\n",
       "        (function() {\n",
       "          const frame = document.getElementById(\"tensorboard-frame-36254e2cd81499cd\");\n",
       "          const url = new URL(\"/\", window.location);\n",
       "          const port = 8888;\n",
       "          if (port) {\n",
       "            url.port = port;\n",
       "          }\n",
       "          frame.src = url;\n",
       "        })();\n",
       "      </script>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tensorboard NGROK URL:NgrokTunnel: \"https://b5a7-162-244-83-121.ngrok-free.app\" -> \"http://localhost:8888\"\n",
      "Write filelist done\n",
      "Use gpus: 0\n",
      "INFO:test2:{'data': {'filter_length': 2048, 'hop_length': 400, 'max_wav_value': 32768.0, 'mel_fmax': None, 'mel_fmin': 0.0, 'n_mel_channels': 125, 'sampling_rate': 40000, 'win_length': 2048, 'training_files': './logs/test2/filelist.txt'}, 'model': {'filter_channels': 768, 'gin_channels': 256, 'hidden_channels': 192, 'inter_channels': 192, 'kernel_size': 3, 'n_heads': 2, 'n_layers': 6, 'p_dropout': 0, 'resblock': '1', 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'resblock_kernel_sizes': [3, 7, 11], 'spk_embed_dim': 109, 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'upsample_rates': [10, 10, 2, 2], 'use_spectral_norm': False}, 'train': {'batch_size': 7, 'betas': [0.8, 0.99], 'c_kl': 1.0, 'c_mel': 45, 'epochs': 20000, 'eps': 1e-09, 'fp16_run': True, 'init_lr_ratio': 1, 'learning_rate': 0.0001, 'log_interval': 200, 'lr_decay': 0.999875, 'seed': 1234, 'segment_size': 12800, 'warmup_epochs': 0}, 'model_dir': './logs/test2', 'experiment_dir': './logs/test2', 'save_every_epoch': 50, 'name': 'test2', 'total_epoch': 250, 'pretrainG': 'assets/pretrained_v2/f0G40k.pth', 'pretrainD': 'assets/pretrained_v2/f0D40k.pth', 'version': 'v2', 'gpus': '0', 'sample_rate': '40k', 'if_f0': 1, 'if_latest': 1, 'save_every_weights': '1', 'if_cache_data_in_gpu': 0}\n",
      "/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/torch/nn/utils/weight_norm.py:30: UserWarning: torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.\n",
      "warnings.warn(\"torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.\")\n",
      "DEBUG:infer.lib.infer_pack.models:gin_channels: 256, self.spk_embed_dim: 109\n",
      "DEBUG:test2:./logs/test2/D_2333333.pth\n",
      "INFO:test2:Loaded model weights\n",
      "INFO:test2:Loaded checkpoint './logs/test2/D_2333333.pth' (epoch 250)\n",
      "INFO:test2:loaded D\n",
      "DEBUG:test2:./logs/test2/G_2333333.pth\n",
      "INFO:test2:Loaded model weights\n",
      "INFO:test2:Loaded checkpoint './logs/test2/G_2333333.pth' (epoch 250)\n",
      "/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/torch/functional.py:650: UserWarning: stft with return_complex=False is deprecated. In a future pytorch release, stft will return complex tensors for all inputs, and return_complex=False will raise an error.\n",
      "Note: you can still call torch.view_as_real on the complex output to recover the old return format. (Triggered internally at ../aten/src/ATen/native/SpectralOps.cpp:863.)\n",
      "return _VF.stft(input, n_fft, hop_length, win_length, window,  # type: ignore[attr-defined]\n",
      "/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/torch/autograd/__init__.py:251: UserWarning: Grad strides do not match bucket view strides. This may indicate grad was not created according to the gradient layout contract, or that the param's strides changed since DDP was constructed.  This is not an error, but may impair performance.\n",
      "grad.sizes() = [64, 1, 4], strides() = [4, 1, 1]\n",
      "bucket_view.sizes() = [64, 1, 4], strides() = [4, 4, 1] (Triggered internally at ../torch/csrc/distributed/c10d/reducer.cpp:320.)\n",
      "Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n",
      "INFO:test2:Saving model and optimizer state at epoch 250 to ./logs/test2/G_2333333.pth\n",
      "INFO:test2:Saving model and optimizer state at epoch 250 to ./logs/test2/D_2333333.pth\n",
      "INFO:test2:saving ckpt test2_e250:Success.\n",
      "INFO:test2:====> Epoch: 250 [2023-11-16 13:23:10] | (0:00:20.716771)\n",
      "INFO:test2:Training is done. The program is closed.\n",
      "INFO:test2:saving final ckpt:Success.\n",
      "/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 20 leaked semaphore objects to clean up at shutdown\n",
      "warnings.warn('resource_tracker: There appear to be %d '\n",
      "训练结束, 您可查看控制台训练日志或实验文件夹下的train.log\n"
     ]
    }
   ],
   "source": [
    "#@title 4.Train Model\n",
    "#@markdown <small> Enter your ngrok authtoken to open tensorboard. Get one here: https://dashboard.ngrok.com/get-started/your-authtoken\n",
    "ngrok_authtoken = \"2VsTppE0Z0NEkYGk5BiSotnQRhT_3fqLxdic8Aqg9EZ29cpnL\"#@param {type:\"string\"}\n",
    "!ngrok config add-authtoken {ngrok_authtoken}\n",
    "%cd /home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
    "from random import shuffle\n",
    "import json\n",
    "import os\n",
    "import pathlib\n",
    "from subprocess import Popen, PIPE, STDOUT\n",
    "from pyngrok import ngrok\n",
    "now_dir=os.getcwd()\n",
    "#@markdown <small> Enter the name of your model again. It must be the same you chose before.\n",
    "model_name = 'test2' #@param {type:\"string\"}\n",
    "#@markdown <small> Choose how often to save the model and how much training you want it to have.\n",
    "save_frequency = 50 # @param {type:\"slider\", min:5, max:50, step:5}\n",
    "epochs = 250 # @param {type:\"slider\", min:10, max:1000, step:10}\n",
    "#@markdown <small> ONLY cache datasets under 10 minutes long. Otherwise leave this unchecked.\n",
    "cache = False #@param {type:\"boolean\"}\n",
    "# Remove the logging setup\n",
    "\n",
    "def click_train(\n",
    "    exp_dir1,\n",
    "    sr2,\n",
    "    if_f0_3,\n",
    "    spk_id5,\n",
    "    save_epoch10,\n",
    "    total_epoch11,\n",
    "    batch_size12,\n",
    "    if_save_latest13,\n",
    "    pretrained_G14,\n",
    "    pretrained_D15,\n",
    "    gpus16,\n",
    "    if_cache_gpu17,\n",
    "    if_save_every_weights18,\n",
    "    version19,\n",
    "):\n",
    "    # 生成filelist\n",
    "    exp_dir = \"%s/logs/%s\" % (now_dir, exp_dir1)\n",
    "    os.makedirs(exp_dir, exist_ok=True)\n",
    "    gt_wavs_dir = \"%s/0_gt_wavs\" % (exp_dir)\n",
    "    feature_dir = (\n",
    "        \"%s/3_feature256\" % (exp_dir)\n",
    "        if version19 == \"v1\"\n",
    "        else \"%s/3_feature768\" % (exp_dir)\n",
    "    )\n",
    "    if if_f0_3:\n",
    "        f0_dir = \"%s/2a_f0\" % (exp_dir)\n",
    "        f0nsf_dir = \"%s/2b-f0nsf\" % (exp_dir)\n",
    "        names = (\n",
    "            set([name.split(\".\")[0] for name in os.listdir(gt_wavs_dir)])\n",
    "            & set([name.split(\".\")[0] for name in os.listdir(feature_dir)])\n",
    "            & set([name.split(\".\")[0] for name in os.listdir(f0_dir)])\n",
    "            & set([name.split(\".\")[0] for name in os.listdir(f0nsf_dir)])\n",
    "        )\n",
    "    else:\n",
    "        names = set([name.split(\".\")[0] for name in os.listdir(gt_wavs_dir)]) & set(\n",
    "            [name.split(\".\")[0] for name in os.listdir(feature_dir)]\n",
    "        )\n",
    "    opt = []\n",
    "    for name in names:\n",
    "        if if_f0_3:\n",
    "            opt.append(\n",
    "                \"%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s\"\n",
    "                % (\n",
    "                    gt_wavs_dir.replace(\"\\\\\", \"\\\\\\\\\"),\n",
    "                    name,\n",
    "                    feature_dir.replace(\"\\\\\", \"\\\\\\\\\"),\n",
    "                    name,\n",
    "                    f0_dir.replace(\"\\\\\", \"\\\\\\\\\"),\n",
    "                    name,\n",
    "                    f0nsf_dir.replace(\"\\\\\", \"\\\\\\\\\"),\n",
    "                    name,\n",
    "                    spk_id5,\n",
    "                )\n",
    "            )\n",
    "        else:\n",
    "            opt.append(\n",
    "                \"%s/%s.wav|%s/%s.npy|%s\"\n",
    "                % (\n",
    "                    gt_wavs_dir.replace(\"\\\\\", \"\\\\\\\\\"),\n",
    "                    name,\n",
    "                    feature_dir.replace(\"\\\\\", \"\\\\\\\\\"),\n",
    "                    name,\n",
    "                    spk_id5,\n",
    "                )\n",
    "            )\n",
    "    fea_dim = 256 if version19 == \"v1\" else 768\n",
    "    if if_f0_3:\n",
    "        for _ in range(2):\n",
    "            opt.append(\n",
    "                \"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s\"\n",
    "                % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)\n",
    "            )\n",
    "    else:\n",
    "        for _ in range(2):\n",
    "            opt.append(\n",
    "                \"%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s\"\n",
    "                % (now_dir, sr2, now_dir, fea_dim, spk_id5)\n",
    "            )\n",
    "    shuffle(opt)\n",
    "    with open(\"%s/filelist.txt\" % exp_dir, \"w\") as f:\n",
    "        f.write(\"\\n\".join(opt))\n",
    "\n",
    "    # Replace logger.debug, logger.info with print statements\n",
    "    print(\"Write filelist done\")\n",
    "    print(\"Use gpus:\", str(gpus16))\n",
    "    if pretrained_G14 == \"\":\n",
    "        print(\"No pretrained Generator\")\n",
    "    if pretrained_D15 == \"\":\n",
    "        print(\"No pretrained Discriminator\")\n",
    "    if version19 == \"v1\" or sr2 == \"40k\":\n",
    "        config_path = \"configs/v1/%s.json\" % sr2\n",
    "    else:\n",
    "        config_path = \"configs/v2/%s.json\" % sr2\n",
    "    config_save_path = os.path.join(exp_dir, \"config.json\")\n",
    "    if not pathlib.Path(config_save_path).exists():\n",
    "        with open(config_save_path, \"w\", encoding=\"utf-8\") as f:\n",
    "            with open(config_path, \"r\") as config_file:\n",
    "                config_data = json.load(config_file)\n",
    "                json.dump(\n",
    "                    config_data,\n",
    "                    f,\n",
    "                    ensure_ascii=False,\n",
    "                    indent=4,\n",
    "                    sort_keys=True,\n",
    "                )\n",
    "            f.write(\"\\n\")\n",
    "\n",
    "    cmd = (\n",
    "        'python infer/modules/train/train.py -e \"%s\" -sr %s -f0 %s -bs %s -g %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s'\n",
    "        % (\n",
    "            exp_dir1,\n",
    "            sr2,\n",
    "            1 if if_f0_3 else 0,\n",
    "            batch_size12,\n",
    "            gpus16,\n",
    "            total_epoch11,\n",
    "            save_epoch10,\n",
    "            \"-pg %s\" % pretrained_G14 if pretrained_G14 != \"\" else \"\",\n",
    "            \"-pd %s\" % pretrained_D15 if pretrained_D15 != \"\" else \"\",\n",
    "            1 if if_save_latest13 == True else 0,\n",
    "            1 if if_cache_gpu17 == True else 0,\n",
    "            1 if if_save_every_weights18 == True else 0,\n",
    "            version19,\n",
    "        )\n",
    "    )\n",
    "    # Use PIPE to capture the output and error streams\n",
    "    p = Popen(cmd, shell=True, cwd=now_dir, stdout=PIPE, stderr=STDOUT, bufsize=1, universal_newlines=True)\n",
    "\n",
    "    # Print the command's output as it runs\n",
    "    for line in p.stdout:\n",
    "        print(line.strip())\n",
    "\n",
    "    # Wait for the process to finish\n",
    "    p.wait()\n",
    "    return \"训练结束, 您可查看控制台训练日志或实验文件夹下的train.log\"\n",
    "%load_ext tensorboard\n",
    "%tensorboard --logdir ./logs --port=8888\n",
    "print(\"Tensorboard NGROK URL:\",end=\"\")\n",
    "try:\n",
    "    ngrok_url = ngrok.connect(8888)\n",
    "    print(ngrok_url)\n",
    "    training_log = click_train(\n",
    "        model_name,\n",
    "        '40k',\n",
    "        True,\n",
    "        0,\n",
    "        save_frequency,\n",
    "        epochs,\n",
    "        7,\n",
    "        True,\n",
    "        'assets/pretrained_v2/f0G40k.pth',\n",
    "        'assets/pretrained_v2/f0D40k.pth',\n",
    "        0,\n",
    "        cache,\n",
    "        True,\n",
    "        'v2',\n",
    "    )\n",
    "    print(training_log)\n",
    "except:\n",
    "    ngrok.kill()"
   ]
  },
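  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b0e2c4f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional check (not part of the original notebook): list what training left\n",
    "# behind. Full G/D checkpoints live in ./logs/<model_name>; the small exported\n",
    "# voice models are assumed (standard RVC layout) to land in ./assets/weights.\n",
    "import glob\n",
    "for pattern in (f'logs/{model_name}/G_*.pth',\n",
    "                f'logs/{model_name}/D_*.pth',\n",
    "                f'logs/{model_name}/added_*.index',\n",
    "                'assets/weights/*.pth'):\n",
    "    for path in sorted(glob.glob(pattern)):\n",
    "        print(path)"
   ]
  },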
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "d04566ac",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
      "2023-11-16 09:36:40 | INFO | faiss.loader | Loading faiss with AVX2 support.\n",
      "2023-11-16 09:36:40 | INFO | faiss.loader | Successfully loaded faiss with AVX2 support.\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/haive/voice_clone_rvc/rvc_implementation/project-main/app.py\", line 33, in <module>\n",
      "    from infer.modules.uvr5.modules import uvr\n",
      "  File \"/home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/uvr5/modules.py\", line 11, in <module>\n",
      "    from infer.modules.uvr5.mdxnet import MDXNetDereverb\n",
      "  File \"/home/haive/voice_clone_rvc/rvc_implementation/project-main/infer/modules/uvr5/mdxnet.py\", line 6, in <module>\n",
      "    import librosa\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/librosa/__init__.py\", line 209, in <module>\n",
      "    from . import core\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/librosa/core/__init__.py\", line 5, in <module>\n",
      "    from .convert import *  # pylint: disable=wildcard-import\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/librosa/core/convert.py\", line 7, in <module>\n",
      "    from . import notation\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/librosa/core/notation.py\", line 8, in <module>\n",
      "    from ..util.exceptions import ParameterError\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/librosa/util/__init__.py\", line 77, in <module>\n",
      "    from .utils import *  # pylint: disable=wildcard-import\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/librosa/util/utils.py\", line 9, in <module>\n",
      "    import numba\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/numba/__init__.py\", line 42, in <module>\n",
      "    from numba.np.ufunc import (vectorize, guvectorize, threading_layer,\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/numba/np/ufunc/__init__.py\", line 3, in <module>\n",
      "    from numba.np.ufunc.decorators import Vectorize, GUVectorize, vectorize, guvectorize\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/numba/np/ufunc/decorators.py\", line 3, in <module>\n",
      "    from numba.np.ufunc import _internal\n",
      "SystemError: initialization of _internal failed without raising an exception\n"
     ]
    }
   ],
   "source": [
    "%cd /home/haive/voice_clone_rvc/src/rvc_implementation\n",
    "!python app.py"
   ]
  },
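  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2b3c4d5e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The traceback above ends in numba's \"SystemError: initialization of _internal\n",
    "# failed without raising an exception\", which usually means the installed numba\n",
    "# build does not match the installed NumPy version. This cell only inspects the\n",
    "# two versions; which pair is actually compatible for this Python 3.9 environment\n",
    "# is an assumption, not something project-main specifies.\n",
    "from importlib.metadata import version\n",
    "\n",
    "print(\"numpy:\", version(\"numpy\"))\n",
    "print(\"numba:\", version(\"numba\"))\n",
    "\n",
    "# Possible remedies (pick one, then restart the kernel):\n",
    "# !pip install --upgrade numba        # newer numba builds support newer NumPy\n",
    "# !pip install \"numpy<1.24\"           # or pin NumPy for older numba builds"
   ]
  },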
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "c3eb06c9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
      "2023-11-16 11:21:41 | INFO | faiss.loader | Loading faiss with AVX2 support.\n",
      "2023-11-16 11:21:41 | INFO | faiss.loader | Successfully loaded faiss with AVX2 support.\n",
      "2023-11-16 11:21:44 | INFO | configs.config | Found GPU NVIDIA A100-PCIE-40GB\n",
      "2023-11-16 11:21:44 | INFO | __main__ | Use Language: en_US\n",
      "\u001b[31mERROR\u001b[0m:    [Errno 98] error while attempting to bind on address ('162.244.83.121', 7885): address already in use\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/haive/voice_clone_rvc/rvc_implementation/project-main/infer-web.py\", line 1501, in <module>\n",
      "    app.queue().launch(\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/gradio/blocks.py\", line 1865, in launch\n",
      "    ) = networking.start_server(\n",
      "  File \"/home/haive/miniconda3/envs/rvc_clone/lib/python3.9/site-packages/gradio/networking.py\", line 206, in start_server\n",
      "    raise OSError(\n",
      "OSError: Cannot find empty port in range: 7885-7885. You can specify a different port by setting the GRADIO_SERVER_PORT environment variable or passing the `server_port` parameter to `launch()`.\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "%cd /home/haive/voice_clone_rvc/rvc_implementation/project-main\n",
    "!python infer-web.py"
   ]
  },
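  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c4d5e6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# infer-web.py failed above because port 7885 is already bound on this host. The\n",
    "# Gradio error itself names the workaround: set GRADIO_SERVER_PORT or pass\n",
    "# server_port to launch(). Sketch: see what holds 7885, then relaunch on 7890\n",
    "# (an arbitrary, assumed-free port, not a value required by the project).\n",
    "!lsof -i :7885\n",
    "\n",
    "!GRADIO_SERVER_PORT=7890 python infer-web.py"
   ]
  },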
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "5760aabf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[0m\u001b[01;35ma.png\u001b[0m                    infer-web.py\r\n",
      "app.py                   LICENSE\r\n",
      "\u001b[01;34massets\u001b[0m/                  \u001b[01;34mlogs\u001b[0m/\r\n",
      "\u001b[01;34maudios\u001b[0m/                  MIT协议暨相关引用库协议\r\n",
      "\u001b[01;34mconfigs\u001b[0m/                 poetry.lock\r\n",
      "docker-compose.yml       pyproject.toml\r\n",
      "Dockerfile               README.md\r\n",
      "\u001b[01;34mdocs\u001b[0m/                    requirements-dml.txt\r\n",
      "download_files.py        requirements-ipex.txt\r\n",
      "environment_dml.yaml     requirements-safe.txt\r\n",
      "go-realtime-gui.bat      requirements.txt\r\n",
      "go-realtime-gui-dml.bat  requirements-win-for-realtime_vc_gui-dml.txt\r\n",
      "go-web.bat               requirements-win-for-realtime_vc_gui.txt\r\n",
      "go-web-dml.bat           Retrieval_based_Voice_Conversion_WebUI.ipynb\r\n",
      "GUI.py                   Retrieval_based_Voice_Conversion_WebUI_v2.ipynb\r\n",
      "gui_v1.py                \u001b[01;32mrun.sh\u001b[0m*\r\n",
      "\u001b[01;34mi18n\u001b[0m/                    \u001b[01;34mtools\u001b[0m/\r\n",
      "\u001b[01;34minfer\u001b[0m/                   \u001b[01;32mvenv.sh\u001b[0m*\r\n"
     ]
    }
   ],
   "source": [
    "ls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2fca22f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "#@title OR Open the GUI (Banned for free Colab Notebooks)\n",
    "if not 'installed' in locals():\n",
    "    %cd /content\n",
    "    from google.colab import drive\n",
    "    drive.mount('/content/drive')\n",
    "    from IPython.display import clear_output\n",
    "    from ipywidgets import Button\n",
    "    import os\n",
    "    if not os.path.exists('/content/drive'):\n",
    "        print(\"Your drive is not mounted. Creating Fake Drive.\")\n",
    "        os.makedirs('/content/drive/MyDrive')\n",
    "    if not os.path.exists('/content/drive/MyDrive/project-main'):\n",
    "        !wget https://huggingface.co/Rejekts/project/resolve/main/project-main.zip -O '/content/project-main.zip' && unzip 'project-main.zip' -d /content/drive/MyDrive\n",
    "    !cd '/content/drive/MyDrive/project-main' && python download_files.py && pip install -r 'requirements.txt'\n",
    "    !rm /content/project-main.zip\n",
    "    !rm -r /content/sample_data\n",
    "    !mkdir -p /content/dataset\n",
    "    clear_output()\n",
    "    Button(description=\"\\u2714 Success\", button_style=\"success\")\n",
    "tensorboard = True #@param {type:\"boolean\"}\n",
    "if tensorboard:\n",
    "    %load_ext tensorboard\n",
    "    %tensorboard --logdir ./logs\n",
    "%cd /content/drive/MyDrive/project-main\n",
    "!python app.py --colab"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b53cc9b4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5ae2343",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "15199059",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "c86ac935-2aea-4621-a78a-74d74157995b\n",
      "API request successful.\n",
      "Response: None\n",
      "/var/www/html/output/public/AI_voice/rvc_implc86ac935-2aea-4621-a78a-74d74157995b.mp3\n",
      "https://cloud.haive.online/public/AI_voice/rvc_implc86ac935-2aea-4621-a78a-74d74157995b.mp3\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import requests\n",
    "import uuid\n",
    "\n",
    "def main_TTS(text,id,uid, speaker_iden):\n",
    "    payload = {\n",
    "        \"text\": text,\n",
    "        \"speaker_iden\" : speaker_iden,\n",
    "        \"id\" : id,\n",
    "        \"uid\": uid\n",
    "    }\n",
    "    \n",
    "    gg = text_to_audio_api_request(payload = payload)\n",
    "    media_url = \"https://cloud.haive.online/public/AI_voice/\"+id+uid+\".mp3\"\n",
    "    output_file_mp3 = \"/var/www/html/output/public/AI_voice/\"+id+str(uid)+\".mp3\"\n",
    "    return output_file_mp3, media_url\n",
    "\n",
    "def text_to_audio_api_request(payload):\n",
    "\n",
    "    url = \"http://162.244.83.121:8008/text-to-audio-msen\"\n",
    "    headers = {\"Content-Type\": \"application/json\"}\n",
    "    \n",
    "    try:\n",
    "        response = requests.post(url, headers=headers, data=json.dumps(payload))\n",
    "        response_data = response.json()\n",
    "        \n",
    "        if response.status_code == 200:\n",
    "            print(\"API request successful.\")\n",
    "            print(\"Response:\", response_data)\n",
    "        else:\n",
    "            print(\"API request failed. Status code:\", response.status_code)\n",
    "            print(\"Response:\", response_data)\n",
    "    except Exception as e:\n",
    "        print(\"An error occurred:\", str(e))\n",
    "    return \"File Created\"\n",
    "\n",
    "\n",
    "text = \"Hello, my name is Haive. I work in Codegama\"\n",
    "model_name = \"rvc_impl\"\n",
    "uid = str(uuid.uuid4())\n",
    "print(uid)\n",
    "speaker_iden = 19\n",
    "output_file_mp3, media_url = main_TTS(text,model_name,uid, speaker_iden)\n",
    "print(output_file_mp3)\n",
    "print(media_url)\n"
   ]
  },
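  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d5e6f7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The cell above reports success from the POST alone and builds media_url by\n",
    "# convention. A small follow-up sketch, assuming the web server at\n",
    "# cloud.haive.online answers HEAD requests: confirm the generated mp3 is\n",
    "# actually reachable before handing the link to a client.\n",
    "import requests\n",
    "\n",
    "check = requests.head(media_url, timeout=10)\n",
    "if check.status_code == 200:\n",
    "    print(\"Audio is reachable:\", media_url)\n",
    "else:\n",
    "    print(\"Audio not reachable yet, status:\", check.status_code)"
   ]
  },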
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8151884f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import librosa\n",
    "\n",
    "def read_audio_file(file_path):\n",
    "    # Read the audio file\n",
    "    audio, sample_rate = librosa.load(file_path, sr=None)\n",
    "\n",
    "    return audio, sample_rate\n",
    "\n",
    "# Example usage\n",
    "file_path = 'path/to/your/audio/file.wav'  # replace with the actual file path\n",
    "audio, sample_rate = read_audio_file(file_path)\n",
    "\n",
    "# Now, you can use 'audio' and 'sample_rate' in your further processing\n",
    "print(f\"Audio duration: {len(audio)/sample_rate} seconds\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}