rmysmo committed
Commit 8e7c835 · 1 parent: 0308efd

added translate

Files changed (3)
  1. app.py +18 -16
  2. i18n/en_US.json +1 -1
  3. i18n/uz_UZ.json +2 -109
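
Why the app.py labels and the locale JSON keys change in lockstep: in RVC-style Gradio apps, i18n(...) usually looks the literal string from app.py up in the active i18n/<locale>.json file and falls back to the key itself when no translation exists. The loader itself is not part of this diff, so the snippet below is only a minimal sketch of that assumed behaviour; the I18n class name and the fallback to en_US.json are illustrative assumptions, not the Space's actual code.

import json
import locale
from pathlib import Path


class I18n:
    # Hypothetical, minimal lookup; the real loader in this Space may differ.
    def __init__(self, language=None, i18n_dir="i18n"):
        language = language or (locale.getdefaultlocale()[0] or "en_US")
        path = Path(i18n_dir) / f"{language}.json"
        if not path.exists():
            # Assumed fallback when no translation file exists for the locale.
            path = Path(i18n_dir) / "en_US.json"
        self.language_map = json.loads(path.read_text(encoding="utf-8"))

    def __call__(self, key):
        # Untranslated keys are returned unchanged, which is why the labels
        # in app.py and the keys in the JSON files must stay in sync.
        return self.language_map.get(key, key)


# i18n = I18n("uz_UZ")
# i18n("Export file format")  # localized label, or the key itself if missing
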
app.py CHANGED
@@ -1611,7 +1611,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  fn=change_choices, inputs=[], outputs=[sid0, file_index1]
  )
  # file_big_npy1 = gr.Textbox(
- # label=i18n("特征文件路径"),
+ # label=i18n("Feature file path"),
  # value="E:\\codes\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
  # interactive=True,
  # )
@@ -1659,7 +1659,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  resample_sr0 = gr.Slider(
  minimum=0,
  maximum=48000,
- label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
+ label=i18n("Post-processing resampling to the final sampling rate, 0 means no resampling"),
  value=0,
  step=1,
  interactive=True,
@@ -1738,7 +1738,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  outputs=[formant_preset, qfrency, tmbre])
  with gr.Row():
  vc_output1 = gr.Textbox("")
- f0_file = gr.File(label=i18n("F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调"), visible=False)
+ f0_file = gr.File(label=i18n("F0 curve file, optional, one line per pitch, replaces the default F0 and sharp and flat tones"), visible=False)

  but0.click(
  vc_single,
@@ -1767,7 +1767,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  vc_transform1 = gr.Number(
  label=i18n("Transpose(integer, number of semitones, octave up 12 octave down -12)"), value=0
  )
- opt_input = gr.Textbox(label=i18n("指定输出文件夹"), value="opt")
+ opt_input = gr.Textbox(label=i18n("Specify output folder"), value="opt")
  f0method1 = gr.Radio(
  label=i18n(
  "Select the pitch extraction algorithm. When inputting a singing voice, you can use pm to speed it up. Harvest has good bass but is extremely slow. Crepe has good effects but consumes the GPU."
@@ -1779,14 +1779,16 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  filter_radius1 = gr.Slider(
  minimum=0,
  maximum=7,
- label=i18n(">=3, use median filtering on the harvest pitch recognition result, the value is the filter radius, which can weaken the mute sound."),
+ label=i18n(
+ ">=3, use median filtering on the harvest pitch recognition result, the value is the filter radius, which can weaken the mute sound."),
  value=3,
  step=1,
  interactive=True,
  )
  with gr.Column():
  file_index3 = gr.Textbox(
- label=i18n("Feature retrieval library file path, if empty, use the drop-down selection result"),
+ label=i18n(
+ "Feature retrieval library file path, if empty, use the drop-down selection result"),
  value="",
  interactive=True,
  )
@@ -1801,14 +1803,14 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  outputs=file_index4,
  )
  # file_big_npy2 = gr.Textbox(
- # label=i18n("特征文件路径"),
+ # label=i18n("Feature file path"),
  # value="E:\\codes\\py39\\vits_vc_gpu_train\\logs\\mi-test-1key\\total_fea.npy",
  # interactive=True,
  # )
  index_rate2 = gr.Slider(
  minimum=0,
  maximum=1,
- label=i18n("检索特征占比"),
+ label=i18n("Retrieval feature ratio"),
  value=1,
  interactive=True,
  )
@@ -1816,7 +1818,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  resample_sr1 = gr.Slider(
  minimum=0,
  maximum=48000,
- label=i18n("后处理重采样至最终采样率,0为不进行重采样"),
+ label=i18n("Post-processing resampling to the final sampling rate, 0 means no resampling"),
  value=0,
  step=1,
  interactive=True,
@@ -1824,7 +1826,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  rms_mix_rate1 = gr.Slider(
  minimum=0,
  maximum=1,
- label=i18n("输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络"),
+ label=i18n("The input source volume envelope replaces the output volume envelope fusion ratio. The closer it is to 1, the more output envelope is used."),
  value=1,
  interactive=True,
  )
@@ -1832,7 +1834,7 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  minimum=0,
  maximum=0.5,
  label=i18n(
- "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果"
+ "Protects clear consonants and breathing sounds, and prevents electronic music tearing and other artifacts. It is not enabled when it is set to 0.5. It is increased when it is lowered, but the indexing effect may be reduced."
  ),
  value=0.33,
  step=0.01,
@@ -1840,21 +1842,21 @@ with gr.Blocks(theme=gr.themes.Base(), title='Voice DeepFake 💻') as app:
  )
  with gr.Column():
  dir_input = gr.Textbox(
- label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
+ label=i18n("Enter the path of the audio folder to be processed (just copy it from the address bar of the file manager)"),
  value="E:\codes\py39\\test-20230416b\\todo-songs",
  )
  inputs = gr.File(
- file_count="multiple", label=i18n("也可批量输入音频文件, 二选一, 优先读文件夹")
+ file_count="multiple", label=i18n("You can also batch import audio files, choose one of the two, and read the folder first")
  )
  with gr.Row():
  format1 = gr.Radio(
- label=i18n("导出文件格式"),
+ label=i18n("Export file format"),
  choices=["wav", "flac", "mp3", "m4a"],
  value="flac",
  interactive=True,
  )
- but1 = gr.Button(i18n("转换"), variant="primary")
- vc_output3 = gr.Textbox(label=i18n("输出信息"))
+ but1 = gr.Button(i18n("Conversion"), variant="primary")
+ vc_output3 = gr.Textbox(label=i18n("Output information"))
  but1.click(
  vc_multi,
  [
i18n/en_US.json CHANGED
@@ -1,6 +1,6 @@
  {
  "很遗憾您这没有能用的显卡来支持您训练": "Unfortunately, there is no compatible GPU available to support your training.",
- "": "Yes",
+ "Yes": "Yes",
  "step1:正在处理数据": "Step 1: Processing data",
  "step2a:无需提取音高": "Step 2a: Skipping pitch extraction",
  "step2b:正在提取特征": "Step 2b: Extracting features",
i18n/uz_UZ.json CHANGED
@@ -1,6 +1,6 @@
  {
  "Unfortunately you don't have a working graphics card to support your training": "Afsuski, mashg'ulotingizni qo'llab-quvvatlash uchun mos keladigan GPU mavjud emas.",
- "": "Ha",
+ "Yes": "Ha",
  "step1:processing data": "1-qadam: Ma'lumotlarni qayta ishlash",
  "step2a:No need to extract pitch": "2a-qadam: Ohangni chiqarishni o'tkazib yuborish",
  "step2b:extracting features": "2b-qadam: Xususiyatlarni ajratib olish",
@@ -15,112 +15,5 @@
  "Please select the speaker ID": "Spiker/Qo'shiqchi identifikatorini tanlang:",
  "For male to female, +12key is recommended, for female to male, -12key is recommended. If the sound range explodes and causes timbre distortion, you can adjust it to the appropriate range yourself.": "Tavsiya etilgan +12 kaliti erkakdan ayolga o'tish uchun, va ayoldan erkakka o'tish uchun -12 kalit. Ovoz diapazoni juda uzoqqa ketsa va ovoz buzilgan bo'lsa, siz uni o'zingiz ham tegishli diapazonga moslashingiz mumkin.",
  "Transpose(integer, number of semitones, octave up 12 octave down -12)": "Transpoze (butun son, yarim tonlar soni, oktavaga ko'tarilishi: 12, oktavaga past: -12):",
- "Enter the path of the audio file to be processed (the default is an example of the correct format)": "Qayta ishlanadigan audio faylning yo'lini kiriting (standart - to'g'ri format namunasi):",
- "Select the pitch extraction algorithm. When inputting a singing voice, you can use pm to speed it up. Harvest has good bass but is extremely slow. Crepe has good effects but consumes the GPU.": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'harvest': better bass but extremely slow; 'crepe': better quality but GPU intensive):",
- "crepe_hop_length": "Mangio-Crepe Hop Length (Only applies to mangio-crepe): Hop length refers to the time it takes for the speaker to jump to a dramatic pitch. Lower hop lengths take more time to infer but are more pitch accurate.",
- "特征检索库文件路径": "Feature search database file path",
- ">=3, use median filtering on the harvest pitch recognition result, the value is the filter radius, which can weaken the mute sound.": "If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.",
- "Feature retrieval library file path, if empty, use the drop-down selection result": "Path to the feature index file. Leave blank to use the selected result from the dropdown:",
- "Automatically detect index path, drop-down selection (dropdown)": "Auto-detect index path and select from the dropdown:",
- "特征文件路径": "Path to feature file:",
- "检索特征占比": "Search feature ratio:",
- "后处理重采样至最终采样率,0为不进行重采样": "Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling:",
- "输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络": "Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used:",
- "保护清辅音和呼吸声,防止电音撕裂等artifact,拉满0.5不开启,调低加大保护力度但可能降低索引效果": "Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy:",
- "F0曲线文件, 可选, 一行一个音高, 代替默认F0及升降调": "F0 curve file (optional). One pitch per line. Replaces the default F0 and pitch modulation:",
- "转换": "Convert",
- "输出信息": "Output information",
- "输出音频(右下角三个点,点了可以下载)": "Export audio (click on the three dots in the lower right corner to download)",
- "批量转换, 输入待转换音频文件夹, 或上传多个音频文件, 在指定文件夹(默认opt)下输出转换的音频. ": "Batch conversion. Enter the folder containing the audio files to be converted or upload multiple audio files. The converted audio will be output in the specified folder (default: 'opt').",
- "指定输出文件夹": "Specify output folder:",
- "输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)": "Enter the path of the audio folder to be processed (copy it from the address bar of the file manager):",
- "也可批量输入音频文件, 二选一, 优先读文件夹": "You can also input audio files in batches. Choose one of the two options. Priority is given to reading from the folder.",
- "导出文件格式": "Export file format",
- "伴奏人声分离&去混响&去回声": "Vocals/Accompaniment Separation & Reverberation Removal",
- "输入待处理音频文件夹路径": "Enter the path of the audio folder to be processed:",
- "模型": "Model",
- "指定输出主人声文件夹": "Specify the output folder for vocals:",
- "指定输出非主人声文件夹": "Specify the output folder for accompaniment:",
- "训练": "Train",
- "step1: 填写实验配置. 实验数据放在logs下, 每个实验一个文件夹, 需手工输入实验名路径, 内含实验配置, 日志, 训练得到的模型文件. ": "Step 1: Fill in the experimental configuration. Experimental data is stored in the 'logs' folder, with each experiment having a separate folder. Manually enter the experiment name path, which contains the experimental configuration, logs, and trained model files.",
- "输入实验名": "Enter the experiment name:",
- "目标采样率": "Target sample rate:",
- "模型是否带音高指导(唱歌一定要, 语音可以不要)": "Whether the model has pitch guidance (required for singing, optional for speech):",
- "版本": "Version",
- "提取音高和处理数据使用的CPU进程数": "Number of CPU processes used for pitch extraction and data processing:",
- "step2a: 自动遍历训练文件夹下所有可解码成音频的文件并进行切片归一化, 在实验目录下生成2个wav文件夹; 暂时只支持单人训练. ": "Step 2a: Automatically traverse all files in the training folder that can be decoded into audio and perform slice normalization. Generates 2 wav folders in the experiment directory. Currently, only single-singer/speaker training is supported.",
- "输入训练文件夹路径": "Enter the path of the training folder:",
- "请指定说话人id": "Please specify the speaker/singer ID:",
- "处理数据": "Process data",
- "step2b: 使用CPU提取音高(如果模型带音高), 使用GPU提取特征(选择卡号)": "Step 2b: Use CPU to extract pitch (if the model has pitch), use GPU to extract features (select GPU index):",
- "以-分隔输入使用的卡号, 例如 0-1-2 使用卡0和卡1和卡2": "Enter the GPU index(es) separated by '-', e.g., 0-1-2 to use GPU 0, 1, and 2:",
- "显卡信息": "GPU Information",
- "选择音高提取算法:输入歌声可用pm提速,高质量语音但CPU差可用dio提速,harvest质量更好但慢": "Select the pitch extraction algorithm ('pm': faster extraction but lower-quality speech; 'dio': improved speech but slower extraction; 'harvest': better quality but slower extraction):",
- "特征提取": "Feature extraction",
- "step3: 填写训练设置, 开始训练模型和索引": "Step 3: Fill in the training settings and start training the model and index",
- "保存频率save_every_epoch": "Save frequency (save_every_epoch):",
- "总训练轮数total_epoch": "Total training epochs (total_epoch):",
- "每张显卡的batch_size": "Batch size per GPU:",
- "是否仅保存最新的ckpt文件以节省硬盘空间": "Save only the latest '.ckpt' file to save disk space:",
- "否": "No",
- "是否缓存所有训练集至显存. 10min以下小数据可缓存以加速训练, 大数据缓存会炸显存也加不了多少速": "Cache all training sets to GPU memory. Caching small datasets (less than 10 minutes) can speed up training, but caching large datasets will consume a lot of GPU memory and may not provide much speed improvement:",
- "是否在每次保存时间点将最终小模型保存至weights文件夹": "Save a small final model to the 'weights' folder at each save point:",
- "加载预训练底模G路径": "Load pre-trained base model G path:",
- "加载预训练底模D路径": "Load pre-trained base model D path:",
- "训练模型": "Train model",
- "训练特征索引": "Train feature index",
- "一键训练": "One-click training",
- "ckpt处理": "ckpt Processing",
- "模型融合, 可用于测试音色融合": "Model fusion, can be used to test timbre fusion",
- "A模型路径": "Path to Model A:",
- "B模型路径": "Path to Model B:",
- "A模型权重": "Weight (w) for Model A:",
- "模型是否带音高指导": "Whether the model has pitch guidance:",
- "要置入的模型信息": "Model information to be placed:",
- "保存的模型名不带后缀": "Saved model name (without extension):",
- "模型版本型号": "Model architecture version:",
- "融合": "Fusion",
- "修改模型信息(仅支持weights文件夹下提取的小模型文件)": "Modify model information (only supported for small model files extracted from the 'weights' folder)",
- "模型路径": "Path to Model:",
- "要改的模型信息": "Model information to be modified:",
- "保存的文件名, 默认空为和源文件同名": "Save file name (default: same as the source file):",
- "修改": "Modify",
- "查看模型信息(仅支持weights文件夹下提取的小模型文件)": "View model information (only supported for small model files extracted from the 'weights' folder)",
- "查看": "View",
- "模型提取(输入logs文件夹下大文件模型路径),适用于训一半不想训了模型没有自动提取保存小文件模型,或者想测试中间模型的情况": "Model extraction (enter the path of the large file model under the 'logs' folder). This is useful if you want to stop training halfway and manually extract and save a small model file, or if you want to test an intermediate model:",
- "保存名": "Save name:",
- "模型是否带音高指导,1是0否": "Whether the model has pitch guidance (1: yes, 0: no):",
- "提取": "Extract",
- "Onnx导出": "Export Onnx",
- "RVC模型路径": "RVC Model Path:",
- "Onnx输出路径": "Onnx Export Path:",
- "MoeVS模型": "MoeVS Model",
- "导出Onnx模型": "Export Onnx Model",
- "常见问题解答": "FAQ (Frequently Asked Questions)",
- "招募音高曲线前端编辑器": "Recruiting front-end editors for pitch curves",
- "加开发群联系我xxxxx": "Join the development group and contact me at xxxxx",
- "点击查看交流、问题反馈群号": "Click to view the communication and problem feedback group number",
- "xxxxx": "xxxxx",
- "加载模型": "Load model",
- "Hubert模型": "Hubert Model",
- "选择.pth文件": "Select the .pth file",
- "选择.index文件": "Select the .index file",
- "选择.npy文件": "Select the .npy file",
- "输入设备": "Input device",
- "输出设备": "Output device",
- "音频设备(请使用同种类驱动)": "Audio device (please use the same type of driver)",
- "响应阈值": "Response threshold",
- "音调设置": "Pitch settings",
- "Index Rate": "Index Rate",
- "常规设置": "General settings",
- "采样长度": "Sample length",
- "淡入淡出长度": "Fade length",
- "额外推理时长": "Extra inference time",
- "输入降噪": "Input noise reduction",
- "输出降噪": "Output noise reduction",
- "性能设置": "Performance settings",
- "开始音频转换": "Start audio conversion",
- "停止音频转换": "Stop audio conversion",
- "推理时间(ms):": "Inference time (ms):",
- "人声伴奏分离批量处理, 使用UVR5模型。 <br>合格的文件夹路径格式举例: E:\\codes\\py39\\vits_vc_gpu\\白鹭霜华测试样例(去文件管理器地址栏拷就行了)。 <br>模型分为三类: <br>1、保留人声:不带和声的音频选这个,对主人声保留比HP5更好。内置HP2和HP3两个模型,HP3可能轻微漏伴奏但对主人声保留比HP2稍微好一丁点; <br>2、仅保留主人声:带和声的音频选这个,对主人声可能有削弱。内置HP5一个模型; <br> 3、去混响、去延迟模型(by FoxJoy):<br>  (1)MDX-Net(onnx_dereverb):对于双通道混响是最好的选择,不能去除单通道混响;<br>&emsp;(234)DeEcho:去除延迟效果。Aggressive比Normal去除得更彻底,DeReverb额外去除混响,可去除单声道混响,但是对高频重的板式混响去不干净。<br>去混响/去延迟,附:<br>1、DeEcho-DeReverb模型的耗时是另外2个DeEcho模型的接近2倍;<br>2、MDX-Net-Dereverb模型挺慢的;<br>3、个人推荐的最干净的配置是先MDX-Net再DeEcho-Aggressive。": "Batch processing for vocal accompaniment separation using the UVR5 model.<br>Example of a valid folder path format: D:\\path\\to\\input\\folder (copy it from the file manager address bar).<br>The model is divided into three categories:<br>1. Preserve vocals: Choose this option for audio without harmonies. It preserves vocals better than HP5. It includes two built-in models: HP2 and HP3. HP3 may slightly leak accompaniment but preserves vocals slightly better than HP2.<br>2. Preserve main vocals only: Choose this option for audio with harmonies. It may weaken the main vocals. It includes one built-in model: HP5.<br>3. De-reverb and de-delay models (by FoxJoy):<br>  (1) MDX-Net: The best choice for stereo reverb removal but cannot remove mono reverb;<br>&emsp;(234) DeEcho: Removes delay effects. Aggressive mode removes more thoroughly than Normal mode. DeReverb additionally removes reverb and can remove mono reverb, but not very effectively for heavily reverberated high-frequency content.<br>De-reverb/de-delay notes:<br>1. The processing time for the DeEcho-DeReverb model is approximately twice as long as the other two DeEcho models.<br>2. The MDX-Net-Dereverb model is quite slow.<br>3. The recommended cleanest configuration is to apply MDX-Net first and then DeEcho-Aggressive."
+ "Enter the path of the audio file to be processed (the default is an example of the correct format)": "Qayta ishlanadigan audio faylning yo'lini kiriting (standart - to'g'ri format namunasi):"
  }
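
Since uz_UZ.json now keeps only a subset of the keys that app.py references, any key missing from a locale file would (under the fallback behaviour sketched above) show up verbatim in the UI. A quick way to audit that is a small script along these lines; the regex and the assumption that every label is a plain string literal passed to i18n(...) are simplifications for illustration, not something this commit provides.

import json
import re
from pathlib import Path

# Collect every literal passed to i18n("...") in app.py. Calls where the string
# starts on the next line are handled; f-strings or concatenation would be missed.
pattern = re.compile(r'i18n\(\s*"((?:[^"\\]|\\.)+)"')
used_keys = set(pattern.findall(Path("app.py").read_text(encoding="utf-8")))

for locale_file in sorted(Path("i18n").glob("*.json")):
    translations = json.loads(locale_file.read_text(encoding="utf-8"))
    missing = sorted(used_keys - set(translations))
    print(f"{locale_file.name}: {len(missing)} key(s) without a translation")
    for key in missing[:10]:  # print a short sample
        print("  -", key)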