Commit 222cd0b
Fixed several serious recent bugs and refactored some pages
Chenyme authored Jul 26, 2024
1 parent 8d50596 commit 222cd0b
Showing 25 changed files with 377 additions and 343 deletions.
8 changes: 4 additions & 4 deletions 1_install_CPU.bat
@@ -26,10 +26,10 @@ REM Check and update streamlit
pip show streamlit > nul
if %errorlevel% equ 0 (
echo Updating streamlit...
pip install streamlit==1.36.0
pip install streamlit==1.37.0
) else (
echo Installing streamlit...
pip install streamlit==1.36.0
pip install streamlit==1.37.0
)

REM Check and update streamlit-antd-components
@@ -76,10 +76,10 @@ REM Check and update torch torchvision torchaudio
pip show torch torchvision torchaudio > nul
if %errorlevel% equ 0 (
echo Updating torch torchvision torchaudio...
pip install --upgrade torch torchvision torchaudio
pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1
) else (
echo Installing torch torchvision torchaudio...
pip install torch torchvision torchaudio
pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1
)

REM Check and update faster-whisper
6 changes: 3 additions & 3 deletions 1_install_Cuda11.8.bat
@@ -46,10 +46,10 @@ REM Check and update streamlit
pip show streamlit > nul
if %errorlevel% equ 0 (
echo Updating streamlit...
pip install streamlit==1.36.0
pip install streamlit==1.37.0
) else (
echo Installing streamlit...
pip install streamlit==1.36.0
pip install streamlit==1.37.0
)


@@ -105,7 +105,7 @@ if %errorlevel% equ 0 (

echo Installing torch torchvision torchaudio...
echo The network in mainland China can be unstable, a VPN is recommended for this download
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu118


REM Check and update faster-whisper
6 changes: 3 additions & 3 deletions 1_install_Cuda12.1.bat
@@ -49,10 +49,10 @@ REM Check and update streamlit
pip show streamlit > nul
if %errorlevel% equ 0 (
echo Updating streamlit...
pip install streamlit==1.36.0
pip install streamlit==1.37.0
) else (
echo Installing streamlit...
pip install streamlit==1.36.0
pip install streamlit==1.37.0
)


@@ -108,7 +108,7 @@ if %errorlevel% equ 0 (

echo Installing torch torchvision torchaudio...
echo The network in mainland China can be unstable, a VPN is recommended for this download
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu121


REM Check and update faster-whisper
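After running either CUDA install script, a quick sanity check (a sketch; run it inside the env the script just set up) confirms that the pinned 2.3.1 stack was installed and can actually see the GPU:

import torch
import torchvision
import torchaudio

# Versions should match the pins in the install scripts above.
print(torch.__version__, torchvision.__version__, torchaudio.__version__)
# False here usually means a CPU wheel was pulled or the NVIDIA driver is missing.
print("CUDA available:", torch.cuda.is_available())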
19 changes: 19 additions & 0 deletions 3_修复闪退(请不要误点).bat
@@ -0,0 +1,19 @@
@echo off
chcp 65001 > nul
setlocal

set "script_path=%~dp0"

if exist "%script_path%env\Library\bin\libiomp5md.dll" (
del "%script_path%env\Library\bin\libiomp5md.dll"
if not exist "%script_path%env\Library\bin\libiomp5md.dll" (
echo File deleted successfully
) else (
echo Failed to delete the file
)
) else (
echo File does not exist
)

pause
endlocal
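For context: this new script appears to target the common "OMP: Error #15" crash that occurs when two copies of the Intel OpenMP runtime (libiomp5md.dll) end up in the same environment, typically after installing PyTorch alongside other conda packages; deleting the duplicate DLL is one known workaround. A non-destructive alternative, assuming that is indeed the crash being fixed here, is to allow duplicate OpenMP runtimes before the app starts (a minimal sketch):

import os

# Hypothetical workaround: tolerate a duplicate libiomp5md.dll instead of deleting it.
# Set this before importing torch / faster-whisper.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"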
33 changes: 16 additions & 17 deletions Chenyme-AAVT.py
@@ -8,13 +8,12 @@
from project.AVG.AVG import avg

st.set_page_config(
page_title="Chenyme-AAVT v0.8.4",
page_title="Chenyme-AAVT v0.8.5",
page_icon=":material/radio_button_checked:",
layout="wide", # 设置布局样式为宽展示
initial_sidebar_state="expanded" # 设置初始边栏状态为展开
layout="wide",
initial_sidebar_state="expanded"
)


with st.sidebar.container():
st.subheader("Chenyme-AAVT")
menu = sac.menu(
@@ -33,17 +32,17 @@
)
sac.divider(label='POWERED BY @CHENYME', icon="lightning-charge", align='center', color='gray')

with st.container():
if menu == "主页":
home()
elif menu == '内容助手':
content()
elif menu == '媒体识别':
media()
elif menu == '字幕翻译':
translation()
elif menu == '图文博客':
avtb()
elif menu == '声音模拟':
avg()

if menu == "主页":
home()
elif menu == '内容助手':
content()
elif menu == '媒体识别':
media()
elif menu == '字幕翻译':
translation()
elif menu == '图文博客':
avtb()
elif menu == '声音模拟':
avg()

Binary file added project/AVG/__pycache__/AVG.cpython-311.pyc
6 changes: 3 additions & 3 deletions project/AVTB/AVTB.py
@@ -198,7 +198,7 @@ def avtb():
with col3:
base_url = st.text_input("**BASE_URL:**", value=st.session_state.base_url)
with col4:
api_key = st.text_input("**API_KEY:**", value=st.session_state.api_key)
api_key = st.text_input("**API_KEY:**", value=st.session_state.api_key, type="password", placeholder="Enter the API_KEY")
with col5:
temperature = st.number_input("**Model temperature:**", min_value=0.0, max_value=1.0, value=0.8, step=0.1)

@@ -235,9 +235,9 @@ def avtb():
st.toast("Text extraction complete!")
print("Text extraction complete!\n")

# Generate the article with GPT-4o
# Generate the article with GPT-4o-mini
text = result['text']
content = openai_api(api_key, base_url, "gpt-4o",
content = openai_api(api_key, base_url, "gpt-4o-mini",
"请你将下面的内容,以" + name + "的时间写成一篇文章" + text,
temperature, "你是一位写作高手!")

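For reference, the openai_api helper called above is not shown in this diff; a minimal sketch of what such a helper presumably wraps (names and signature mirror the call site, the project's actual implementation may differ) looks like this with the official OpenAI Python SDK:

from openai import OpenAI

def openai_api(api_key, base_url, model, prompt, temperature, system):
    # Build a client against the configured endpoint and request one chat completion.
    client = OpenAI(api_key=api_key, base_url=base_url)
    response = client.chat.completions.create(
        model=model,
        temperature=temperature,
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content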
Binary file added project/AVTB/__pycache__/AVTB.cpython-311.pyc
Binary file added project/__pycache__/__init__.cpython-311.pyc
Binary file added project/__pycache__/content.cpython-311.pyc
Binary file added project/__pycache__/home.cpython-311.pyc
Binary file added project/__pycache__/media.cpython-311.pyc
Binary file added project/__pycache__/translation.cpython-311.pyc
20 changes: 10 additions & 10 deletions project/config/api.toml
@@ -1,32 +1,32 @@
[GEMINI]
gemini_key = "key"
gemini_key = ""
gemini_base = ""

[AI01]
AI01_key = "key"
AI01_key = ""
AI01_base = "https://api.lingyiwanwu.com/v1"

[KIMI]
kimi_key = "Y-"
kimi_key = ""
kimi_base = "https://api.moonshot.cn/v1"

[CHATGLM]
chatglm_key = "bd"
chatglm_base = "https://open.bigmodel.cn/api/paas/v4/"
chatglm_key = ""
chatglm_base = "https://open.bigmodel.cn/api/paas/v4"

[GPT]
openai_key = "sk-"
openai_key = ""
openai_base = "https://api.openai.com/v1"

[CLAUDE]
claude_key = "key"
claude_key = ""
claude_base = "https://api.anthropic.com/v1/messages"

[DEEPSEEK]
deepseek_key = "sk-"
deepseek_key = ""
deepseek_base = "https://api.deepseek.com/"

[LOCAL]
api_key = ""
base_url = "base"
model_name = "name"
base_url = ""
model_name = ""
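Clearing the placeholder keys matters because downstream code can treat an empty string as "not configured". A minimal sketch of how this file is presumably consumed (path and fallback behavior are assumptions, not taken from this commit):

import toml

# Load the API configuration; empty values mean the user has not filled them in yet.
config = toml.load("project/config/api.toml")
openai_key = config["GPT"]["openai_key"] or None
openai_base = config["GPT"]["openai_base"] or "https://api.openai.com/v1"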
2 changes: 1 addition & 1 deletion project/config/content.toml
@@ -2,7 +2,7 @@
openai_whisper_api = true
faster_whisper_model_default = "tiny"
faster_whisper_model_local = false
faster_whisper_model_local_path = "F:/new/model/tiny"
faster_whisper_model_local_path = "D:/Project/AAVT/model/tiny"
gpu = false

[MORE]
1 change: 0 additions & 1 deletion project/config/font_data.txt
@@ -1 +0,0 @@
System
8 changes: 4 additions & 4 deletions project/config/video.toml
@@ -2,17 +2,17 @@
openai_whisper_api = true
faster_whisper_model_default = "tiny"
faster_whisper_model_local = false
faster_whisper_model_local_path = "F:/new/model/tiny"
faster_whisper_model_local_path = "D:/Project/AAVT/model/put models here(删除模型时请切换到该模型文件)"
gpu = false
vad = false
lang = "自动识别"

[TRANSLATE]
translate_model = [ 0,]
language1 = "English"
language2 = "中文"
language1 = "中文"
language2 = "English"
wait_time = 0.10000000000000003
prompt_pre = "Prompt for GPT"
prompt_pre = "Prompt for Others"

[SUBTITLE]
subtitle_model = "硬字幕"
72 changes: 33 additions & 39 deletions project/content.py
@@ -119,59 +119,53 @@ def content():
sac.divider(label='**Parameter notices**', icon='activity', align='center', color='gray')
with open(config_dir + '/content.toml', 'w', encoding='utf-8') as file:
toml.dump(content_config, file)
sac.alert(
label='**Settings saved**',
description='**All parameters have been saved**',
size='lg', radius=20, icon=True, closable=True, color='success')
st.success("""
**Settings saved**
###### All parameter settings were saved successfully!""", icon=":material/check:")
else:
sac.divider(label='**Parameter notices**', icon='activity', align='center', color='gray')
sac.alert(
label='**Settings may not be saved**',
description='Click Save again after changing settings',
size='lg', radius=20, icon=True, closable=True, color='error')
st.error("""
**Settings not saved**
###### The settings have not been saved yet, please save them!""", icon=":material/close:")

if check_ffmpeg():
if check_cuda_support():
sac.alert(
label='**FFmpeg GPU acceleration OK**',
description='FFmpeg **acceleration is available**',
size='lg', radius=20, icon=True, closable=True, color='success')
st.success("""
**FFmpeg GPU acceleration OK**
###### This FFmpeg merge will use GPU acceleration!""", icon=":material/check:")
else:
sac.alert(
label='**FFmpeg status OK**',
description='FFmpeg was **detected successfully**',
size='lg', radius=20, icon=True, closable=True, color='success')
st.warning("""
**FFmpeg OK, but GPU acceleration failed**
###### FFmpeg hwaccels check failed! (can be ignored)""", icon=":material/warning:")
else:
sac.alert(
label='**FFmpeg status error**',
description='FFmpeg was **not detected**',
size='lg', radius=20, icon=True, closable=True, color='success')
st.error("""
**FFmpeg status error**
###### FFmpeg was not detected, please check!""", icon=":material/close:")

if openai_whisper_api:
sac.alert(
label='**Whisper API mode enabled**',
description='Make sure the **OpenAI settings are not empty**',
size='lg', radius=20, icon=True, closable=True, color='warning')
st.warning("""
**OpenAI API transcription enabled**
###### Please make sure the OPENAI settings are not empty!""", icon=":material/warning:")

if not openai_whisper_api:
if gpu:
sac.alert(
label='**GPU acceleration enabled**',
description='**If you are not on CUDA 11, see [AAVT](https://zwho5v3j233.feishu.cn/wiki/OGcrwinzhi88MkkvEMVcLkDgnzc?from=from_copylink)**',
size='lg', radius=20, icon=True, closable=True, color='warning')

if not openai_whisper_api:
if torch.cuda.is_available():
st.success("""
**GPU acceleration enabled**
###### This run will use GPU-accelerated transcription""", icon=":material/check:")
else:
st.error("""
**GPU acceleration not enabled**
###### CUDA was not detected, please turn off GPU acceleration!""", icon=":material/close:")
if local_on:
sac.alert(
label='**Local Whisper loading enabled**',
description='[Model downloads](https://huggingface.co/Systran) | [Documentation](https://zwho5v3j233.feishu.cn/wiki/OGcrwinzhi88MkkvEMVcLkDgnzc?from=from_copylink)',
size='lg', radius=20, icon=True, closable=True, color='warning')
st.success("""
**Local model transcription enabled**
###### [Model downloads](https://huggingface.co/Systran) | [Documentation](https://zwho5v3j233.feishu.cn/wiki/OGcrwinzhi88MkkvEMVcLkDgnzc?from=from_copylink)""", icon=":material/check:")

if not torch.cuda.is_available():
sac.alert(
label='**CUDA/PyTorch error**',
description='Please check! **Ignore if you only use the CPU**',
size='lg', radius=20, icon=True, closable=True, color='error')
st.error("""
**CUDA status error**
###### CUDA was not detected, CPU users can ignore this!""", icon=":material/close:")

sac.divider(label='POWERED BY @CHENYME', icon="lightning-charge", align='center', color='gray', key="1")

@@ -225,7 +219,7 @@ def content():
sac.MenuItem('glm-4-airx', icon='robot')
]),
sac.MenuItem('ChatGPT-OpenAI', icon='node-plus-fill', children=[
sac.MenuItem('gpt-3.5-turbo', icon='robot'),
sac.MenuItem('gpt-4o-mini', icon='robot'),
sac.MenuItem('gpt-4', icon='robot'),
sac.MenuItem('gpt-4-turbo', icon='robot'),
sac.MenuItem('gpt-4o', icon='robot')
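The FFmpeg notices above lean on two helpers, check_ffmpeg() and check_cuda_support(), whose bodies are not part of this diff. Judging from the "FFmpeg hwaccels" wording in the new warning, the CUDA check presumably does something along these lines (a sketch, not the project's actual implementation):

import subprocess

def check_cuda_support():
    # Ask FFmpeg which hardware accelerators this build supports.
    try:
        result = subprocess.run(["ffmpeg", "-hwaccels"], capture_output=True, text=True)
    except FileNotFoundError:
        return False
    return "cuda" in result.stdout.lower()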