[pull] main from rsxdalv:main by pull[bot] · Pull Request #71 · leftomelas/tts-generation-webui · GitHub
[pull] main from rsxdalv:main #71

Merged: 1 commit on May 10, 2025
Changes from all commits
3 changes: 3 additions & 0 deletions README.md
@@ -87,6 +87,9 @@ May 10:
* Fix missing directory bug causing extensions to fail to load. Thanks Discord/Comstock for discovery of the bug.
* Add ACE-Step to React UI.
* Add emoji to Gradio UI categories for simplicity.
* Add enhanced logging for every update and app startup, allowing for easier debugging once issues happen.
* Show gr.Info when models are being loaded or unloaded.
* Allow users to use React UI together with Gradio auth by specifying GRADIO_AUTH="username:pass" environment variable.

May 7:
* Add [Piper TTS](https://github.com/rhasspy/piper) extension
25 changes: 15 additions & 10 deletions installer_scripts/init_app.js
@@ -1,6 +1,6 @@
const fs = require("fs");
const { resolve } = require("path");
const { $ } = require("./js/shell");
const { $, $sh } = require("./js/shell");
const { displayError, displayMessage } = require("./js/displayMessage.js");
const { processExit } = require("./js/processExit.js");
const { startServer } = require("./js/server.js");
@@ -11,17 +11,21 @@ const checkConda = async () => {
updateState({ status: "checking_dependencies", currentStep: 1 });

displayMessage("Checking conda installation...");
await $("conda --version");
await $sh("conda --version");

updateState({ condaReady: true });

displayMessage("");
// verify conda paths
await $("conda info --envs");
$sh("conda info --envs");

// expect
// # conda environments:
// #
// base * .. ..\tts-generation-webui-main\installer_files\env
$sh("node --version");
$sh("python --version");
$sh("pip --version");
} catch (error) {
updateState({ status: "error", lastError: "Conda installation not found" });

@@ -33,7 +37,7 @@ const checkConda = async () => {
};

const updateConda = async () => {
await $("conda update -y -n base -c defaults conda");
await $sh("conda update -y -n base -c defaults conda");
};

const FORCE_REINSTALL = process.env.FORCE_REINSTALL ? true : false;
@@ -59,13 +63,13 @@ const syncRepo = async () => {
displayMessage("Linking to tts-generation-webui repository");
// this is a clone over the files from https://github.com/rsxdalv/tts-generation-webui
try {
await $("git init -b main");
await $(
await $sh("git init -b main");
await $sh(
"git remote add origin https://github.com/rsxdalv/tts-generation-webui"
);
await $("git fetch");
await $("git reset --hard origin/main"); // Required when the versioned files existed in path before "git init" of this repo.
await $("git branch --set-upstream-to=origin/main");
await $sh("git fetch");
await $sh("git reset --hard origin/main"); // Required when the versioned files existed in path before "git init" of this repo.
await $sh("git branch --set-upstream-to=origin/main");

const newHash = getGitCommitHash();
updateState({ gitHash: newHash });
@@ -83,7 +87,7 @@ const syncRepo = async () => {
} else {
displayMessage("Pulling updates from tts-generation-webui");
try {
await $("git pull");
await $sh("git pull");
const newHash = getGitCommitHash();
updateState({ gitHash: newHash });
if (AppliedGitVersion.get() === newHash) {
@@ -123,6 +127,7 @@ async function main() {
const isUpdated = await syncRepo();
if (!isUpdated) {
updateState({ status: "ready", currentStep: 5, totalSteps: 5 });
$sh("pip show torch torchvision torchaudio");
return true;
}

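The $sh calls added above turn every update and startup into a small environment report (conda, node, python, pip, and the installed torch packages). A minimal Python sketch of the same diagnostic idea; the function name and command list here are illustrative only and not part of the repository:

import subprocess

def log_tool_versions():
    # Print version information for the tools the installer depends on,
    # so the startup log is useful when debugging a broken install.
    commands = [
        ["node", "--version"],
        ["python", "--version"],
        ["pip", "--version"],
        ["pip", "show", "torch", "torchvision", "torchaudio"],
    ]
    for cmd in commands:
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=False)
            print("$ " + " ".join(cmd))
            print(result.stdout.strip() or result.stderr.strip())
        except FileNotFoundError:
            print("$ " + " ".join(cmd) + "  (command not found)")

log_tool_versions()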
1 change: 1 addition & 0 deletions installer_scripts/js/initializeApp.js
@@ -304,6 +304,7 @@ const checkIfTorchHasCuda = async () => {

exports.repairTorch = async () => {
const gpuChoice = readGPUChoice();
$sh("pip show torch torchvision torchaudio");
if (!checkIfTorchHasCuda() && gpuChoice === "NVIDIA GPU") {
displayMessage("Backend is NVIDIA GPU, fixing PyTorch");
try {
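checkIfTorchHasCuda and the new pip show diagnostic exist to catch CPU-only torch builds when the user picked an NVIDIA GPU backend. A quick way to see what they are probing for, using only the standard PyTorch API (not repository code):

import torch

print(torch.__version__)          # for example "2.3.1+cpu" on a CPU-only pip build
print(torch.version.cuda)         # None when torch was installed without CUDA support
print(torch.cuda.is_available())  # False on CPU-only builds or when no GPU is visible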
4 changes: 3 additions & 1 deletion react-ui/src/pages/api/gradio/[name].tsx
@@ -42,7 +42,9 @@ export default async function handler(
res.status(200).json(result);
}

const getClient = () => Client.connect(defaultBackend, {});
const getClient = () => Client.connect(defaultBackend, {
auth: process.env.GRADIO_AUTH?.split(":") as [string, string] | undefined,
});

type GradioChoices = {
choices: string[];
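The handler change reads GRADIO_AUTH (formatted as "username:pass") and hands it to Client.connect as a (username, password) tuple, so the Next.js proxy can reach an auth-protected Gradio backend. A rough Python equivalent using gradio_client; the backend URL below is a placeholder, not taken from the repository:

import os
from gradio_client import Client

# Split "username:pass" into a (username, password) tuple; leave auth off otherwise.
raw = os.environ.get("GRADIO_AUTH")
auth = tuple(raw.split(":", 1)) if raw and ":" in raw else None

client = Client("http://127.0.0.1:7860", auth=auth)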
1 change: 1 addition & 0 deletions server.py
@@ -249,6 +249,7 @@ def signal_handler(signal, frame, postgres_process):
env={
**os.environ,
"GRADIO_BACKEND_AUTOMATIC": f"http://127.0.0.1:{gradio_interface_options['server_port']}/",
# "GRADIO_AUTH": gradio_interface_options["auth"].join(":"),
},
shell=True,
)
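The commented-out line hints at also forwarding the Gradio credentials to the React UI process through this environment block. A hedged sketch of how that could look once enabled, reusing os and gradio_interface_options from the surrounding server.py and assuming the "auth" option is a (username, password) pair; note that the separator string does the joining, not the pair:

# Sketch only: build the subprocess environment with an optional GRADIO_AUTH entry.
auth = gradio_interface_options.get("auth")  # e.g. ("username", "pass") or None
env = {
    **os.environ,
    "GRADIO_BACKEND_AUTOMATIC": f"http://127.0.0.1:{gradio_interface_options['server_port']}/",
}
if auth:
    env["GRADIO_AUTH"] = ":".join(auth)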
5 changes: 4 additions & 1 deletion tts_webui/utils/log_generation.py
@@ -16,7 +16,10 @@ def StringifyParams(x):


def middleware_log_generation(params: dict):
print("Generating: '''", params["text"], "'''")
text = params.get("text", "")
if text:
text = text[:50] + "..." if len(text) > 50 else text
print(f"Generating: '''{text}'''")
print(StringifyParams(params))


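The updated logger now prints only the first 50 characters of long prompts. A standalone check of that truncation rule, with no project imports:

text = "x" * 120
preview = text[:50] + "..." if len(text) > 50 else text
print(len(preview))  # 53: the 50-character prefix plus the "..." suffix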
21 changes: 17 additions & 4 deletions tts_webui/utils/manage_model_state.py
@@ -1,6 +1,12 @@
import gradio as gr
from tts_webui.utils.torch_clear_memory import torch_clear_memory


def show(message):
print(message)
gr.Info(message)


class ModelState:
def __init__(self):
self._model = None
@@ -35,14 +41,14 @@ def wrapper(model_name, *args, **kwargs):
model_state = model_states[model_namespace]

if not model_state.is_model_loaded(model_name):
print(
show(
f"Model '{model_name}' in namespace '{model_namespace}' is not loaded or is different. Loading model..."
)
unload_model(model_namespace)
model = func(model_name, *args, **kwargs)
model_state.set_model(model, model_name)
else:
print(
show(
f"Using cached model '{model_name}' in namespace '{model_namespace}'."
)

@@ -61,9 +67,9 @@ def unload_model(model_namespace):
model_states[model_namespace].set_model(None, None)
# del model_states[model_namespace]
torch_clear_memory()
print(f"Model in namespace '{model_namespace}' has been unloaded.")
show(f"Model in namespace '{model_namespace}' has been unloaded.")
else:
print(f"No model loaded in namespace '{model_namespace}'.")
show(f"No model loaded in namespace '{model_namespace}'.")


def unload_all_models():
@@ -82,3 +88,10 @@ def list_loaded_models_as_markdown():
lines.append(f"| {namespace} | Not Loaded |")

return "\n".join(lines)


def is_model_loaded(model_namespace):
return (
model_namespace in model_states
and model_states[model_namespace].get_model() is not None
)
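Taken together, these helpers cache one model per namespace, announce loads and unloads through show() (print plus gr.Info), and now expose is_model_loaded for other code to query. A usage sketch under the assumption that the module's decorator is applied as @manage_model_state("<namespace>") around a loader whose first argument is the model name, matching the wrapper(model_name, *args, **kwargs) shown above; the decorator name itself is an assumption:

from tts_webui.utils.manage_model_state import (
    manage_model_state,  # assumed decorator name, matching the module name
    unload_model,
    is_model_loaded,
)

@manage_model_state("demo_tts")
def load_demo_model(model_name):
    # Stand-in for an expensive model load; the result is cached per namespace.
    return {"name": model_name}

load_demo_model("small")            # first call: "... is not loaded ... Loading model..."
load_demo_model("small")            # second call: "Using cached model 'small' ..."
print(is_model_loaded("demo_tts"))  # True
unload_model("demo_tts")            # "Model in namespace 'demo_tts' has been unloaded."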