
2 changed files with 114 additions and 57 deletions


@@ -4,19 +4,18 @@ on:
jobs:
package:
runs-on: ${{ matrix.os }}
runs-on: self-hosted
strategy:
matrix:
os: [windows-latest, ubuntu-latest]
steps:
- name: Set up Python
uses: actions/setup-python@v2
uses: actions/setup-python@v3
with:
python-version: 3.x
node-version: 16
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: Install dependencies
run: |
@@ -30,7 +29,7 @@ jobs:
cp webuiGUI.py dist/ # Copy webuiGUI.py to the dist directory
- name: Upload artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: ${{ matrix.os }}-binary-v1.3.1
path: dist


@@ -9,6 +9,14 @@ os.makedirs(profiles_folder, exist_ok=True)
model_folder = "./text-generation-webui/models"
extensions_folder = "./text-generation-webui/extensions"
loras_folder = "./text-generation-webui/loras"
characters_folder = "./text-generation-webui/characters"
try:
output = subprocess.check_output(['nvidia-smi'])
nvidia_gpu = True
except (OSError, subprocess.CalledProcessError):
nvidia_gpu = False
# Get the absolute path of the script file
script_path = os.path.abspath(__file__)
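
For reference, the nvidia-smi probe added above can be written as a small standalone helper; this is only a sketch of the same idea, assuming nvidia-smi is on PATH whenever an NVIDIA driver is installed, and the name detect_nvidia_gpu is illustrative rather than part of the PR:

import subprocess

def detect_nvidia_gpu() -> bool:
    # True if nvidia-smi exists and exits cleanly, False otherwise.
    try:
        subprocess.check_output(["nvidia-smi"], stderr=subprocess.DEVNULL)
        return True
    except (OSError, subprocess.CalledProcessError):
        return False

nvidia_gpu = detect_nvidia_gpu()
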
@@ -72,7 +80,7 @@ class MainWindow(QMainWindow):
self.update_check()
def init_ui(self):
self.setWindowTitle('StartUI for oobabooga webui')
self.setWindowTitle(f'StartUI for oobabooga webui v{version}')
# Menu Bar
menu = self.menuBar()
@@ -84,12 +92,6 @@ class MainWindow(QMainWindow):
help_menu = menu.addMenu("Help")
# Help menu actions
# About Action
about_action = QAction("About", self)
about_action.setToolTip("Opens the About Page")
about_action.triggered.connect(self.show_about_window)
help_menu.addAction(about_action)
# Github action
github_action = QAction("Github", self)
github_action.setStatusTip("Opens the Github Page")
@@ -108,25 +110,50 @@ class MainWindow(QMainWindow):
help_menu.addAction(version_action)
version_action.triggered.connect(self.show_version_window)
# About Action
about_action = QAction("About", self)
about_action.setToolTip("Opens the About Page")
about_action.triggered.connect(self.show_about_window)
help_menu.addAction(about_action)
# separator
help_menu.addSeparator()
# Report Bug
report_bug_action = QAction("Report Bug", self)
report_bug_action.setToolTip("Opens the GitHub issue page to create a new issue")
report_bug_action.triggered.connect(self.on_report_bug_clicked)
help_menu.addAction(report_bug_action)
layout = QGridLayout()
layout.setColumnMinimumWidth(0, 350)
layout.setColumnMinimumWidth(3, 30)
# Model Dropdown
# Get the list of model folders
model_folders = [name for name in os.listdir(model_folder) if os.path.isdir(os.path.join(model_folder, name))]
model_folders.append("none")
self.model_dropdown = QComboBox()
self.model_dropdown.addItem("none")
self.model_dropdown.addItems(model_folders)
layout.addWidget(QLabel("Choose Model:"))
self.model_dropdown.setToolTip("Select your prefered Model")
layout.addWidget(self.model_dropdown, 1, 0)
self.model_type = QComboBox()
self.model_type.addItems(["llama", "opt", "gptj", "none"])
self.model_type.addItems(["none", "llama", "opt", "gptj"])
layout.addWidget(QLabel("Choose Model Type:"), 3, 0)
self.model_type.setToolTip("Select the Model Type")
layout.addWidget(self.model_type, 4, 0)
self.character_to_load = QComboBox()
character_jsons = [file for file in os.listdir(characters_folder) if file.endswith(".json")]
without_suffix = [file.replace(".json", "") for file in character_jsons]
self.character_to_load.addItem("none")
self.character_to_load.addItems(without_suffix)
layout.addWidget(QLabel("Choose Character:"), 3, 1)
self.character_to_load.setToolTip("Select the Character you want to load")
layout.addWidget(self.character_to_load, 4, 1)
self.reload_model_button = QPushButton("Reload")
self.reload_model_button.setToolTip("Reloads the Names in the Models Folder")
self.reload_model_button.clicked.connect(self.reload_models)
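
Both the model and the character dropdowns above are populated by scanning a folder at startup; a minimal sketch of that pattern with pathlib, under the same folder layout (the helper names are illustrative, not part of the PR):

from pathlib import Path

def list_model_names(model_folder: str) -> list:
    # Every sub-directory of the models folder is offered as a model.
    return sorted(p.name for p in Path(model_folder).iterdir() if p.is_dir())

def list_character_names(characters_folder: str) -> list:
    # Character files are *.json; the dropdown shows the name without the suffix.
    return sorted(p.stem for p in Path(characters_folder).glob("*.json"))

The Reload button could reuse the same helpers by clearing the combo box, re-adding "none", and re-adding the fresh list.
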
@@ -135,14 +162,14 @@ class MainWindow(QMainWindow):
# WBIT Dropdown Menu
self.wbit_dropdown = QComboBox()
self.wbit_dropdown.addItems(["1", "2", "3", "4","8", "none"])
self.wbit_dropdown.addItems(["none", "1", "2", "3", "4","8"])
layout.addWidget(QLabel("Choose Wbits:"),5, 0)
self.wbit_dropdown.setToolTip("Select the bit quantization for this model.\nExample: for vicuna 7b 4bit, choose 4.\nYou can keep it at none; the webui will determine it automatically if the wbits are mentioned in the model name")
layout.addWidget(self.wbit_dropdown, 6, 0)
# Groupsize Dropdown Menu
self.gsize_dropdown = QComboBox()
self.gsize_dropdown.addItems(["32", "64", "128", "1024", "none"])
self.gsize_dropdown.addItems(["none", "32", "64", "128", "1024"])
layout.addWidget(QLabel("Choose Groupsize:"), 5, 1)
self.gsize_dropdown.setToolTip("Select the groupsize used by the model.\nExample: for vicuna 7b 4bit-128g, choose 128.\nYou can keep it at none; the webui will determine it automatically if the groupsize is mentioned in the model name")
layout.addWidget(self.gsize_dropdown, 6, 1, 1, 2)
@@ -168,8 +195,13 @@ class MainWindow(QMainWindow):
# GPU Checkbox and Sliders
self.gpu_radio_button = QRadioButton("Use GPU")
self.gpu_radio_button.setToolTip("Choose if you want to use your GPU")
if nvidia_gpu:
self.gpu_radio_button.setChecked(True)
self.gpu_radio_button.setToolTip("Choose if you want to use your GPU")
else:
self.gpu_radio_button.setToolTip("AMD or Intel GPU's are currently not supported.")
self.gpu_radio_button.setChecked(False)
self.gpu_radio_button.setEnabled(False)
layout.addWidget(self.gpu_radio_button, 10, 0)
self.cpu_radio_button = QRadioButton("Use CPU")
@@ -179,17 +211,20 @@ class MainWindow(QMainWindow):
self.auto_radio_button = QRadioButton("Autodevice")
self.auto_radio_button.setToolTip("Let the webui decide what's best for you!")
if nvidia_gpu:
self.auto_radio_button.setChecked(False)
else:
self.auto_radio_button.setChecked(True)
layout.addWidget(self.auto_radio_button, 10, 2)
self.gpu_radio_button.toggled.connect(self.on_gpu_radio_button_toggled)
self.cpu_radio_button.toggled.connect(self.on_cpu_radio_button_toggled)
self.auto_radio_button.toggled.connect(self.on_auto_radio_button_toggled)
if nvidia_gpu:
self.gpu_vram_sliders = []
self.gpu_vram_labels = []
self.gpu_labels = []
gpu_stats = gpustat.GPUStatCollection.new_query()
for i, gpu in enumerate(gpu_stats):
@@ -208,6 +243,8 @@ class MainWindow(QMainWindow):
self.gpu_vram_labels.append(vram_value_label)
self.gpu_vram_sliders.append(vram_slider)
else:
gpu_stats = [""] # placeholder so the rows computed from len(gpu_stats) below still line up
# Create the "Built-in RAM" label, slider, and value label
self.ram_label = QLabel("Built-in RAM:")
@@ -239,7 +276,7 @@ class MainWindow(QMainWindow):
self.pre_layer_slider.setSingleStep(1)
layout.addWidget(QLabel("Pre-layer:"), 11 + len(gpu_stats), 0)
self.pre_layer_slider.setToolTip("The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.")
layout.addWidget(self.pre_layer_slider)
layout.addWidget(self.pre_layer_slider, 11 + len(gpu_stats), 1)
self.pre_layer_slider.valueChanged.connect(self.on_pre_layer_slider_changed)
self.pre_layer_value_label = QLabel("0")
@@ -484,6 +521,16 @@ class MainWindow(QMainWindow):
except OSError:
self.show_error_message("Error", f"Could not open the link. Please open it manually.\n{release_url}")
def on_report_bug_clicked(self):
github_new_issue = "https://github.com/Pakobbix/StartUI-oobabooga-webui/issues/new"
if sys.platform == "win32":
os.startfile(github_new_issue)
else:
try:
subprocess.Popen(["xdg-open", github_new_issue])
except OSError:
self.show_error_message("Error", f"Could not open the link. Please open it manually.\n{github_new_issue}")
def on_Github_clicked(self):
startui_url = "https://github.com/Pakobbix/StartUI-oobabooga-webui/"
if sys.platform == "win32":
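
The report-bug handler added above, like the existing Github handler, branches on sys.platform (os.startfile on Windows, xdg-open elsewhere); the standard-library webbrowser module does the same job cross-platform, including macOS, which xdg-open alone does not cover. A sketch of that alternative, not part of the PR:

import webbrowser

def open_url(url: str) -> bool:
    # Returns True if a browser could be asked to open the page in a new tab.
    return webbrowser.open(url, new=2)

# e.g. open_url("https://github.com/Pakobbix/StartUI-oobabooga-webui/issues/new")
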
@@ -614,6 +661,7 @@ class MainWindow(QMainWindow):
def on_cpu_radio_button_toggled(self, checked):
# Hide/show GPU-related widgets
if nvidia_gpu:
for slider, label_vram, label_gpu in zip(self.gpu_vram_sliders, self.gpu_vram_labels, self.gpu_labels):
slider.hide()
label_vram.hide()
@@ -625,12 +673,15 @@ class MainWindow(QMainWindow):
self.ram_value_label.setVisible(checked)
# Uncheck GPU and Autodevice radio buttons
if checked:
if checked and nvidia_gpu:
self.gpu_radio_button.setChecked(False)
self.auto_radio_button.setChecked(False)
elif checked and not nvidia_gpu:
self.auto_radio_button.setChecked(False)
def on_auto_radio_button_toggled(self, checked):
# Hide/show GPU-related widgets
if nvidia_gpu:
for slider, label_vram, label_gpu in zip(self.gpu_vram_sliders, self.gpu_vram_labels, self.gpu_labels):
slider.hide()
label_vram.hide()
@@ -642,9 +693,11 @@ class MainWindow(QMainWindow):
self.ram_value_label.hide()
# Uncheck GPU and CPU radio buttons
if checked:
if checked and nvidia_gpu:
self.gpu_radio_button.setChecked(False)
self.cpu_radio_button.setChecked(False)
elif checked and not nvidia_gpu:
self.cpu_radio_button.setChecked(False)
def on_listen_port_checkbox_changed(self, state):
self.listen_port_textfield.setEnabled(state == Qt.Checked)
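
The toggled handlers for the GPU/CPU/Autodevice radio buttons above keep the three buttons mutually exclusive by unchecking the other two by hand; Qt can enforce that with a QButtonGroup, leaving the handlers to deal only with showing and hiding the sliders. A sketch of that alternative, assuming a PyQt5-style import (the file's imports are not shown in this diff; adjust for PySide if needed); make_exclusive is an illustrative helper, not in the PR:

from PyQt5.QtWidgets import QButtonGroup

def make_exclusive(*buttons) -> QButtonGroup:
    # A QButtonGroup is exclusive by default: checking one button unchecks the others.
    group = QButtonGroup()
    for button in buttons:
        group.addButton(button)
    return group  # keep the returned group referenced so it is not garbage-collected

# inside init_ui, after the three radio buttons exist:
# self.device_group = make_exclusive(self.gpu_radio_button, self.cpu_radio_button, self.auto_radio_button)
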
@@ -695,6 +748,7 @@ class MainWindow(QMainWindow):
"authentication": self.authentication_checkbox.isChecked(), # Saves the state of the Authentication
"authentication_file": self.choose_file_label.text(), # Save the authentication file path
"gpu_vram": [slider.value() for slider in self.gpu_vram_sliders], # Saves the VRAM Values
"character": self.character_to_load.currentText(), # Saves the Characters given in the Textbox
"use_extension": self.use_extensions_checkbox.isChecked(), # Saves the state of the Extension Checkbox
"extensions": [self.extensions_list.item(i).text() for i in range(self.extensions_list.count()) if self.extensions_list.item(i).checkState() == Qt.Checked], # Saves the chosen Extensions
"use_lora": self.use_lora_checkbox.isChecked(), # Saves the state of the Lora Checkbox
@@ -734,14 +788,17 @@ class MainWindow(QMainWindow):
command += f" --model_type {chosen_model_type}"
# Add loras to the command
# if self.loras_checkbox.isChecked():
# loras = self.lora_list.item(i).text() for i in range(self.lora_list.count()) if self.lora_list.item(i).checkState() == Qt.Checked
# command += f" --lora {loras}"
loras = [self.lora_list.item(i).text() for i in range(self.lora_list.count()) if self.lora_list.item(i).checkState() == Qt.Checked]
if self.use_lora_checkbox.isChecked() and self.model_dropdown.currentText() != "none":
if loras:
command += f" --lora {' '.join(loras)}"
# Add Characters to the command
chosen_characters = self.character_to_load.currentText()
if self.character_to_load.currentText() != "none":
command += f" --character {chosen_characters}"
print(chosen_characters)
# Adds wbits to the command, if not "none"
chosen_wbits = self.wbit_dropdown.currentText()
if self.wbit_dropdown.currentText() != "none":
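
The dropdown-backed options in this method all follow the same rule: their flag is appended only when the selection is not the "none" placeholder. A small helper that captures that rule (add_flag is illustrative, not part of the PR):

def add_flag(command: str, flag: str, value: str) -> str:
    # Append "--<flag> <value>" unless the value is empty or the "none" placeholder.
    if value and value != "none":
        command += f" --{flag} {value}"
    return command

# e.g. command = add_flag(command, "wbits", self.wbit_dropdown.currentText())
#      command = add_flag(command, "character", self.character_to_load.currentText())
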
@@ -860,11 +917,11 @@ class MainWindow(QMainWindow):
command += f" --extensions {' '.join(extensions)}"
# Just for debugging.
print(f"Command generated: python webuiGUI.py {command}")
#print(f"Command generated: python webuiGUI.py {command}")
# Based on the Model that's chosen, we will take care of some necessary stuff.
# Starts the webui in the conda env with the user given Options
#run_cmd_with_conda(f"python webuiGUI.py {command}")
run_cmd_with_conda(f"python webuiGUI.py {command}")
if self.use_autoclose_checkbox.isChecked():
sys.exit()
@@ -925,6 +982,7 @@ class MainWindow(QMainWindow):
self.use_nocache_checkbox.setChecked(settings.get("nocache", False))
self.authentication_checkbox.setChecked(settings.get("authentication", False))
self.choose_file_label.setText(settings.get("authentication_file", ""))
self.character_to_load.setCurrentText(settings.get("character", ""))
self.pre_layer_slider.setValue(int(settings.get("prelayer", 0)))
self.use_autolaunch_checkbox.setChecked(settings.get("autolaunch", False))
self.use_network_checkbox.setChecked(settings.get("listen", False))
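
Reading every field through settings.get(key, default) is what keeps older profile files working after new keys such as "character" are introduced; missing keys simply fall back to their defaults. A minimal sketch of that load pattern (load_settings and the file path are illustrative, not part of the PR):

import json

def load_settings(path: str) -> dict:
    # A missing or unreadable profile behaves like an empty one.
    try:
        with open(path, "r", encoding="utf-8") as fh:
            return json.load(fh)
    except (OSError, json.JSONDecodeError):
        return {}

settings = load_settings("./profiles/example.json")
character = settings.get("character", "")  # old profiles without the key still load
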