Compare commits


No commits in common. "87a4c9fd2b0a48452c39a12d5d3d5206862c982a" and "ecc3131530b7c08bc4e067e7cd50a2b5f70d9c8e" have entirely different histories.


@@ -9,14 +9,6 @@ os.makedirs(profiles_folder, exist_ok=True)
model_folder = "./text-generation-webui/models"
extensions_folder = "./text-generation-webui/extensions"
loras_folder = "./text-generation-webui/loras"
characters_folder = "./text-generation-webui/characters"
try:
    output = subprocess.check_output(['nvidia-smi'])
    nvidia_gpu = True
except:
    nvidia_gpu = False
    pass
# # Get the absolute path of the script file
script_path = os.path.abspath(__file__)
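Note on the block removed above: it detects an NVIDIA card by running nvidia-smi and treating any failure as "no GPU". A minimal standalone sketch of that probe, with the bare except narrowed (the has_nvidia_gpu helper name is illustrative, not from this repository):

import subprocess

def has_nvidia_gpu():
    # nvidia-smi only succeeds when an NVIDIA driver and GPU are present
    try:
        subprocess.check_output(["nvidia-smi"])
        return True
    except (OSError, subprocess.CalledProcessError):
        # binary missing or non-zero exit code
        return False

nvidia_gpu = has_nvidia_gpu()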
@@ -80,7 +72,7 @@ class MainWindow(QMainWindow):
        self.update_check()
    def init_ui(self):
        self.setWindowTitle(f'StartUI for oobabooga webui v{version}')
        self.setWindowTitle('StartUI for oobabooga webui')
        # Menu Bar
        menu = self.menuBar()
@@ -126,34 +118,24 @@ class MainWindow(QMainWindow):
        help_menu.addAction(report_bug_action)
        layout = QGridLayout()
        layout.setColumnMinimumWidth(0, 350)
        layout.setColumnMinimumWidth(3, 30)
        # Model Dropdown
        # Get the list of model folders
        model_folders = [name for name in os.listdir(model_folder) if os.path.isdir(os.path.join(model_folder, name))]
        model_folders.append("none")
        self.model_dropdown = QComboBox()
        self.model_dropdown.addItem("none")
        self.model_dropdown.addItems(model_folders)
        layout.addWidget(QLabel("Choose Model:"))
        self.model_dropdown.setToolTip("Select your preferred Model")
        layout.addWidget(self.model_dropdown, 1, 0)
        self.model_type = QComboBox()
        self.model_type.addItems(["none", "llama", "opt", "gptj"])
        self.model_type.addItems(["llama", "opt", "gptj", "none"])
        layout.addWidget(QLabel("Choose Model Type:"), 3, 0)
        self.model_type.setToolTip("Select the Model Type")
        layout.addWidget(self.model_type, 4, 0)
        self.character_to_load = QComboBox()
        character_jsons = [file for file in os.listdir(characters_folder) if file.endswith(".json")]
        without_suffix = [file.replace(".json", "") for file in character_jsons]
        self.character_to_load.addItem("none")
        self.character_to_load.addItems(without_suffix)
        layout.addWidget(QLabel("Choose Character:"), 3, 1)
        self.character_to_load.setToolTip("Select the Character you want to load")
        layout.addWidget(self.character_to_load, 4, 1)
        self.reload_model_button = QPushButton("Reload")
        self.reload_model_button.setToolTip("Reloads the Names in the Models Folder")
        self.reload_model_button.clicked.connect(self.reload_models)
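The model and character pickers above share one pattern: list a folder, prepend a "none" sentinel, and fill a QComboBox. A stripped-down sketch of that pattern, assuming a hypothetical make_folder_dropdown helper:

import os

from PyQt5.QtWidgets import QComboBox

def make_folder_dropdown(folder):
    # One entry per subfolder, plus a "none" sentinel the launcher checks later
    names = [n for n in os.listdir(folder) if os.path.isdir(os.path.join(folder, n))]
    dropdown = QComboBox()
    dropdown.addItem("none")
    dropdown.addItems(names)
    return dropdown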
@@ -162,14 +144,14 @@ class MainWindow(QMainWindow):
        # WBIT Dropdown Menu
        self.wbit_dropdown = QComboBox()
        self.wbit_dropdown.addItems(["none", "1", "2", "3", "4","8"])
        self.wbit_dropdown.addItems(["1", "2", "3", "4","8", "none"])
        layout.addWidget(QLabel("Choose Wbits:"),5, 0)
        self.wbit_dropdown.setToolTip("Select the bits quantization for this model\nExample: vicuna 7b 4bit you should choose 4.\nYou can keep it at none, the webui will determine it automatically if the wbits are mentioned in the name of the model")
        layout.addWidget(self.wbit_dropdown, 6, 0)
        # Groupsize Dropdown Menu
        self.gsize_dropdown = QComboBox()
        self.gsize_dropdown.addItems(["none", "32", "64", "128", "1024"])
        self.gsize_dropdown.addItems(["32", "64", "128", "1024", "none"])
        layout.addWidget(QLabel("Choose Groupsize:"), 5, 1)
        self.gsize_dropdown.setToolTip("Select the groupsize used by the Model.\nExample: vicuna 7b 4bit-128g you should choose 128.\nYou can keep it at none, the webui will determine it automatically if the groupsize is mentioned in the name of the model")
        layout.addWidget(self.gsize_dropdown, 6, 1, 1, 2)
@@ -195,13 +177,8 @@ class MainWindow(QMainWindow):
        # GPU Checkbox and Sliders
        self.gpu_radio_button = QRadioButton("Use GPU")
        if nvidia_gpu:
            self.gpu_radio_button.setChecked(True)
            self.gpu_radio_button.setToolTip("Choose if you want to use your GPU")
        else:
self.gpu_radio_button.setToolTip("AMD or Intel GPU's are currently not supported.")
            self.gpu_radio_button.setChecked(False)
            self.gpu_radio_button.setEnabled(False)
        self.gpu_radio_button.setToolTip("Choose if you want to use your GPU")
        self.gpu_radio_button.setChecked(True)
        layout.addWidget(self.gpu_radio_button, 10, 0)
        self.cpu_radio_button = QRadioButton("Use CPU")
@@ -211,40 +188,35 @@ class MainWindow(QMainWindow):
        self.auto_radio_button = QRadioButton("Autodevice")
        self.auto_radio_button.setToolTip("Let the webui decide what's best for you!")
        if nvidia_gpu:
            self.auto_radio_button.setChecked(False)
        else:
            self.auto_radio_button.setChecked(True)
        self.auto_radio_button.setChecked(False)
        layout.addWidget(self.auto_radio_button, 10, 2)
        self.gpu_radio_button.toggled.connect(self.on_gpu_radio_button_toggled)
        self.cpu_radio_button.toggled.connect(self.on_cpu_radio_button_toggled)
        self.auto_radio_button.toggled.connect(self.on_auto_radio_button_toggled)
        if nvidia_gpu:
            self.gpu_vram_sliders = []
            self.gpu_vram_labels = []
            self.gpu_labels = []
            gpu_stats = gpustat.GPUStatCollection.new_query()
        self.gpu_vram_sliders = []
        self.gpu_vram_labels = []
        self.gpu_labels = []
            for i, gpu in enumerate(gpu_stats):
                gpu_label = QLabel(f"{gpu.name} VRAM:")
                gpu_label.setToolTip(f"Total VRAM: {gpu.memory_total} MiB\nUsed VRAM: {gpu.memory_used} MiB\nFree VRAM: {gpu.memory_free} MiB")
                layout.addWidget(gpu_label, 11 + i, 0)
                self.gpu_labels.append(gpu_label)
                vram_slider = QSlider(Qt.Horizontal)
                vram_slider.setMaximum(int(gpu.memory_total / 1024))
                vram_slider.valueChanged.connect(lambda value, idx=i: self.on_vram_slider_changed(value, idx))
                layout.addWidget(vram_slider, 11 + i, 1)
                vram_value_label = QLabel("0 GiB")
                layout.addWidget(vram_value_label, 11 + i, 2)
                self.gpu_vram_labels.append(vram_value_label)
                self.gpu_vram_sliders.append(vram_slider)
        else:
            gpu_stats = [""]
        gpu_stats = gpustat.GPUStatCollection.new_query()
        for i, gpu in enumerate(gpu_stats):
            gpu_label = QLabel(f"{gpu.name} VRAM:")
            gpu_label.setToolTip(f"Total VRAM: {gpu.memory_total} MiB\nUsed VRAM: {gpu.memory_used} MiB\nFree VRAM: {gpu.memory_free} MiB")
            layout.addWidget(gpu_label, 11 + i, 0)
            self.gpu_labels.append(gpu_label)
            vram_slider = QSlider(Qt.Horizontal)
            vram_slider.setMaximum(int(gpu.memory_total / 1024))
            vram_slider.valueChanged.connect(lambda value, idx=i: self.on_vram_slider_changed(value, idx))
            layout.addWidget(vram_slider, 11 + i, 1)
            vram_value_label = QLabel("0 GiB")
            layout.addWidget(vram_value_label, 11 + i, 2)
            self.gpu_vram_labels.append(vram_value_label)
            self.gpu_vram_sliders.append(vram_slider)
        # Create the "Built-in RAM" label, slider, and value label
        self.ram_label = QLabel("Built-in RAM:")
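Both versions of the slider block query gpustat and create one VRAM slider per detected GPU; the lambda's idx=i default argument pins each slider's signal to its own GPU index. A self-contained sketch of just that pattern (build_vram_sliders and on_change are illustrative names, not from this diff):

import gpustat
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QSlider

def build_vram_sliders(on_change):
    sliders = []
    # One slider per detected GPU, capped at its total VRAM in GiB
    for i, gpu in enumerate(gpustat.GPUStatCollection.new_query()):
        slider = QSlider(Qt.Horizontal)
        slider.setMaximum(int(gpu.memory_total / 1024))
        # idx=i freezes the current loop index; a bare `lambda value:` closing
        # over `i` would report the last GPU for every slider.
        slider.valueChanged.connect(lambda value, idx=i: on_change(value, idx))
        sliders.append(slider)
    return sliders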
@@ -276,7 +248,7 @@ class MainWindow(QMainWindow):
        self.pre_layer_slider.setSingleStep(1)
        layout.addWidget(QLabel("Pre-layer:"), 11 + len(gpu_stats), 0)
        self.pre_layer_slider.setToolTip("The number of layers to allocate to the GPU. Setting this parameter enables CPU offloading for 4-bit models.")
        layout.addWidget(self.pre_layer_slider, 11 + len(gpu_stats), 1)
        layout.addWidget(self.pre_layer_slider)
        self.pre_layer_slider.valueChanged.connect(self.on_pre_layer_slider_changed)
        self.pre_layer_value_label = QLabel("0")
@@ -661,11 +633,10 @@ class MainWindow(QMainWindow):
    def on_cpu_radio_button_toggled(self, checked):
        # Hide/show GPU-related widgets
        if nvidia_gpu:
            for slider, label_vram, label_gpu in zip(self.gpu_vram_sliders, self.gpu_vram_labels, self.gpu_labels):
                slider.hide()
                label_vram.hide()
                label_gpu.hide()
        for slider, label_vram, label_gpu in zip(self.gpu_vram_sliders, self.gpu_vram_labels, self.gpu_labels):
            slider.hide()
            label_vram.hide()
            label_gpu.hide()
        # Show RAM slider and value label
        self.ram_label.setVisible(checked)
@@ -673,19 +644,16 @@ class MainWindow(QMainWindow):
        self.ram_value_label.setVisible(checked)
        # Uncheck GPU and Autodevice radio buttons
        if checked and nvidia_gpu:
        if checked:
            self.gpu_radio_button.setChecked(False)
            self.auto_radio_button.setChecked(False)
        elif checked and not nvidia_gpu:
            self.auto_radio_button.setChecked(False)
    def on_auto_radio_button_toggled(self, checked):
        # Hide/show GPU-related widgets
        if nvidia_gpu:
            for slider, label_vram, label_gpu in zip(self.gpu_vram_sliders, self.gpu_vram_labels, self.gpu_labels):
                slider.hide()
                label_vram.hide()
                label_gpu.hide()
        for slider, label_vram, label_gpu in zip(self.gpu_vram_sliders, self.gpu_vram_labels, self.gpu_labels):
            slider.hide()
            label_vram.hide()
            label_gpu.hide()
        # Hide RAM slider and value label
        self.ram_label.hide()
@@ -693,11 +661,9 @@ class MainWindow(QMainWindow):
        self.ram_value_label.hide()
        # Uncheck GPU and CPU radio buttons
        if checked and nvidia_gpu:
        if checked:
            self.gpu_radio_button.setChecked(False)
            self.cpu_radio_button.setChecked(False)
        elif checked and not nvidia_gpu:
            self.cpu_radio_button.setChecked(False)
    def on_listen_port_checkbox_changed(self, state):
        self.listen_port_textfield.setEnabled(state == Qt.Checked)
@@ -748,7 +714,6 @@ class MainWindow(QMainWindow):
"authentication": self.authentication_checkbox.isChecked(), # Saves the state of the Authentication
"authentication_file": self.choose_file_label.text(), # Save the authentication file path
"gpu_vram": [slider.value() for slider in self.gpu_vram_sliders], # Saves the VRAM Values
"character": self.character_to_load.currentText(), # Saves the Characters given in the Textbox
"use_extension": self.use_extensions_checkbox.isChecked(), # Saves the state of the Extension Checkbox
"extensions": [self.extensions_list.item(i).text() for i in range(self.extensions_list.count()) if self.extensions_list.item(i).checkState() == Qt.Checked], # Saves the chosen Extensions
"use_lora": self.use_lora_checkbox.isChecked(), # Saves the state of the Lora Checkbox
@@ -788,17 +753,14 @@ class MainWindow(QMainWindow):
command += f" --model_type {chosen_model_type}"
# Add loras to the command
# if self.loras_checkbox.isChecked():
# loras = self.lora_list.item(i).text() for i in range(self.lora_list.count()) if self.lora_list.item(i).checkState() == Qt.Checked
# command += f" --lora {loras}"
loras = [self.lora_list.item(i).text() for i in range(self.lora_list.count()) if self.lora_list.item(i).checkState() == Qt.Checked]
if self.use_lora_checkbox.isChecked() and self.model_dropdown.currentText() != "none":
if loras:
command += f" --lora {' '.join(loras)}"
# Add Characters to the command
chosen_characters = self.character_to_load.currentText()
if self.character_to_load.currentText() != "none":
command += f" --character {chosen_characters}"
print(chosen_characters)
# Adds wbits to the command, if not "none"
chosen_wbits = self.wbit_dropdown.currentText()
if self.wbit_dropdown.currentText() != "none":
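The launch command is built by appending one CLI flag per selection that is not "none". A condensed sketch of that accumulation pattern; the flag names and the server.py base command here are assumptions for illustration only:

def build_command(selections, base="python server.py"):
    # selections maps a CLI flag to the value chosen in the UI,
    # e.g. {"--model": "vicuna-7b", "--wbits": "4", "--groupsize": "none"}
    command = base
    for flag, value in selections.items():
        if value and value != "none":
            command += f" {flag} {value}"
    return command

# build_command({"--model": "vicuna-7b", "--wbits": "4", "--groupsize": "none"})
# -> "python server.py --model vicuna-7b --wbits 4"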
@@ -982,7 +944,6 @@ class MainWindow(QMainWindow):
        self.use_nocache_checkbox.setChecked(settings.get("nocache", False))
        self.authentication_checkbox.setChecked(settings.get("authentication", False))
        self.choose_file_label.setText(settings.get("authentication_file", ""))
        self.character_to_load.setCurrentText(settings.get("character", ""))
        self.pre_layer_slider.setValue(int(settings.get("prelayer", 0)))
        self.use_autolaunch_checkbox.setChecked(settings.get("autolaunch", False))
        self.use_network_checkbox.setChecked(settings.get("listen", False))
@@ -1035,4 +996,4 @@ if __name__ == "__main__":
    if darkdetect.isDark():
        dark_stylesheet = qdarkstyle.load_stylesheet_pyqt5()
        app.setStyleSheet(dark_stylesheet)
    sys.exit(app.exec_())
    sys.exit(app.exec_())
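The entry point applies qdarkstyle's stylesheet only when darkdetect reports a dark system theme. A self-contained sketch of that bootstrap, with the window contents omitted:

import sys

import darkdetect
import qdarkstyle
from PyQt5.QtWidgets import QApplication, QMainWindow

if __name__ == "__main__":
    app = QApplication(sys.argv)
    if darkdetect.isDark():
        # Match the OS theme with qdarkstyle's PyQt5 stylesheet
        app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    window = QMainWindow()
    window.show()
    sys.exit(app.exec_())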