Skip to content

Commit

Permalink
v2.2.3 - add server settings
Browse files Browse the repository at this point in the history
Added the ability to change the settings of the LM Studio server.

port - changes the port on which the program looks for the LM Studio server
max_tokens - changes the maximum response length from LM Studio. The combined total of LM Studio's maximum response, your question, and the context provided to LM Studio must not exceed the "Context Length" setting within LM Studio.
  • Loading branch information
BBC-Esq authored Oct 15, 2023
1 parent 5c21855 commit cfaa0d5
Show file tree
Hide file tree
Showing 6 changed files with 132 additions and 24 deletions.
14 changes: 8 additions & 6 deletions config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,17 @@ AVAILABLE_MODELS:
- sentence-transformers/gtr-t5-large
- sentence-transformers/gtr-t5-base
COMPUTE_DEVICE: cuda
EMBEDDING_MODEL_NAME: C:/PATH/Scripts/LM Search Vector Database/v2_0_1 - working/Embedding_Models/hkunlp--instructor-xl
EMBEDDING_MODEL_NAME: C:/PATH/Scripts/ChromaDB-Plugin-for-LM-Studio/v2_2_2 - working/Embedding_Models/BAAI--bge-base-en-v1.5
server:
api_key: ''
connection_str: http://localhost:1234/v1
model_max_tokens: 500
model_temperature: 0.1
prefix: '[INST]'
suffix: '[/INST]'
styles:
button: 'background-color: #323842; color: light gray; font: 10pt "Segoe UI Historic";
width: 29;'
frame: 'background-color: #161b22;'
input: 'background-color: #2e333b; color: light gray; font: 13pt "Segoe UI Historic";'
text: 'background-color: #092327; color: light gray; font: 12pt "Segoe UI Historic";'
tabs:
- name: Settings
placeholder: Placeholder text for Settings tab.
- name: Models
placeholder: Placeholder text for Models tab.
2 changes: 1 addition & 1 deletion gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def init_ui(self):
self.left_frame = QFrame()
left_vbox = QVBoxLayout()

tab_widget = create_tabs(tabs_config)
tab_widget = create_tabs()
left_vbox.addWidget(tab_widget)

button_data = [
Expand Down
46 changes: 36 additions & 10 deletions gui_tabs.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,56 @@
from PySide6.QtWebEngineWidgets import QWebEngineView
from PySide6.QtWidgets import QTextEdit, QTabWidget
from PySide6.QtWidgets import QTextEdit, QTabWidget, QVBoxLayout, QWidget, QGroupBox
from PySide6.QtCore import QUrl
import os
from gui_tabs_settings_server import ServerSettingsTab
from gui_tabs_settings_models import ModelsSettingsTab

def create_tabs(tabs_config):
def create_tabs():
tab_widget = QTabWidget()
tab_widget.setTabPosition(QTabWidget.South)

tab_widgets = [QTextEdit(tab.get('placeholder', '')) for tab in tabs_config]
for i, tab in enumerate(tabs_config):
tab_widget.addTab(tab_widgets[i], tab.get('name', ''))
user_manual_folder = os.path.join(os.path.dirname(__file__), 'User_Manual')

# Create Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout()

# Add Server and EmbeddingModel settings to Settings tab
server_settings = ServerSettingsTab()
server_group = QGroupBox("Server/LLM Settings")
server_layout = QVBoxLayout()
server_layout.addWidget(server_settings)
server_group.setLayout(server_layout)

models_settings = ModelsSettingsTab()
models_group = QGroupBox("Embedding Models Settings")
models_layout = QVBoxLayout()
models_layout.addWidget(models_settings)
models_group.setLayout(models_layout)

settings_layout.addWidget(server_group)
settings_layout.addWidget(models_group)
settings_tab.setLayout(settings_layout)

# Adding the Tutorial tab
tab_widget.addTab(settings_tab, 'Settings')

# Create Models tab
models_tab = QTextEdit("Placeholder text for Models tab.")
tab_widget.addTab(models_tab, 'Models')

# Create Floating Point Formats tab
tutorial_tab = QWebEngineView()
tab_widget.addTab(tutorial_tab, 'Floating Point Formats')
user_manual_folder = os.path.join(os.path.dirname(__file__), 'User_Manual')
tutorial_html_path = os.path.join(user_manual_folder, 'number_format.html')
tutorial_tab.setUrl(QUrl.fromLocalFile(tutorial_html_path))
tab_widget.addTab(tutorial_tab, 'Floating Point Formats')

# Adding the Whisper tab
# Create Whisper tab
whisper_tab = QWebEngineView()
whisper_html_path = os.path.join(user_manual_folder, 'whisper_quants.html')
whisper_tab.setUrl(QUrl.fromLocalFile(whisper_html_path))
tab_widget.addTab(whisper_tab, 'Whisper')

# Adding the Tips tab
# Create Tips tab
tips_tab = QWebEngineView()
tips_html_path = os.path.join(user_manual_folder, 'tips.html')
tips_tab.setUrl(QUrl.fromLocalFile(tips_html_path))
Expand Down
11 changes: 11 additions & 0 deletions gui_tabs_settings_models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
from PySide6.QtWidgets import QWidget, QLabel, QVBoxLayout

class ModelsSettingsTab(QWidget):
    """Placeholder panel for embedding-model settings (not yet implemented)."""

    def __init__(self):
        super().__init__()

        # Single informational label until real settings widgets exist.
        placeholder = QLabel("Placeholder text for embedding model settings, which are coming soon.")
        self.label = placeholder

        container = QVBoxLayout()
        container.addWidget(placeholder)
        self.setLayout(container)
61 changes: 61 additions & 0 deletions gui_tabs_settings_server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
from PySide6.QtWidgets import QWidget, QLabel, QLineEdit, QPushButton, QGridLayout, QMessageBox
from PySide6.QtGui import QIntValidator
import yaml

class ServerSettingsTab(QWidget):
    """Settings panel for the local LM Studio server connection.

    Lets the user change the server port (rewriting the ``connection_str``
    URL in config.yaml) and the maximum number of tokens the model may
    generate (``model_max_tokens``).
    """

    def __init__(self):
        super(ServerSettingsTab, self).__init__()

        # Load current server settings so the labels can show them.
        with open('config.yaml', 'r') as file:
            config_data = yaml.safe_load(file)
        self.connection_str = config_data.get('server', {}).get('connection_str', '')
        # Port is the segment between the last ':' and the following '/'
        # of a URL such as http://localhost:1234/v1.
        self.current_port = self.connection_str.split(":")[-1].split("/")[0]
        self.current_max_tokens = config_data.get('server', {}).get('model_max_tokens', '')

        # Server port setting widgets
        self.port_label = QLabel(f"Current Port: {self.current_port}")
        self.new_port_edit = QLineEdit()
        self.new_port_edit.setPlaceholderText("Enter new port...")
        self.new_port_edit.setValidator(QIntValidator())

        # Max token setting widgets
        self.max_tokens_label = QLabel(f"Max Tokens: {self.current_max_tokens}")
        self.new_max_tokens_edit = QLineEdit()
        self.new_max_tokens_edit.setPlaceholderText("Enter new max tokens...")
        self.new_max_tokens_edit.setValidator(QIntValidator())

        # Update config button
        self.update_button = QPushButton("Update")
        self.update_button.clicked.connect(self.update_config)

        layout = QGridLayout()
        layout.addWidget(self.port_label, 0, 0)
        layout.addWidget(self.new_port_edit, 0, 1)
        layout.addWidget(self.max_tokens_label, 1, 0)
        layout.addWidget(self.new_max_tokens_edit, 1, 1)
        layout.addWidget(self.update_button, 2, 1)

        self.setLayout(layout)

    def _connection_str_with_port(self, new_port):
        """Return ``connection_str`` with only its port segment replaced.

        A plain ``str.replace(current_port, new_port)`` would also rewrite
        any other occurrence of the port digits in the URL (e.g. a port of
        '1' inside a host like '127.0.0.1'), so rebuild the URL around the
        last ':' instead.
        """
        head, _, tail = self.connection_str.rpartition(":")
        # ``tail`` looks like '1234/v1'; keep everything after the old port.
        return f"{head}:{new_port}{tail[len(self.current_port):]}"

    def update_config(self):
        """Write any newly entered port / max-tokens values to config.yaml.

        Shows a warning dialog and leaves the file untouched when neither
        field contains a value.
        """
        new_port = self.new_port_edit.text()
        new_max_tokens = self.new_max_tokens_edit.text()

        # Update config.yaml only if at least one setting was changed.
        if not new_port and not new_max_tokens:
            QMessageBox.warning(self, 'No Updates', 'No new values were entered.')
            return

        # Re-read the file so unrelated settings changed elsewhere survive.
        with open('config.yaml', 'r') as file:
            config_data = yaml.safe_load(file)

        if new_port:
            self.connection_str = self._connection_str_with_port(new_port)
            config_data['server']['connection_str'] = self.connection_str
            # Keep cached state in sync so a second update works correctly.
            self.current_port = new_port
            self.port_label.setText(f"Current Port: {new_port}")

        if new_max_tokens:
            config_data['server']['model_max_tokens'] = int(new_max_tokens)
            self.current_max_tokens = int(new_max_tokens)
            self.max_tokens_label.setText(f"Max Tokens: {new_max_tokens}")

        with open('config.yaml', 'w') as file:
            yaml.safe_dump(config_data, file)
22 changes: 15 additions & 7 deletions server_connector.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,17 +15,25 @@
chroma_db_impl="duckdb+parquet", persist_directory=PERSIST_DIRECTORY, anonymized_telemetry=False
)

openai.api_base = 'http://localhost:1234/v1'
openai.api_key = ''

prefix = "[INST]"
suffix = "[/INST]"

def connect_to_local_chatgpt(prompt):
    """Send *prompt* to the local LM Studio server and return its reply text.

    Reads the ``server`` section of config.yaml on every call so changes
    made in the settings tab take effect without a restart: connection
    URL, API key, prompt prefix/suffix, temperature and max-tokens.

    Note: the diff artifact left a duplicate ``temperature=0.1`` keyword
    alongside ``temperature=model_temperature`` — a SyntaxError; only the
    config-driven value is kept here.
    """
    with open('config.yaml', 'r') as config_file:
        config = yaml.safe_load(config_file)
    server_config = config.get('server', {})
    openai_api_base = server_config.get('connection_str')
    openai_api_key = server_config.get('api_key')
    prefix = server_config.get('prefix')
    suffix = server_config.get('suffix')
    model_temperature = server_config.get('model_temperature')
    model_max_tokens = server_config.get('model_max_tokens')

    # Point the (module-level) openai client at the local LM Studio server.
    openai.api_base = openai_api_base
    openai.api_key = openai_api_key

    # Wrap the prompt in the instruction markers the local model expects.
    formatted_prompt = f"{prefix}{prompt}{suffix}"
    response = openai.ChatCompletion.create(
        model="local model",
        temperature=model_temperature,
        max_tokens=model_max_tokens,
        messages=[{"role": "user", "content": formatted_prompt}]
    )
    return response.choices[0].message["content"]
Expand Down

0 comments on commit cfaa0d5

Please sign in to comment.