From 85626b3dabd41e5755c4eeb5fb0f556391b8203a Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 11:31:01 -0400 Subject: [PATCH 001/198] Fix model path. --- gpt4all-chat/qml/ModelDownloaderDialog.qml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gpt4all-chat/qml/ModelDownloaderDialog.qml b/gpt4all-chat/qml/ModelDownloaderDialog.qml index 6874aac82f42..b393171b21f5 100644 --- a/gpt4all-chat/qml/ModelDownloaderDialog.qml +++ b/gpt4all-chat/qml/ModelDownloaderDialog.qml @@ -401,9 +401,9 @@ MyDialog { FolderDialog { id: modelPathDialog title: "Please choose a directory" - currentFolder: "file://" + MySettings.modelsPath + currentFolder: "file://" + MySettings.modelPath onAccepted: { - MySettings.modelsPath = selectedFolder + MySettings.modelPath = selectedFolder } } Label { @@ -424,7 +424,7 @@ MyDialog { Accessible.description: ToolTip.text onEditingFinished: { if (isValid) { - MySettings.modelsPath = modelPathDisplayField.text + MySettings.modelPath = modelPathDisplayField.text } else { text = MySettings.modelPath } From 58d6f40f5086cc7d57ea6378086acc5944496ab8 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 11:50:44 -0400 Subject: [PATCH 002/198] Fix broken installs. --- gpt4all-chat/mysettings.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp index a318e06a836c..6d40d76a6af8 100644 --- a/gpt4all-chat/mysettings.cpp +++ b/gpt4all-chat/mysettings.cpp @@ -421,6 +421,16 @@ QString MySettings::modelPath() const { QSettings setting; setting.sync(); + // We have to migrate the old setting because I changed the setting key recklessly in v2.4.11 + // which broke a lot of existing installs + const bool containsOldSetting = setting.contains("modelPaths"); + if (containsOldSetting) { + const bool containsNewSetting = setting.contains("modelPath"); + if (!containsNewSetting) + setting.setValue("modelPath", setting.value("modelPaths")); + setting.remove("modelPaths"); + setting.sync(); + } return setting.value("modelPath", defaultLocalModelsPath()).toString(); } From d9f0245c1b0c52eca42a2dcb08f6080a1967dfcc Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 13:05:06 -0400 Subject: [PATCH 003/198] Fix problems with browse of folder in settings dialog. 
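The fix below consolidates the per-pane FolderDialog instances into a single dialog owned by MySettingsStack, exposed to each tab as an openFolderDialog(currentFolder, onAccepted) function property that the Loader injects when a tab is created. A minimal sketch of the call pattern this establishes (the tab composition and settings key here are illustrative, not part of the patch; MySettingsTab, MyButton, and MySettings.modelPath are from the diffs below):

```qml
import QtQuick
import QtQuick.Controls
import mysettings

// Sketch only: any settings tab can now ask for a folder without owning a dialog.
MySettingsTab {
    title: qsTr("Example")
    contentItem: MyButton {
        text: qsTr("Browse")
        onClicked: {
            // openFolderDialog is the function property injected by MySettingsStack's Loader
            openFolderDialog("file://" + MySettings.modelPath, function(selectedFolder) {
                MySettings.modelPath = selectedFolder
            })
        }
    }
}
```

One caveat worth noting about the implementation below: openFolderDialog connects a fresh handler to accepted on every call, so if the dialog is opened repeatedly the stale callbacks will also fire; a more robust version would disconnect the previous handler first.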
--- gpt4all-chat/qml/ApplicationSettings.qml | 14 +++++--------- gpt4all-chat/qml/LocalDocsSettings.qml | 13 +++---------- gpt4all-chat/qml/MySettingsStack.qml | 14 ++++++++++++++ gpt4all-chat/qml/MySettingsTab.qml | 1 + 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/gpt4all-chat/qml/ApplicationSettings.qml b/gpt4all-chat/qml/ApplicationSettings.qml index 4e475ac3b5ff..b0208aac6a52 100644 --- a/gpt4all-chat/qml/ApplicationSettings.qml +++ b/gpt4all-chat/qml/ApplicationSettings.qml @@ -53,14 +53,6 @@ MySettingsTab { MySettings.userDefaultModel = comboBox.currentText } } - FolderDialog { - id: modelPathDialog - title: "Please choose a directory" - currentFolder: "file://" + MySettings.modelPath - onAccepted: { - MySettings.modelPath = selectedFolder - } - } Label { id: modelPathLabel text: qsTr("Download path:") @@ -93,7 +85,11 @@ MySettingsTab { Layout.column: 2 text: qsTr("Browse") Accessible.description: qsTr("Opens a folder picker dialog to choose where to save model files") - onClicked: modelPathDialog.open() + onClicked: { + openFolderDialog("file://" + MySettings.modelPath, function(selectedFolder) { + MySettings.modelPath = selectedFolder + }) + } } Label { id: nThreadsLabel diff --git a/gpt4all-chat/qml/LocalDocsSettings.qml b/gpt4all-chat/qml/LocalDocsSettings.qml index 242be8e16cb8..a1719230ea8b 100644 --- a/gpt4all-chat/qml/LocalDocsSettings.qml +++ b/gpt4all-chat/qml/LocalDocsSettings.qml @@ -21,15 +21,6 @@ MySettingsTab { property alias collection: collection.text property alias folder_path: folderEdit.text - FolderDialog { - id: folderDialog - title: "Please choose a directory" - currentFolder: StandardPaths.writableLocation(StandardPaths.HomeLocation) - onAccepted: { - root.folder_path = selectedFolder - } - } - Item { Layout.fillWidth: true height: row.height @@ -79,7 +70,9 @@ MySettingsTab { id: browseButton text: qsTr("Browse") onClicked: { - folderDialog.open(); + openFolderDialog(StandardPaths.writableLocation(StandardPaths.HomeLocation), function(selectedFolder) { + root.folder_path = selectedFolder + }) } } diff --git a/gpt4all-chat/qml/MySettingsStack.qml b/gpt4all-chat/qml/MySettingsStack.qml index fba9fe07a521..1d179c3842ae 100644 --- a/gpt4all-chat/qml/MySettingsStack.qml +++ b/gpt4all-chat/qml/MySettingsStack.qml @@ -3,6 +3,8 @@ import QtQuick import QtQuick.Controls import QtQuick.Controls.Basic import QtQuick.Layouts +import QtQuick.Dialogs +import Qt.labs.folderlistmodel import mysettings Item { @@ -91,6 +93,17 @@ Item { border.color: theme.tabBorder } + FolderDialog { + id: folderDialog + title: qsTr("Please choose a directory") + } + + function openFolderDialog(currentFolder, onAccepted) { + folderDialog.currentFolder = currentFolder; + folderDialog.accepted.connect(function() { onAccepted(folderDialog.currentFolder); }); + folderDialog.open(); + } + StackLayout { id: stackLayout anchors.top: tabTitlesModel.count > 1 ? 
dividerTabBar.bottom : titleLabel.bottom @@ -106,6 +119,7 @@ Item { sourceComponent: model.modelData onLoaded: { settingsStack.tabTitlesModel.append({ "title": loader.item.title }); + item.openFolderDialog = settingsStack.openFolderDialog; } } } diff --git a/gpt4all-chat/qml/MySettingsTab.qml b/gpt4all-chat/qml/MySettingsTab.qml index 9593564e30b7..fd7ba83db8c4 100644 --- a/gpt4all-chat/qml/MySettingsTab.qml +++ b/gpt4all-chat/qml/MySettingsTab.qml @@ -9,6 +9,7 @@ Item { property string title: "" property Item contentItem: null property Item advancedSettings: null + property var openFolderDialog signal restoreDefaultsClicked onContentItemChanged: function() { From e2458454d3e8de959be438674fadb4ebccc5b394 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 13:33:07 -0400 Subject: [PATCH 004/198] Bump to v2.4.12 and new release notes. --- gpt4all-chat/CMakeLists.txt | 2 +- gpt4all-chat/metadata/release.json | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index f0baba2c7d68..b98989e2ff9a 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -17,7 +17,7 @@ endif() set(APP_VERSION_MAJOR 2) set(APP_VERSION_MINOR 4) -set(APP_VERSION_PATCH 12) +set(APP_VERSION_PATCH 13) set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}") # Include the binary directory for the generated header file diff --git a/gpt4all-chat/metadata/release.json b/gpt4all-chat/metadata/release.json index de0b21716ea3..7efd7d1e3217 100644 --- a/gpt4all-chat/metadata/release.json +++ b/gpt4all-chat/metadata/release.json @@ -400,6 +400,22 @@ * Aaron Miller (Nomic AI) * Adam Treat (Nomic AI) * Community (beta testers, bug reporters) +" + }, + { + "version": "2.4.12", + "notes": +" +* Fix bad bug that was breaking numerous current installs (sorry folks!) +* Fix bug with 'browse' button in settings dialog +* Wayland support on linux +* Reduce template ui size in settings dialog +", + "contributors": +" +* Akarshan Biswas +* Adam Treat (Nomic AI) +* Community (beta testers, bug reporters) " } ] From 59f3c093cb5efdd6deba0c5fa9feee55e099a998 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 14:42:11 -0400 Subject: [PATCH 005/198] Stop generating anything on shutdown. 
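This fix relies on cooperative cancellation: the destructor raises m_stopGenerating before quitting and waiting on the worker thread, and the per-token callbacks below return !m_stopGenerating so the backend bails out at the next token instead of generating into an object being destroyed. A minimal sketch of the pattern, with names simplified from the real ChatLLM and assuming the flag is (or behaves like) an atomic bool:

```cpp
#include <atomic>
#include <cstdint>
#include <string>

// Sketch of the cooperative-cancellation pattern applied in this patch.
class Generator {
public:
    ~Generator() {
        m_stopGenerating = true; // raised first, so in-flight generation aborts
        // ... then quit and wait on the worker thread, as the destructor below does
    }

    // The model invokes this once per token; returning false aborts generation.
    bool handleResponse(int32_t token, const std::string &piece) {
        (void)token;
        (void)piece;
        return !m_stopGenerating;
    }

private:
    std::atomic<bool> m_stopGenerating{false};
};
```

Note that processSystemPrompt below also resets the flag to false before starting new work, which is what makes the flag reusable across generations.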
--- gpt4all-chat/chatllm.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 065946b182e3..5da22aeead75 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -90,6 +90,7 @@ ChatLLM::ChatLLM(Chat *parent, bool isServer) ChatLLM::~ChatLLM() { + m_stopGenerating = true; m_llmThread.quit(); m_llmThread.wait(); @@ -588,7 +589,7 @@ bool ChatLLM::handleNamePrompt(int32_t token) { Q_UNUSED(token); qt_noop(); - return true; + return !m_stopGenerating; } bool ChatLLM::handleNameResponse(int32_t token, const std::string &response) @@ -606,28 +607,26 @@ bool ChatLLM::handleNameRecalculate(bool isRecalc) { Q_UNUSED(isRecalc); Q_UNREACHABLE(); - return true; + return !m_stopGenerating; } bool ChatLLM::handleSystemPrompt(int32_t token) { Q_UNUSED(token); - qt_noop(); - return true; + return !m_stopGenerating; } bool ChatLLM::handleSystemResponse(int32_t token, const std::string &response) { Q_UNUSED(token); Q_UNUSED(response); - return false; + return !m_stopGenerating; } bool ChatLLM::handleSystemRecalculate(bool isRecalc) { Q_UNUSED(isRecalc); - Q_UNREACHABLE(); - return true; + return !m_stopGenerating; } bool ChatLLM::serialize(QDataStream &stream, int version) @@ -757,6 +756,7 @@ void ChatLLM::processSystemPrompt() if (!isModelLoaded() || m_processedSystemPrompt || m_isServer) return; + m_stopGenerating = false; auto promptFunc = std::bind(&ChatLLM::handleSystemPrompt, this, std::placeholders::_1); auto responseFunc = std::bind(&ChatLLM::handleSystemResponse, this, std::placeholders::_1, std::placeholders::_2); From 12083fcdeb7ed234f6142801be8fc3ab55749096 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 14:52:08 -0400 Subject: [PATCH 006/198] When deleting chats we sometimes have to update our modelinfo. --- gpt4all-chat/chatllm.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 5da22aeead75..76b8e1e621e6 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -188,6 +188,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) #endif restoreState(); emit isModelLoadedChanged(true); + setModelInfo(modelInfo); Q_ASSERT(!m_modelInfo.filename().isEmpty()); if (m_modelInfo.filename().isEmpty()) emit modelLoadingError(QString("Modelinfo is left null for %1").arg(modelInfo.filename())); From 15d04a79169a63a6930a01f2eae91e8701f4a07e Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 14:56:54 -0400 Subject: [PATCH 007/198] Fix new version dialog ui. 
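The fix works by re-exporting the internal close button's visibility through a property alias, so individual dialogs can opt out of the shared close button. A minimal sketch of the alias pattern in isolation (component names simplified; the real components are MyDialog and MyToolButton in the diff below):

```qml
import QtQuick
import QtQuick.Controls

// Sketch only: expose a child's property on the component's public interface.
Dialog {
    id: sketchDialog
    property alias closeButtonVisible: closeButton.visible

    Button {
        id: closeButton
        text: "\u2715"
        onClicked: sketchDialog.close()
    }
}
```

A consumer then simply declares closeButtonVisible: false, which is exactly what NewVersionDialog does in this patch.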
--- gpt4all-chat/qml/MyDialog.qml | 2 ++ gpt4all-chat/qml/NewVersionDialog.qml | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/qml/MyDialog.qml b/gpt4all-chat/qml/MyDialog.qml index da39c8219e7f..6923a706b830 100644 --- a/gpt4all-chat/qml/MyDialog.qml +++ b/gpt4all-chat/qml/MyDialog.qml @@ -7,6 +7,7 @@ import QtQuick.Layouts Dialog { id: myDialog + property alias closeButtonVisible: myCloseButton.visible background: Rectangle { width: parent.width height: parent.height @@ -17,6 +18,7 @@ Dialog { } MyToolButton { + id: myCloseButton x: 0 + myDialog.width - myDialog.padding - width - 15 y: 0 - myDialog.padding + 15 z: 300 diff --git a/gpt4all-chat/qml/NewVersionDialog.qml b/gpt4all-chat/qml/NewVersionDialog.qml index ebfdeec9dedc..9fb80e742301 100644 --- a/gpt4all-chat/qml/NewVersionDialog.qml +++ b/gpt4all-chat/qml/NewVersionDialog.qml @@ -13,7 +13,8 @@ MyDialog { modal: true width: contentItem.width height: contentItem.height - padding: 10 + padding: 20 + closeButtonVisible: false Theme { id: theme From fb172a25242dc2a19dd3fefe9e5b305eae41b7ab Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 14:58:55 -0400 Subject: [PATCH 008/198] Don't prevent closing the model download dialog. --- gpt4all-chat/qml/ModelDownloaderDialog.qml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/qml/ModelDownloaderDialog.qml b/gpt4all-chat/qml/ModelDownloaderDialog.qml index b393171b21f5..000e05f227b4 100644 --- a/gpt4all-chat/qml/ModelDownloaderDialog.qml +++ b/gpt4all-chat/qml/ModelDownloaderDialog.qml @@ -14,7 +14,7 @@ import mysettings MyDialog { id: modelDownloaderDialog modal: true - closePolicy: ModelList.installedModels.count === 0 ? Popup.NoAutoClose : (Popup.CloseOnEscape | Popup.CloseOnPressOutside) + closePolicy: Popup.CloseOnEscape | Popup.CloseOnPressOutside padding: 10 onOpened: { From e120eb50088c6a59101443e4de8028bcbfa2755f Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 15:08:14 -0400 Subject: [PATCH 009/198] Allow closing the download dialog and display a message to the user if no models are installed. --- gpt4all-chat/main.qml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml index 83ad8300f49d..3a188cbe6a6c 100644 --- a/gpt4all-chat/main.qml +++ b/gpt4all-chat/main.qml @@ -87,6 +87,8 @@ Window { } } + property bool hasShownModelDownload: false + function startupDialogs() { if (!LLM.compatHardware) { Network.sendNonCompatHardware(); @@ -100,9 +102,10 @@ Window { return; } - // check for any current models and if not, open download dialog - if (ModelList.installedModels.count === 0 && !firstStartDialog.opened) { + // check for any current models and if not, open download dialog once + if (!hasShownModelDownload && ModelList.installedModels.count === 0 && !firstStartDialog.opened) { downloadNewModels.open(); + hasShownModelDownload = true; return; } @@ -654,8 +657,18 @@ Window { anchors.fill: parent color: currentChat.isServer ? theme.backgroundDark : theme.backgroundLight + Text { + text: qsTr("You must install a model via the download dialog to continue. 
The download dialog can be accessed via the drawer button in the top left corner and then clicking the 'Downloads' button.") + color: theme.textColor + width: 500 + wrapMode: Text.WordWrap + anchors.centerIn: parent + visible: ModelList.installedModels.count === 0 + } + ListView { id: listView + visible: ModelList.installedModels.count !== 0 anchors.fill: parent model: chatModel From c8d761a0040c19108b61443cbb1b3a13d393d9fb Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 15:51:59 -0400 Subject: [PATCH 010/198] Add a nicer message. --- gpt4all-chat/CMakeLists.txt | 1 + gpt4all-chat/icons/download.svg | 5 +++++ gpt4all-chat/icons/question.svg | 8 +++++++ gpt4all-chat/main.qml | 38 +++++++++++++++++++++++++++++++-- 4 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 gpt4all-chat/icons/download.svg create mode 100644 gpt4all-chat/icons/question.svg diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index b98989e2ff9a..cfd68ae0aa3a 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -122,6 +122,7 @@ qt_add_qml_module(chat icons/close.svg icons/copy.svg icons/db.svg + icons/download.svg icons/settings.svg icons/edit.svg icons/image.svg diff --git a/gpt4all-chat/icons/download.svg b/gpt4all-chat/icons/download.svg new file mode 100644 index 000000000000..76f56c3f9167 --- /dev/null +++ b/gpt4all-chat/icons/download.svg @@ -0,0 +1,5 @@ + + diff --git a/gpt4all-chat/icons/question.svg b/gpt4all-chat/icons/question.svg new file mode 100644 index 000000000000..7cf18fdd09b1 --- /dev/null +++ b/gpt4all-chat/icons/question.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml index 3a188cbe6a6c..94f86e4b6a45 100644 --- a/gpt4all-chat/main.qml +++ b/gpt4all-chat/main.qml @@ -658,12 +658,46 @@ Window { color: currentChat.isServer ? theme.backgroundDark : theme.backgroundLight Text { - text: qsTr("You must install a model via the download dialog to continue. The download dialog can be accessed via the drawer button in the top left corner and then clicking the 'Downloads' button.") + id: warningLabel + text: qsTr("You must install a model to continue. Models are available via the download dialog or you can install them manually by downloading from the GPT4All website (look for the Models Explorer) and placing them in the model folder. The model folder can be found in the settings dialog under the application tab.") color: theme.textColor - width: 500 + width: 600 + linkColor: theme.linkColor wrapMode: Text.WordWrap anchors.centerIn: parent visible: ModelList.installedModels.count === 0 + onLinkActivated: function(link) { + Qt.openUrlExternally(link) + } + } + + MyButton { + id: downloadButton + text: qsTr("Download models") + visible: ModelList.installedModels.count === 0 + anchors.top: warningLabel.bottom + anchors.topMargin: 20 + anchors.horizontalCenter: warningLabel.horizontalCenter + padding: 15 + leftPadding: 50 + Image { + anchors.verticalCenter: parent.verticalCenter + anchors.left: parent.left + anchors.leftMargin: 15 + width: 24 + height: 24 + mipmap: true + source: "qrc:/gpt4all/icons/download.svg" + } + background: Rectangle { + border.color: downloadButton.down ? theme.backgroundLightest : theme.buttonBorder + border.width: 2 + radius: 10 + color: downloadButton.hovered ? 
theme.backgroundLighter : theme.backgroundLight + } + onClicked: { + downloadNewModels.open(); + } } ListView { From 98dd2ab4bc950dc9b87c227cbd9fc07de608ebfe Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Mon, 10 Jul 2023 16:14:57 -0400 Subject: [PATCH 011/198] Provide backup options if models.json does not download synchronously. --- gpt4all-chat/modellist.cpp | 92 ++++++++++++++++++++++++++++++++++++-- gpt4all-chat/modellist.h | 6 ++- 2 files changed, 94 insertions(+), 4 deletions(-) diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index b705a4bf086a..5a7deed66684 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -2,6 +2,8 @@ #include "mysettings.h" #include "network.h" +#include +#include #include //#define USE_LOCAL_MODELSJSON @@ -241,6 +243,7 @@ ModelList::ModelList() connect(MySettings::globalInstance(), &MySettings::repeatPenaltyTokensChanged, this, &ModelList::updateDataForSettings);; connect(MySettings::globalInstance(), &MySettings::promptTemplateChanged, this, &ModelList::updateDataForSettings); connect(MySettings::globalInstance(), &MySettings::systemPromptChanged, this, &ModelList::updateDataForSettings); + connect(&m_networkManager, &QNetworkAccessManager::sslErrors, this, &ModelList::handleSslErrors); updateModelsFromJson(); updateModelsFromSettings(); @@ -390,6 +393,21 @@ void ModelList::addModel(const QString &id) emit userDefaultModelListChanged(); } +void ModelList::changeId(const QString &oldId, const QString &newId) +{ + const bool hasModel = contains(oldId); + Q_ASSERT(hasModel); + if (!hasModel) { + qWarning() << "ERROR: model list does not contain" << oldId; + return; + } + + QMutexLocker locker(&m_mutex); + ModelInfo *info = m_modelMap.take(oldId); + info->setId(newId); + m_modelMap.insert(newId, info); +} + int ModelList::rowCount(const QModelIndex &parent) const { Q_UNUSED(parent) @@ -857,13 +875,60 @@ void ModelList::updateModelsFromJson() if (jsonReply->error() == QNetworkReply::NoError && jsonReply->isFinished()) { QByteArray jsonData = jsonReply->readAll(); jsonReply->deleteLater(); - parseModelsJsonFile(jsonData); + parseModelsJsonFile(jsonData, true); } else { - qWarning() << "Could not download models.json"; + qWarning() << "WARNING: Could not download models.json synchronously"; + updateModelsFromJsonAsync(); + + QSettings settings; + QFileInfo info(settings.fileName()); + QString dirPath = info.canonicalPath(); + const QString modelsConfig = dirPath + "/models.json"; + QFile file(modelsConfig); + if (!file.open(QIODeviceBase::ReadOnly)) { + qWarning() << "ERROR: Couldn't read models config file: " << modelsConfig; + } else { + QByteArray jsonData = file.readAll(); + file.close(); + parseModelsJsonFile(jsonData, false); + } } delete jsonReply; } +void ModelList::updateModelsFromJsonAsync() +{ +#if defined(USE_LOCAL_MODELSJSON) + QUrl jsonUrl("file://" + QDir::homePath() + "/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models.json"); +#else + QUrl jsonUrl("http://gpt4all.io/models/models.json"); +#endif + QNetworkRequest request(jsonUrl); + QSslConfiguration conf = request.sslConfiguration(); + conf.setPeerVerifyMode(QSslSocket::VerifyNone); + request.setSslConfiguration(conf); + QNetworkReply *jsonReply = m_networkManager.get(request); + connect(jsonReply, &QNetworkReply::finished, this, &ModelList::handleModelsJsonDownloadFinished); +} + +void ModelList::handleModelsJsonDownloadFinished() +{ + QNetworkReply *jsonReply = qobject_cast(sender()); + if (!jsonReply) + return; + + QByteArray jsonData = 
jsonReply->readAll(); + jsonReply->deleteLater(); + parseModelsJsonFile(jsonData, true); +} + +void ModelList::handleSslErrors(QNetworkReply *reply, const QList &errors) +{ + QUrl url = reply->request().url(); + for (const auto &e : errors) + qWarning() << "ERROR: Received ssl error:" << e.errorString() << "for" << url; +} + void ModelList::updateDataForSettings() { emit dataChanged(index(0, 0), index(m_models.size() - 1, 0)); @@ -887,7 +952,7 @@ static bool compareVersions(const QString &a, const QString &b) { return aParts.size() > bParts.size(); } -void ModelList::parseModelsJsonFile(const QByteArray &jsonData) +void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) { QJsonParseError err; QJsonDocument document = QJsonDocument::fromJson(jsonData, &err); @@ -896,6 +961,20 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData) return; } + if (save) { + QSettings settings; + QFileInfo info(settings.fileName()); + QString dirPath = info.canonicalPath(); + const QString modelsConfig = dirPath + "/models.json"; + QFile file(modelsConfig); + if (!file.open(QIODeviceBase::WriteOnly)) { + qWarning() << "ERROR: Couldn't write models config file: " << modelsConfig; + } else { + file.write(jsonData.constData()); + file.close(); + } + } + QJsonArray jsonArray = document.array(); const QString currentVersion = QCoreApplication::applicationVersion(); @@ -936,6 +1015,9 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData) const QString id = modelName; Q_ASSERT(!id.isEmpty()); + if (contains(modelFilename)) + changeId(modelFilename, id); + if (!contains(id)) addModel(id); @@ -983,6 +1065,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData) const QString modelName = "ChatGPT-3.5 Turbo"; const QString id = modelName; const QString modelFilename = "chatgpt-gpt-3.5-turbo.txt"; + if (contains(modelFilename)) + changeId(modelFilename, id); if (!contains(id)) addModel(id); updateData(id, ModelList::NameRole, modelName); @@ -1003,6 +1087,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData) const QString modelName = "ChatGPT-4"; const QString id = modelName; const QString modelFilename = "chatgpt-gpt-4.txt"; + if (contains(modelFilename)) + changeId(modelFilename, id); if (!contains(id)) addModel(id); updateData(id, ModelList::NameRole, modelName); diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h index 89c68229e7f7..c749254c46f4 100644 --- a/gpt4all-chat/modellist.h +++ b/gpt4all-chat/modellist.h @@ -275,6 +275,7 @@ class ModelList : public QAbstractListModel ModelInfo defaultModelInfo() const; void addModel(const QString &id); + void changeId(const QString &oldId, const QString &newId); const QList exportModelList() const; const QList userDefaultModelList() const; @@ -304,16 +305,19 @@ class ModelList : public QAbstractListModel private Q_SLOTS: void updateModelsFromJson(); + void updateModelsFromJsonAsync(); void updateModelsFromSettings(); void updateModelsFromDirectory(); void updateDataForSettings(); + void handleModelsJsonDownloadFinished(); + void handleSslErrors(QNetworkReply *reply, const QList &errors); private: QString modelDirPath(const QString &modelName, bool isChatGPT); int indexForModel(ModelInfo *model); QVariant dataInternal(const ModelInfo *info, int role) const; static bool lessThan(const ModelInfo* a, const ModelInfo* b); - void parseModelsJsonFile(const QByteArray &jsonData); + void parseModelsJsonFile(const QByteArray &jsonData, bool save); QString uniqueModelName(const ModelInfo &model) const; 
private: From 3e3b05a2a4f0a5c9d978b0a6b3876ffd21d982b9 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Mon, 10 Jul 2023 16:20:19 -0400 Subject: [PATCH 012/198] Don't process the system prompt when restoring state. --- gpt4all-chat/chatllm.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 76b8e1e621e6..cdb89fe8c8a8 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -746,6 +746,7 @@ void ChatLLM::restoreState() #if defined(DEBUG) qDebug() << "restoreState" << m_llmThread.objectName() << "size:" << m_state.size(); #endif + m_processedSystemPrompt = true; m_llModelInfo.model->restoreState(static_cast(reinterpret_cast(m_state.data()))); m_state.clear(); m_state.resize(0); From a190041c6e8abc37aac0c78395398bcfbeab73cb Mon Sep 17 00:00:00 2001 From: Lakshay Kansal <58596666+lakkn@users.noreply.github.com> Date: Mon, 10 Jul 2023 16:23:32 -0400 Subject: [PATCH 013/198] json and c# highlighting rules (#1163) --- gpt4all-chat/responsetext.cpp | 46 +++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/gpt4all-chat/responsetext.cpp b/gpt4all-chat/responsetext.cpp index 8dfa6562c9a1..9ca996a233ea 100644 --- a/gpt4all-chat/responsetext.cpp +++ b/gpt4all-chat/responsetext.cpp @@ -15,7 +15,9 @@ enum Language { Bash, TypeScript, Java, - Go + Go, + Json, + Csharp, }; static QColor keywordColor = "#2e95d3"; // blue @@ -29,6 +31,8 @@ static QColor typeColor = numberColor; static QColor arrowColor = functionColor; static QColor commandColor = functionCallColor; static QColor variableColor = numberColor; +static QColor keyColor = functionColor; +static QColor valueColor = stringColor; static Language stringToLanguage(const QString &language) { @@ -38,6 +42,10 @@ static Language stringToLanguage(const QString &language) return Cpp; if (language == "c++") return Cpp; + if (language == "csharp") + return Csharp; + if (language == "c#") + return Csharp; if (language == "c") return Cpp; if (language == "bash") @@ -52,6 +60,8 @@ static Language stringToLanguage(const QString &language) return Go; if (language == "golang") return Go; + if (language == "json") + return Json; return None; } @@ -551,6 +561,31 @@ static QVector bashHighlightingRules() return highlightingRules; } +static QVector jsonHighlightingRules() +{ + static QVector highlightingRules; + if (highlightingRules.isEmpty()) { + + HighlightingRule rule; + + // Key string rule + QTextCharFormat keyFormat; + keyFormat.setForeground(keyColor); // Assuming keyColor is defined + rule.pattern = QRegularExpression("\".*?\":"); // keys are typically in the "key": format + rule.format = keyFormat; + highlightingRules.append(rule); + + // Value string rule + QTextCharFormat valueFormat; + valueFormat.setForeground(valueColor); // Assuming valueColor is defined + rule.pattern = QRegularExpression(":\\s*(\".*?\")"); // values are typically in the : "value" format + rule.format = valueFormat; + highlightingRules.append(rule); + } + return highlightingRules; +} + + SyntaxHighlighter::SyntaxHighlighter(QObject *parent) : QSyntaxHighlighter(parent) { @@ -569,6 +604,8 @@ void SyntaxHighlighter::highlightBlock(const QString &text) rules = pythonHighlightingRules(); else if (block.userState() == Cpp) rules = cppHighlightingRules(); + else if (block.userState() == Csharp) + rules = csharpHighlightingRules(); else if (block.userState() == Bash) rules = bashHighlightingRules(); else if (block.userState() == TypeScript) @@ -577,6 +614,8 @@ void 
SyntaxHighlighter::highlightBlock(const QString &text) rules = javaHighlightingRules(); else if (block.userState() == Go) rules = javaHighlightingRules(); + else if (block.userState() == Json) + rules = jsonHighlightingRules(); for (const HighlightingRule &rule : qAsConst(rules)) { QRegularExpressionMatchIterator matchIterator = rule.pattern.globalMatch(text); @@ -773,13 +812,16 @@ void ResponseText::handleCodeBlocks() if (firstWord == "python" || firstWord == "cpp" || firstWord == "c++" + || firstWord == "csharp" + || firstWord == "c#" || firstWord == "c" || firstWord == "bash" || firstWord == "javascript" || firstWord == "typescript" || firstWord == "java" || firstWord == "go" - || firstWord == "golang") { + || firstWord == "golang" + || firstWord == "json") { codeLanguage = firstWord; capturedText.remove(0, match.captured(0).length()); } From 99cd5557431f361cb61f6398541dba587c308551 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Mon, 10 Jul 2023 16:52:32 -0400 Subject: [PATCH 014/198] Provide some guardrails for thread count. --- gpt4all-chat/chatllm.cpp | 4 ---- gpt4all-chat/mysettings.cpp | 9 +++++++-- gpt4all-chat/qml/ApplicationSettings.qml | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index cdb89fe8c8a8..4f87d15047ac 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -461,8 +461,6 @@ bool ChatLLM::promptInternal(const QList &collectionList, const QString QString instructPrompt = augmentedTemplate.join("\n").arg(prompt); int n_threads = MySettings::globalInstance()->threadCount(); - if (n_threads <= 0) - n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); m_stopGenerating = false; auto promptFunc = std::bind(&ChatLLM::handlePrompt, this, std::placeholders::_1); @@ -773,8 +771,6 @@ void ChatLLM::processSystemPrompt() const float repeat_penalty = MySettings::globalInstance()->modelRepeatPenalty(m_modelInfo); const int32_t repeat_penalty_tokens = MySettings::globalInstance()->modelRepeatPenaltyTokens(m_modelInfo); int n_threads = MySettings::globalInstance()->threadCount(); - if (n_threads <= 0) - n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); m_ctx.n_predict = n_predict; m_ctx.top_k = top_k; m_ctx.top_p = top_p; diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp index 6d40d76a6af8..9a4e17ffb787 100644 --- a/gpt4all-chat/mysettings.cpp +++ b/gpt4all-chat/mysettings.cpp @@ -8,7 +8,7 @@ #include #include -static int default_threadCount = 0; +static int default_threadCount = std::min(4, (int32_t) std::thread::hardware_concurrency()); static bool default_saveChats = false; static bool default_saveChatGPTChats = true; static bool default_serverChat = false; @@ -349,7 +349,10 @@ int MySettings::threadCount() const { QSettings setting; setting.sync(); - return setting.value("threadCount", default_threadCount).toInt(); + int c = setting.value("threadCount", default_threadCount).toInt(); + c = std::max(c, 1); + c = std::min(c, QThread::idealThreadCount()); + return c; } void MySettings::setThreadCount(int c) @@ -357,6 +360,8 @@ void MySettings::setThreadCount(int c) if (threadCount() == c) return; + c = std::max(c, 1); + c = std::min(c, QThread::idealThreadCount()); QSettings setting; setting.setValue("threadCount", c); setting.sync(); diff --git a/gpt4all-chat/qml/ApplicationSettings.qml b/gpt4all-chat/qml/ApplicationSettings.qml index b0208aac6a52..15333b7ce5bd 100644 --- a/gpt4all-chat/qml/ApplicationSettings.qml +++ 
b/gpt4all-chat/qml/ApplicationSettings.qml @@ -101,7 +101,7 @@ MySettingsTab { MyTextField { text: MySettings.threadCount color: theme.textColor - ToolTip.text: qsTr("Amount of processing threads to use, a setting of 0 will use the lesser of 4 or your number of CPU threads") + ToolTip.text: qsTr("Amount of processing threads to use, bounded between 1 and the number of logical processors") ToolTip.visible: hovered Layout.row: 3 Layout.column: 1
From 8467e69f24b9acdb5d150dc98761f1fd80623003 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Mon, 10 Jul 2023 17:29:21 -0400 Subject: [PATCH 015/198] Check that we're not null. This is necessary because the loop can make us recursive. Need to fix that. --- gpt4all-chat/chatgpt.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/chatgpt.cpp b/gpt4all-chat/chatgpt.cpp index 5d378930f99c..d9f6114ee3b8 100644 --- a/gpt4all-chat/chatgpt.cpp +++ b/gpt4all-chat/chatgpt.cpp @@ -134,7 +134,8 @@ void ChatGPT::prompt(const std::string &prompt, qDebug() << "ChatGPT::prompt end network request"; #endif - m_ctx->n_past += 1; + if (m_ctx) + m_ctx->n_past += 1; m_context.append(QString::fromStdString(prompt)); m_context.append(m_currentResponse);
From 4f9e48909387ae4cb70398f91e32356187bf77bf Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 10:08:03 -0400 Subject: [PATCH 016/198] Don't use a local event loop which can lead to recursion and crashes. --- gpt4all-chat/chatgpt.cpp | 96 ++++++++++++++++++++++++++-------------- gpt4all-chat/chatgpt.h | 49 ++++++++++++++++---- 2 files changed, 103 insertions(+), 42 deletions(-) diff --git a/gpt4all-chat/chatgpt.cpp b/gpt4all-chat/chatgpt.cpp index d9f6114ee3b8..2b72604d56cb 100644 --- a/gpt4all-chat/chatgpt.cpp +++ b/gpt4all-chat/chatgpt.cpp @@ -15,7 +15,6 @@ ChatGPT::ChatGPT() : QObject(nullptr) , m_modelName("gpt-3.5-turbo") - , m_ctx(nullptr) , m_responseCallback(nullptr) { } @@ -84,9 +83,6 @@ void ChatGPT::prompt(const std::string &prompt, return; } - m_ctx = &promptCtx; - m_responseCallback = responseCallback; - // FIXME: We don't set the max_tokens on purpose because in order to do so safely without encountering // an error we need to be able to count the tokens in our prompt.
The only way to do this is to use // the OpenAI tiktokken library or to implement our own tokenization function that matches precisely @@ -118,37 +114,64 @@ void ChatGPT::prompt(const std::string &prompt, qDebug() << "ChatGPT::prompt begin network request" << qPrintable(doc.toJson()); #endif - QEventLoop loop; - QUrl openaiUrl("https://api.openai.com/v1/chat/completions"); - const QString authorization = QString("Bearer %1").arg(m_apiKey); - QNetworkRequest request(openaiUrl); - request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); - request.setRawHeader("Authorization", authorization.toUtf8()); - QNetworkReply *reply = m_networkManager.post(request, doc.toJson(QJsonDocument::Compact)); - connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit); - connect(reply, &QNetworkReply::finished, this, &ChatGPT::handleFinished); - connect(reply, &QNetworkReply::readyRead, this, &ChatGPT::handleReadyRead); - connect(reply, &QNetworkReply::errorOccurred, this, &ChatGPT::handleErrorOccurred); - loop.exec(); + m_responseCallback = responseCallback; + + // The following code sets up a worker thread and object to perform the actual api request to + // chatgpt and then blocks until it is finished + QThread workerThread; + ChatGPTWorker worker(this); + worker.moveToThread(&workerThread); + connect(&worker, &ChatGPTWorker::finished, &workerThread, &QThread::quit, Qt::DirectConnection); + connect(this, &ChatGPT::request, &worker, &ChatGPTWorker::request, Qt::QueuedConnection); + workerThread.start(); + emit request(m_apiKey, &promptCtx, doc.toJson(QJsonDocument::Compact)); + workerThread.wait(); + + promptCtx.n_past += 1; + m_context.append(QString::fromStdString(prompt)); + m_context.append(worker.currentResponse()); + m_responseCallback = nullptr; + #if defined(DEBUG) qDebug() << "ChatGPT::prompt end network request"; #endif +} - if (m_ctx) - m_ctx->n_past += 1; - m_context.append(QString::fromStdString(prompt)); - m_context.append(m_currentResponse); +bool ChatGPT::callResponse(int32_t token, const std::string& string) +{ + Q_ASSERT(m_responseCallback); + if (!m_responseCallback) { + std::cerr << "ChatGPT ERROR: no response callback!\n"; + return false; + } + return m_responseCallback(token, string); +} - m_ctx = nullptr; - m_responseCallback = nullptr; - m_currentResponse = QString(); +void ChatGPTWorker::request(const QString &apiKey, + LLModel::PromptContext *promptCtx, + const QByteArray &array) +{ + m_ctx = promptCtx; + + QUrl openaiUrl("https://api.openai.com/v1/chat/completions"); + const QString authorization = QString("Bearer %1").arg(apiKey); + QNetworkRequest request(openaiUrl); + request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); + request.setRawHeader("Authorization", authorization.toUtf8()); + m_networkManager = new QNetworkAccessManager(this); + QNetworkReply *reply = m_networkManager->post(request, array); + connect(reply, &QNetworkReply::finished, this, &ChatGPTWorker::handleFinished); + connect(reply, &QNetworkReply::readyRead, this, &ChatGPTWorker::handleReadyRead); + connect(reply, &QNetworkReply::errorOccurred, this, &ChatGPTWorker::handleErrorOccurred); } -void ChatGPT::handleFinished() +void ChatGPTWorker::handleFinished() { QNetworkReply *reply = qobject_cast(sender()); - if (!reply) + if (!reply) { + emit finished(); return; + } QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute); Q_ASSERT(response.isValid()); @@ -159,21 +182,25 @@ void ChatGPT::handleFinished() 
.arg(code).arg(reply->errorString()).toStdString(); } reply->deleteLater(); + emit finished(); } -void ChatGPT::handleReadyRead() +void ChatGPTWorker::handleReadyRead() { QNetworkReply *reply = qobject_cast(sender()); - if (!reply) + if (!reply) { + emit finished(); return; + } QVariant response = reply->attribute(QNetworkRequest::HttpStatusCodeAttribute); Q_ASSERT(response.isValid()); bool ok; int code = response.toInt(&ok); if (!ok || code != 200) { - m_responseCallback(-1, QString("\nERROR: 2 ChatGPT responded with error code \"%1-%2\" %3\n") + m_chat->callResponse(-1, QString("\nERROR: 2 ChatGPT responded with error code \"%1-%2\" %3\n") .arg(code).arg(reply->errorString()).arg(qPrintable(reply->readAll())).toStdString()); + emit finished(); return; } @@ -192,7 +219,7 @@ void ChatGPT::handleReadyRead() QJsonParseError err; const QJsonDocument document = QJsonDocument::fromJson(jsonData.toUtf8(), &err); if (err.error != QJsonParseError::NoError) { - m_responseCallback(-1, QString("\nERROR: ChatGPT responded with invalid json \"%1\"\n") + m_chat->callResponse(-1, QString("\nERROR: ChatGPT responded with invalid json \"%1\"\n") .arg(err.errorString()).toStdString()); continue; } @@ -203,21 +230,24 @@ void ChatGPT::handleReadyRead() const QJsonObject delta = choice.value("delta").toObject(); const QString content = delta.value("content").toString(); Q_ASSERT(m_ctx); - Q_ASSERT(m_responseCallback); m_currentResponse += content; - if (!m_responseCallback(0, content.toStdString())) { + if (!m_chat->callResponse(0, content.toStdString())) { reply->abort(); + emit finished(); return; } } } -void ChatGPT::handleErrorOccurred(QNetworkReply::NetworkError code) +void ChatGPTWorker::handleErrorOccurred(QNetworkReply::NetworkError code) { QNetworkReply *reply = qobject_cast(sender()); - if (!reply) + if (!reply) { + emit finished(); return; + } qWarning() << QString("ERROR: ChatGPT responded with error code \"%1-%2\"") .arg(code).arg(reply->errorString()).toStdString(); + emit finished(); } diff --git a/gpt4all-chat/chatgpt.h b/gpt4all-chat/chatgpt.h index af06a4bbe3f8..b1f322985eb7 100644 --- a/gpt4all-chat/chatgpt.h +++ b/gpt4all-chat/chatgpt.h @@ -5,9 +5,41 @@ #include #include #include +#include #include "../gpt4all-backend/llmodel.h" -class ChatGPTPrivate; +class ChatGPT; +class ChatGPTWorker : public QObject { + Q_OBJECT +public: + ChatGPTWorker(ChatGPT *chatGPT) + : QObject(nullptr) + , m_ctx(nullptr) + , m_networkManager(nullptr) + , m_chat(chatGPT) {} + virtual ~ChatGPTWorker() {} + + QString currentResponse() const { return m_currentResponse; } + + void request(const QString &apiKey, + LLModel::PromptContext *promptCtx, + const QByteArray &array); + +Q_SIGNALS: + void finished(); + +private Q_SLOTS: + void handleFinished(); + void handleReadyRead(); + void handleErrorOccurred(QNetworkReply::NetworkError code); + +private: + ChatGPT *m_chat; + LLModel::PromptContext *m_ctx; + QNetworkAccessManager *m_networkManager; + QString m_currentResponse; +}; + class ChatGPT : public QObject, public LLModel { Q_OBJECT public: @@ -35,6 +67,13 @@ class ChatGPT : public QObject, public LLModel { QList context() const { return m_context; } void setContext(const QList &context) { m_context = context; } + bool callResponse(int32_t token, const std::string& string); + +Q_SIGNALS: + void request(const QString &apiKey, + LLModel::PromptContext *ctx, + const QByteArray &array); + protected: // We have to implement these as they are pure virtual in base class, but we don't actually use // them as they are only 
called from the default implementation of 'prompt' which we override and @@ -46,19 +85,11 @@ class ChatGPT : public QObject, public LLModel { int32_t contextLength() const override { return -1; } const std::vector& endTokens() const override { static const std::vector fres; return fres; } -private Q_SLOTS: - void handleFinished(); - void handleReadyRead(); - void handleErrorOccurred(QNetworkReply::NetworkError code); - private: - PromptContext *m_ctx; std::function m_responseCallback; QString m_modelName; QString m_apiKey; QList m_context; - QString m_currentResponse; - QNetworkAccessManager m_networkManager; }; #endif // CHATGPT_H From 9ef53163ddd4ffa30d313809c1983f0254b681d6 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 10:53:19 -0400 Subject: [PATCH 017/198] Explicitly send the opt out because we were artificially lowering them with settings changes. --- gpt4all-chat/qml/StartupDialog.qml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gpt4all-chat/qml/StartupDialog.qml b/gpt4all-chat/qml/StartupDialog.qml index bfe725ab3820..ac228a954169 100644 --- a/gpt4all-chat/qml/StartupDialog.qml +++ b/gpt4all-chat/qml/StartupDialog.qml @@ -136,6 +136,8 @@ model release that uses your data!") buttons: optInStatisticsRadio.children onClicked: { MySettings.networkUsageStatsActive = optInStatisticsRadio.checked + if (!optInStatisticsRadio.checked) + Network.sendOptOut(); if (optInNetworkRadio.choiceMade && optInStatisticsRadio.choiceMade) startupDialog.close(); } From 88bbe3095226cb0b5f5ad25110753ee05a15bbc2 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 12:09:33 -0400 Subject: [PATCH 018/198] Provide a guardrail for OOM errors. --- gpt4all-chat/chatllm.cpp | 11 +++++++++++ gpt4all-chat/main.qml | 2 +- gpt4all-chat/mysettings.cpp | 21 +++++++++++++++++++++ gpt4all-chat/mysettings.h | 4 ++++ 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 4f87d15047ac..3a42021b8941 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -211,6 +211,15 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) // Store the file info in the modelInfo in case we have an error loading m_llModelInfo.fileInfo = fileInfo; + // Check if we've previously tried to load this file and failed/crashed + if (MySettings::globalInstance()->attemptModelLoad() == filePath) { + MySettings::globalInstance()->setAttemptModelLoad(QString()); // clear the flag + if (!m_isServer) + LLModelStore::globalInstance()->releaseModel(m_llModelInfo); // release back into the store + m_llModelInfo = LLModelInfo(); + emit modelLoadingError(QString("Previous attempt to load model resulted in crash for `%1` most likely due to out of memory. 
You should either remove this model or decrease your system RAM usage by closing other applications.").arg(modelInfo.filename())); + } + if (fileInfo.exists()) { if (isChatGPT) { QString apiKey; @@ -239,7 +248,9 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) #endif if (m_llModelInfo.model) { + MySettings::globalInstance()->setAttemptModelLoad(filePath); bool success = m_llModelInfo.model->loadModel(filePath.toStdString()); + MySettings::globalInstance()->setAttemptModelLoad(QString()); if (!success) { delete std::exchange(m_llModelInfo.model, nullptr); if (!m_isServer) diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml index 94f86e4b6a45..5bb723bc527c 100644 --- a/gpt4all-chat/main.qml +++ b/gpt4all-chat/main.qml @@ -163,7 +163,7 @@ Window { + "\"" + currentChat.modelLoadingError + "\"" + qsTr("<br><br>Model loading failures can happen for a variety of reasons, but the most common " + "causes include a bad file format, an incomplete or corrupted download, the wrong file " - + "type or an incompatible model type. Here are some suggestions for resolving the problem:" + + "type, not enough system RAM or an incompatible model type. Here are some suggestions for resolving the problem:" + "<br><ul>" + "<li>Ensure the model file has a compatible ggml format and type" + "<li>Check the model file is complete in the download folder"
diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp index 9a4e17ffb787..71b2334df784 100644 --- a/gpt4all-chat/mysettings.cpp +++ b/gpt4all-chat/mysettings.cpp @@ -608,3 +608,24 @@ void MySettings::setNetworkUsageStatsActive(bool b) setting.sync(); emit networkUsageStatsActiveChanged(); } + +QString MySettings::attemptModelLoad() const +{ + QSettings setting; + setting.sync(); + return setting.value("attemptModelLoad", QString()).toString(); +} + +void MySettings::setAttemptModelLoad(const QString &modelFile) +{ + if (attemptModelLoad() == modelFile) + return; + + QSettings setting; + if (modelFile.isEmpty()) + setting.remove("attemptModelLoad"); + else + setting.setValue("attemptModelLoad", modelFile); + setting.sync(); + emit attemptModelLoadChanged(); +} diff --git a/gpt4all-chat/mysettings.h b/gpt4all-chat/mysettings.h index 873cf5ece47e..2d264666d5f8 100644 --- a/gpt4all-chat/mysettings.h +++ b/gpt4all-chat/mysettings.h @@ -93,6 +93,9 @@ class MySettings : public QObject bool networkUsageStatsActive() const; void setNetworkUsageStatsActive(bool b); + QString attemptModelLoad() const; + void setAttemptModelLoad(const QString &modelFile); + Q_SIGNALS: void nameChanged(const ModelInfo &model); void filenameChanged(const ModelInfo &model); @@ -119,6 +122,7 @@ class MySettings : public QObject void networkAttributionChanged(); void networkIsActiveChanged(); void networkUsageStatsActiveChanged(); + void attemptModelLoadChanged(); private: bool m_forceMetal;
From 34a3b9c857e72cf3187be65554ba318dbcf0d229 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 12:37:21 -0400 Subject: [PATCH 019/198] Don't block on exit when not connected. --- gpt4all-chat/chatgpt.cpp | 2 ++ gpt4all-chat/chatllm.cpp | 27 ++++++++++++++++++++++++--- gpt4all-chat/download.cpp | 2 ++ gpt4all-chat/modellist.cpp | 2 ++ gpt4all-chat/network.cpp | 4 ++++ 5 files changed, 34 insertions(+), 3 deletions(-) diff --git a/gpt4all-chat/chatgpt.cpp b/gpt4all-chat/chatgpt.cpp index 2b72604d56cb..13e9cd5db0bb 100644 --- a/gpt4all-chat/chatgpt.cpp +++ b/gpt4all-chat/chatgpt.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -160,6 +161,7 @@ void ChatGPTWorker::request(const QString &apiKey, request.setRawHeader("Authorization", authorization.toUtf8()); m_networkManager = new QNetworkAccessManager(this); QNetworkReply *reply = m_networkManager->post(request, array); + connect(qApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort); connect(reply, &QNetworkReply::finished, this, &ChatGPTWorker::handleFinished); connect(reply, &QNetworkReply::readyRead, this, &ChatGPTWorker::handleReadyRead); connect(reply, &QNetworkReply::errorOccurred, this, &ChatGPTWorker::handleErrorOccurred); } diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 3a42021b8941..088083d56be3 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -427,6 +427,9 @@ bool ChatLLM::handleResponse(int32_t token, const std::string &response) bool ChatLLM::handleRecalculate(bool isRecalc) { +#if defined(DEBUG) + qDebug() << "recalculate" << m_llmThread.objectName() << isRecalc; +#endif if (m_isRecalc != isRecalc) { m_isRecalc = isRecalc; emit recalcChanged(); @@ -597,6 +600,9 @@ void ChatLLM::handleChatIdChanged(const QString &id) bool ChatLLM::handleNamePrompt(int32_t token) { +#if defined(DEBUG) + qDebug() << "name prompt" << m_llmThread.objectName() << token; +#endif Q_UNUSED(token); qt_noop(); return
!m_stopGenerating; @@ -604,6 +610,9 @@ bool ChatLLM::handleNamePrompt(int32_t token) bool ChatLLM::handleNameResponse(int32_t token, const std::string &response) { +#if defined(DEBUG) + qDebug() << "name response" << m_llmThread.objectName() << token << response; +#endif Q_UNUSED(token); m_nameResponse.append(response); @@ -615,28 +624,40 @@ bool ChatLLM::handleNameResponse(int32_t token, const std::string &response) bool ChatLLM::handleNameRecalculate(bool isRecalc) { +#if defined(DEBUG) + qDebug() << "name recalc" << m_llmThread.objectName() << isRecalc; +#endif Q_UNUSED(isRecalc); Q_UNREACHABLE(); - return !m_stopGenerating; + return false; } bool ChatLLM::handleSystemPrompt(int32_t token) { +#if defined(DEBUG) + qDebug() << "system prompt" << m_llmThread.objectName() << token << m_stopGenerating; +#endif Q_UNUSED(token); return !m_stopGenerating; } bool ChatLLM::handleSystemResponse(int32_t token, const std::string &response) { +#if defined(DEBUG) + qDebug() << "system response" << m_llmThread.objectName() << token << response << m_stopGenerating; +#endif Q_UNUSED(token); Q_UNUSED(response); - return !m_stopGenerating; + return false; } bool ChatLLM::handleSystemRecalculate(bool isRecalc) { +#if defined(DEBUG) + qDebug() << "system recalc" << m_llmThread.objectName() << isRecalc; +#endif Q_UNUSED(isRecalc); - return !m_stopGenerating; + return false; } bool ChatLLM::serialize(QDataStream &stream, int version) diff --git a/gpt4all-chat/download.cpp b/gpt4all-chat/download.cpp index dbcaf6463ad8..f0c0d987f150 100644 --- a/gpt4all-chat/download.cpp +++ b/gpt4all-chat/download.cpp @@ -94,6 +94,7 @@ void Download::updateReleaseNotes() conf.setPeerVerifyMode(QSslSocket::VerifyNone); request.setSslConfiguration(conf); QNetworkReply *jsonReply = m_networkManager.get(request); + connect(qApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort); connect(jsonReply, &QNetworkReply::finished, this, &Download::handleReleaseJsonDownloadFinished); } @@ -137,6 +138,7 @@ void Download::downloadModel(const QString &modelFile) conf.setPeerVerifyMode(QSslSocket::VerifyNone); request.setSslConfiguration(conf); QNetworkReply *modelReply = m_networkManager.get(request); + connect(qApp, &QCoreApplication::aboutToQuit, modelReply, &QNetworkReply::abort); connect(modelReply, &QNetworkReply::downloadProgress, this, &Download::handleDownloadProgress); connect(modelReply, &QNetworkReply::finished, this, &Download::handleModelDownloadFinished); connect(modelReply, &QNetworkReply::readyRead, this, &Download::handleReadyRead); diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index 5a7deed66684..e0bb12de1e53 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -868,6 +868,7 @@ void ModelList::updateModelsFromJson() conf.setPeerVerifyMode(QSslSocket::VerifyNone); request.setSslConfiguration(conf); QNetworkReply *jsonReply = m_networkManager.get(request); + connect(qApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort); QEventLoop loop; connect(jsonReply, &QNetworkReply::finished, &loop, &QEventLoop::quit); QTimer::singleShot(1500, &loop, &QEventLoop::quit); @@ -908,6 +909,7 @@ void ModelList::updateModelsFromJsonAsync() conf.setPeerVerifyMode(QSslSocket::VerifyNone); request.setSslConfiguration(conf); QNetworkReply *jsonReply = m_networkManager.get(request); + connect(qApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort); connect(jsonReply, &QNetworkReply::finished, this, &ModelList::handleModelsJsonDownloadFinished); } diff 
--git a/gpt4all-chat/network.cpp b/gpt4all-chat/network.cpp index 61ebfa61de8a..7317e7fe41fa 100644 --- a/gpt4all-chat/network.cpp +++ b/gpt4all-chat/network.cpp @@ -115,6 +115,7 @@ bool Network::packageAndSendJson(const QString &ingestId, const QString &json) QByteArray body(newDoc.toJson(QJsonDocument::Compact)); request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); QNetworkReply *jsonReply = m_networkManager.post(request, body); + connect(qApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort); connect(jsonReply, &QNetworkReply::finished, this, &Network::handleJsonUploadFinished); m_activeUploads.append(jsonReply); return true; @@ -434,6 +435,7 @@ void Network::sendIpify() conf.setPeerVerifyMode(QSslSocket::VerifyNone); request.setSslConfiguration(conf); QNetworkReply *reply = m_networkManager.get(request); + connect(qApp, &QCoreApplication::aboutToQuit, reply, &QNetworkReply::abort); connect(reply, &QNetworkReply::finished, this, &Network::handleIpifyFinished); } @@ -449,6 +451,7 @@ void Network::sendMixpanel(const QByteArray &json, bool isOptOut) request.setSslConfiguration(conf); request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); QNetworkReply *trackReply = m_networkManager.post(request, json); + connect(qApp, &QCoreApplication::aboutToQuit, trackReply, &QNetworkReply::abort); connect(trackReply, &QNetworkReply::finished, this, &Network::handleMixpanelFinished); } @@ -512,6 +515,7 @@ void Network::sendHealth() conf.setPeerVerifyMode(QSslSocket::VerifyNone); request.setSslConfiguration(conf); QNetworkReply *healthReply = m_networkManager.get(request); + connect(qApp, &QCoreApplication::aboutToQuit, healthReply, &QNetworkReply::abort); connect(healthReply, &QNetworkReply::finished, this, &Network::handleHealthFinished); }
From 6630bf2f136073413147ec545fc4398fc10915cf Mon Sep 17 00:00:00 2001 From: felix Date: Sat, 8 Jul 2023 20:08:17 -0400 Subject: [PATCH 020/198] update to 2.4.11 gpt4all falcon model support. Developer docs included for Java. --- .gitignore | 3 + gpt4all-bindings/java/.gitignore | 5 +- gpt4all-bindings/java/Developer_docs.md | 80 +++++++++++++++++++ gpt4all-bindings/java/README.md | 3 + gpt4all-bindings/java/pom.xml | 11 +-- .../java/com/hexadevlabs/gpt4all/LLModel.java | 8 +- .../java/com/hexadevlabs/gpt4all/Util.java | 3 +- 7 files changed, 100 insertions(+), 13 deletions(-) create mode 100644 gpt4all-bindings/java/Developer_docs.md diff --git a/.gitignore b/.gitignore index 6647893e9461..67cf225f1206 100644 --- a/.gitignore +++ b/.gitignore @@ -178,3 +178,6 @@ CMakeLists.txt.user gpt4all-chat/models/* build_* build-* + +# IntelliJ +.idea/ \ No newline at end of file diff --git a/gpt4all-bindings/java/.gitignore b/gpt4all-bindings/java/.gitignore index 8c3a43d3c4f4..081e799c9983 100644 --- a/gpt4all-bindings/java/.gitignore +++ b/gpt4all-bindings/java/.gitignore @@ -1,2 +1,5 @@ # Make sure native directory never gets committed to git for the project. -/src/main/resources/native \ No newline at end of file +/src/main/resources/native + +# IntelliJ project file +*.iml \ No newline at end of file diff --git a/gpt4all-bindings/java/Developer_docs.md b/gpt4all-bindings/java/Developer_docs.md new file mode 100644 index 000000000000..a90dc68cd4e5 --- /dev/null +++ b/gpt4all-bindings/java/Developer_docs.md @@ -0,0 +1,80 @@ +# Java Bindings Developer Documentation + +This document is meant for anyone looking to build the Java bindings from source, test a build locally, and perform a release.
+ +## Building locally + +Maven is the build tool used by the project. Maven version 3.8 or higher is recommended. Make sure the **mvn** +command is available on the path. + +The project targets Java 11, so make sure that a JDK of version 11 or newer is installed. + +### Setting up location of native shared libraries +The property **native.libs.location** in pom.xml may need to be set: +``` +<properties> + ... + <native.libs.location>C:\Users\felix\dev\gpt4all_java_bins\release_1_1_3_Jun22_2023</native.libs.location> +</properties> +``` +All the native shared libraries bundled with the Java binding jar will be copied from this location. +The directory structure is **native/linux**, **native/macos**, **native/windows**. These directories are copied +into the **src/main/resources** folder during the build process. + +For the purposes of local testing, none of these directories need to be present, or only one OS type may be present. + +If none of the native libraries are present in **native.libs.location**, the shared libraries will be searched for +in the path set by the **LLModel.LIBRARY_SEARCH_PATH** static variable in the Java source code that is using the bindings. + +Alternatively, you can copy the shared libraries into **src/main/resources/native/linux** before +you build, but note **src/main/resources/native** is in the .gitignore, so it will not be committed to sources. + +### Building + +To package the bindings jar run: +``` +mvn package +``` +This will build two jars. One has only the Java bindings and the other is a fat jar that also includes the required dependencies. + +To package and install the Java bindings to your local maven repository run: +``` +mvn install +``` + +### Using in a sample application + +You can check out a sample project that uses the java bindings here: +https://github.com/felix-zaslavskiy/gpt4all-java-bindings-sample.git + +1. First, update the java bindings dependency to the version you have installed in your local repository, such as **1.1.4-SNAPSHOT**. +2. Second, update **Main.java** and set **baseModelPath** to the correct location of the model weight files. + +3. To make a runnable jar run: +``` +mvn package +``` + +A fat jar is also created which is easy to run from the command line: +``` +java -jar target/gpt4all-java-bindings-sample-1.0-SNAPSHOT-jar-with-dependencies.jar +``` + +### Publishing a public release + +Publishing a new version to the Maven Central repository requires a password and signing keys, which F.Z. currently maintains, so +he is responsible for making public releases. + +The procedure is as follows: + +For a snapshot release +Run: +``` +mvn deploy -P signing-profile +``` + +For a non-snapshot release +Run: +``` +mvn clean deploy -P signing-profile,release +``` \ No newline at end of file diff --git a/gpt4all-bindings/java/README.md b/gpt4all-bindings/java/README.md index f8e8043065f3..b93008827d72 100644 --- a/gpt4all-bindings/java/README.md +++ b/gpt4all-bindings/java/README.md @@ -118,4 +118,7 @@ If this is the case you can easily download and install the latest x64 Microsoft - Add static GPT4ALL_VERSION to signify gpt4all version of the bindings - Add PromptIsTooLongException for prompts that are longer than context size. - Replit model support to include Metal Mac hardware support. +3. Version **1.1.4**: + - Java bindings are compatible with gpt4all version 2.4.11 + - Falcon model support included.
\ No newline at end of file
diff --git a/gpt4all-bindings/java/pom.xml b/gpt4all-bindings/java/pom.xml
index 36f222e00466..7bfd9c27ab29 100644
--- a/gpt4all-bindings/java/pom.xml
+++ b/gpt4all-bindings/java/pom.xml
@@ -6,13 +6,14 @@
     <groupId>com.hexadevlabs</groupId>
    <artifactId>gpt4all-java-binding</artifactId>
-    <version>1.1.3</version>
+    <version>1.1.4</version>
     <packaging>jar</packaging>
     <properties>
        <maven.compiler.source>11</maven.compiler.source>
        <maven.compiler.target>11</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+       <native.libs.location>C:\Users\felix\dev\gpt4all_java_bins\release_1_1_4_July8_2023</native.libs.location>
     </properties>
     <name>${project.groupId}:${project.artifactId}</name>
@@ -117,7 +118,7 @@
     <outputDirectory>${project.build.directory}/generated-resources</outputDirectory>
     <resources>
       <resource>
-        <directory>C:\Users\felix\dev\gpt4all_java_bins\release_1_1_3_Jun22_2023</directory>
+        <directory>${native.libs.location}</directory>
       </resource>
     </resources>
@@ -172,11 +173,6 @@
     <descriptorRef>jar-with-dependencies</descriptorRef>
-    <archive>
-        <manifest>
-            <mainClass>com.hexadevlabs.gpt4allsample.Example4</mainClass>
-        </manifest>
-    </archive>
@@ -190,7 +186,6 @@
-
diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java
index f3d0d674db3d..367f7ec0edf0 100644
--- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java
+++ b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java
@@ -117,8 +117,10 @@ public static GenerationConfig.Builder config(){

     /**
      * This may be set before any Model instance classes are instantiated to
-     * set where the model may be found. This may be needed if setting
-     * library search path by standard means is not available.
+     * set where the native shared libraries are to be found.
+     *

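+     * Example (the path below is purely illustrative):
+     *   LLModel.LIBRARY_SEARCH_PATH = "/opt/gpt4all/native";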
+     * This may be needed if setting the library search path by standard means is not available,
+     * or if loading the libraries from the temp folder bundled with the binding jar is not desirable.
      */
     public static String LIBRARY_SEARCH_PATH;

@@ -138,7 +140,7 @@ public static GenerationConfig.Builder config(){
      * GPT4ALL native libraries. The binding may work for older
      * versions but that is not guaranteed.
      */
-    public static final String GPT4ALL_VERSION = "2.4.8";
+    public static final String GPT4ALL_VERSION = "2.4.11";

     protected static LLModelLibrary library;

diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java
index 4b8a978c4b2f..9c50f9e7e89a 100644
--- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java
+++ b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/Util.java
@@ -83,7 +83,8 @@ public static Path copySharedLibraries() {
                 "llamamodel-mainline-metal",
                 "replit-mainline-default",
                 "replit-mainline-metal",
-                "ggml-metal.metal"
+                "ggml-metal.metal",
+                "falcon-default"
         };

         for (String libraryName : libraryNames) {

From 18dbfddcb3a7e2797f4cdb35b67801545c3d1531 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Tue, 11 Jul 2023 13:07:15 -0400
Subject: [PATCH 021/198] Fix default thread setting.

---
 gpt4all-chat/mysettings.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/gpt4all-chat/mysettings.cpp b/gpt4all-chat/mysettings.cpp
index 71b2334df784..4ddcf815dd92 100644
--- a/gpt4all-chat/mysettings.cpp
+++ b/gpt4all-chat/mysettings.cpp
@@ -350,6 +350,10 @@ int MySettings::threadCount() const
     QSettings setting;
     setting.sync();
     int c = setting.value("threadCount", default_threadCount).toInt();
+    // The old thread setting likely left many people with 0 in settings config file, which means
+    // we should reset it to the default going forward
+    if (c <= 0)
+        c = default_threadCount;
     c = std::max(c, 1);
     c = std::min(c, QThread::idealThreadCount());
     return c;

From 833a56faddc2517ded914fa5bcae9db5dcf75e2a Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Tue, 11 Jul 2023 14:58:54 -0400
Subject: [PATCH 022/198] Fix the tap handler on these buttons.

---
 gpt4all-chat/qml/SettingsDialog.qml | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/gpt4all-chat/qml/SettingsDialog.qml b/gpt4all-chat/qml/SettingsDialog.qml
index e0c3d1899357..2fd0d04d101c 100644
--- a/gpt4all-chat/qml/SettingsDialog.qml
+++ b/gpt4all-chat/qml/SettingsDialog.qml
@@ -72,10 +72,11 @@ MyDialog {
             elide: Text.ElideRight
             color: theme.textColor
             width: 200
-            TapHandler {
-                onTapped: {
-                    listView.currentIndex = index
-                }
+        }
+
+        TapHandler {
+            onTapped: {
+                listView.currentIndex = index
             }
         }
     }

From 9dccc96e705f70b747fd511e1d2d9ae42cbbe364 Mon Sep 17 00:00:00 2001
From: Adam Treat
Date: Tue, 11 Jul 2023 15:10:59 -0400
Subject: [PATCH 023/198] Immediately signal when the model is in a new loading state.

--- gpt4all-chat/chat.cpp | 16 ++-------------- gpt4all-chat/chat.h | 2 -- 2 files changed, 2 insertions(+), 16 deletions(-) diff --git a/gpt4all-chat/chat.cpp b/gpt4all-chat/chat.cpp index 82a00e2c4f33..cccfff6558c8 100644 --- a/gpt4all-chat/chat.cpp +++ b/gpt4all-chat/chat.cpp @@ -249,6 +249,8 @@ void Chat::setModelInfo(const ModelInfo &modelInfo) if (m_modelInfo == modelInfo) return; + m_isModelLoaded = false; + emit isModelLoadedChanged(); m_modelLoadingError = QString(); emit modelLoadingErrorChanged(); m_modelInfo = modelInfo; @@ -278,20 +280,6 @@ bool Chat::isRecalc() const return m_llmodel->isRecalc(); } -void Chat::loadDefaultModel() -{ - m_modelLoadingError = QString(); - emit modelLoadingErrorChanged(); - emit loadDefaultModelRequested(); -} - -void Chat::loadModel(const ModelInfo &modelInfo) -{ - m_modelLoadingError = QString(); - emit modelLoadingErrorChanged(); - emit loadModelRequested(modelInfo); -} - void Chat::unloadAndDeleteLater() { if (!isModelLoaded()) { diff --git a/gpt4all-chat/chat.h b/gpt4all-chat/chat.h index 22988684adfa..2751e957d6af 100644 --- a/gpt4all-chat/chat.h +++ b/gpt4all-chat/chat.h @@ -69,8 +69,6 @@ class Chat : public QObject void setModelInfo(const ModelInfo &modelInfo); bool isRecalc() const; - void loadDefaultModel(); - void loadModel(const ModelInfo &modelInfo); void unloadModel(); void reloadModel(); void unloadAndDeleteLater(); From 806905f747bef0e15568d609489c706e7ad4c7ca Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 15:27:26 -0400 Subject: [PATCH 024/198] Explicitly set the color in MyTextField. --- gpt4all-chat/qml/MyTextField.qml | 1 + 1 file changed, 1 insertion(+) diff --git a/gpt4all-chat/qml/MyTextField.qml b/gpt4all-chat/qml/MyTextField.qml index 730d80216225..251aaf09aa73 100644 --- a/gpt4all-chat/qml/MyTextField.qml +++ b/gpt4all-chat/qml/MyTextField.qml @@ -11,4 +11,5 @@ TextField { color: theme.backgroundDark radius: 10 } + color: theme.textColor } \ No newline at end of file From 2679dc1521c48c28a2cb06b4baef2ee782adec4d Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 15:35:10 -0400 Subject: [PATCH 025/198] Give note about gpt-4 and openai key access. --- gpt4all-chat/modellist.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index e0bb12de1e53..1a622c0780ad 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -1086,6 +1086,8 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) } { + const QString chatGPT4Warn = tr("

    * Even if you pay OpenAI for ChatGPT-4 this does not guarantee API key access. Contact OpenAI for more info."); + const QString modelName = "ChatGPT-4"; const QString id = modelName; const QString modelFilename = "chatgpt-gpt-4.txt"; @@ -1098,7 +1100,7 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) updateData(id, ModelList::FilesizeRole, "minimal"); updateData(id, ModelList::ChatGPTRole, true); updateData(id, ModelList::DescriptionRole, - tr("OpenAI's ChatGPT model GPT-4
    ") + chatGPTDesc); + tr("OpenAI's ChatGPT model GPT-4
    ") + chatGPTDesc + chatGPT4Warn); updateData(id, ModelList::RequiresVersionRole, "2.4.2"); updateData(id, ModelList::OrderRole, "cb"); updateData(id, ModelList::RamrequiredRole, 0); From ecafacd268d4db13af5ab37141391fd90f4c1e33 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 19:57:15 +0100 Subject: [PATCH 026/198] mapping, csharp-workflow and first attempt to build on Linux --- .circleci/config.yml | 1 + .circleci/continue_config.yml | 54 +++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index bdaf448675e8..dc76e62fe268 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,6 +12,7 @@ workflows: config-path: .circleci/continue_config.yml mapping: | gpt4all-bindings/python/.* run-python-workflow true + gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-backend/.* run-chat-workflow true gpt4all-chat/.* run-chat-workflow true .* run-default-workflow true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 685715803aa2..c688de571b2f 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -13,6 +13,9 @@ parameters: run-chat-workflow: type: boolean default: false + run-csharp-workflow: + type: boolean + default: false jobs: default-job: @@ -306,6 +309,46 @@ jobs: - store_artifacts: path: /tmp/workspace + build-csharp-linux: + working_directory: ~/gpt4all-bindings/csharp + docker: + - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-bindings + - run: + name: Install dependencies + command: | + sudo apt-get update + sudo apt-get install -y cmake build-essential + - run: + name: Build C library + command: | + git submodule init + git submodule update + ./build_linux.sh + - run: + name: "Install project dependencies" + command: | + dotnet.exe restore Gpt4All + - save_cache: + paths: + - ~/.nuget/packages + key: gpt4all-csharp-bindings + - run: + name: "Run C# Tests" + command: | + dotnet.exe test -v n --filter --filter SKIP_ON_CI=False + - run: + name: Build C# Project + command: | + dotnet build Gpt4All --configuration Release + - persist_to_workspace: + root: gpt4all-bindings/csharp/runtimes/linux-x64/native + paths: + - "gpt4all-bindings/csharp/runtimes/linux-x64/native/*.so" workflows: version: 2 default: @@ -366,3 +409,14 @@ workflows: - build-py-windows - build-py-linux - build-py-macos + build-csharp-deploy: + when: << pipeline.parameters.run-csharp-workflow >> + jobs: + - nuget-hold: + type: approval + - hold: + type: approval + - build-csharp-linux: + filters: + branches: + only: \ No newline at end of file From 2cbe791e5c08055487aa433a2e113dbd8740d4b0 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 19:58:03 +0100 Subject: [PATCH 027/198] add a SkipOnCI trait fore tests --- .../csharp/Gpt4All.Tests/Constants.cs | 15 +++++++-------- .../csharp/Gpt4All.Tests/ModelFactoryTests.cs | 5 ++++- gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs | 6 ++++++ 3 files changed, 17 insertions(+), 9 deletions(-) create mode 100644 gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs index 3b8da77ce3eb..a326f43c8ad4 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/Constants.cs @@ -1,10 +1,9 @@ -namespace Gpt4All.Tests +namespace Gpt4All.Tests; + +public static class Constants { 
- public static class Constants - { - public const string MODELS_BASE_DIR = "../../../models"; - public const string LLAMA_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-l13b-snoozy.bin"; - public const string GPTJ_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-j-v1.3-groovy.bin"; - public const string MPT_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-mpt-7b-chat.bin"; - } + public const string MODELS_BASE_DIR = "../../../models"; + public const string LLAMA_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-l13b-snoozy.bin"; + public const string GPTJ_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-gpt4all-j-v1.3-groovy.bin"; + public const string MPT_MODEL_PATH = $"{MODELS_BASE_DIR}/ggml-mpt-7b-chat.bin"; } diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs index 19d91488948f..d7b0569e3986 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs @@ -1,4 +1,4 @@ -using Xunit; +using Xunit; namespace Gpt4All.Tests; @@ -12,18 +12,21 @@ public ModelFactoryTests() } [Fact] + [Trait(Traits.SkipOnCI, "True")] public void CanLoadLlamaModel() { using var model = _modelFactory.LoadModel(Constants.LLAMA_MODEL_PATH); } [Fact] + [Trait(Traits.SkipOnCI, "True")] public void CanLoadGptjModel() { using var model = _modelFactory.LoadModel(Constants.GPTJ_MODEL_PATH); } [Fact] + [Trait(Traits.SkipOnCI, "True")] public void CanLoadMptModel() { using var model = _modelFactory.LoadModel(Constants.MPT_MODEL_PATH); diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs new file mode 100644 index 000000000000..572fb1c1f745 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/Traits.cs @@ -0,0 +1,6 @@ +namespace Gpt4All.Tests; + +public static class Traits +{ + public const string SkipOnCI = "SKIP_ON_CI"; +} From ce9e26463ebff576a9ea396356b6950ad911ba25 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:13:14 +0100 Subject: [PATCH 028/198] fix build-csharp-deploy --- .circleci/continue_config.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index c688de571b2f..eacf638ba034 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -409,9 +409,9 @@ workflows: - build-py-windows - build-py-linux - build-py-macos - build-csharp-deploy: - when: << pipeline.parameters.run-csharp-workflow >> - jobs: + build-csharp-deploy: + when: << pipeline.parameters.run-csharp-workflow >> + jobs: - nuget-hold: type: approval - hold: @@ -419,4 +419,6 @@ workflows: - build-csharp-linux: filters: branches: - only: \ No newline at end of file + only: + requires: + - hold \ No newline at end of file From 939ed6a2b5f127969e8a3824c65dfcc39d65ab2b Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:18:59 +0100 Subject: [PATCH 029/198] sudo fix --- .circleci/continue_config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index eacf638ba034..662269b356f2 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -321,8 +321,8 @@ jobs: - run: name: Install dependencies command: | - sudo apt-get update - sudo apt-get install -y cmake build-essential + apt-get update + apt-get install -y cmake build-essential - run: name: Build C library command: | From 691e4cf6e075d55462b0f7f9e2fecd39a0106a76 Mon 
Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:23:12 +0100 Subject: [PATCH 030/198] fix build C library workdir --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 662269b356f2..5efed7bfaabc 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -310,7 +310,6 @@ jobs: path: /tmp/workspace build-csharp-linux: - working_directory: ~/gpt4all-bindings/csharp docker: - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 steps: @@ -328,6 +327,7 @@ jobs: command: | git submodule init git submodule update + cd gpt4all-bindings/csharp ./build_linux.sh - run: name: "Install project dependencies" From 9bb3000bdb08fc843e26883d7dc6710c426194d0 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:26:34 +0100 Subject: [PATCH 031/198] remove .exe after dotnet + fix cwd --- .circleci/continue_config.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 5efed7bfaabc..a120a813cc3d 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -332,7 +332,8 @@ jobs: - run: name: "Install project dependencies" command: | - dotnet.exe restore Gpt4All + cd gpt4all-bindings/csharp + dotnet restore Gpt4All - save_cache: paths: - ~/.nuget/packages @@ -340,10 +341,12 @@ jobs: - run: name: "Run C# Tests" command: | - dotnet.exe test -v n --filter --filter SKIP_ON_CI=False + cd gpt4all-bindings/csharp + dotnet test -v n --filter --filter SKIP_ON_CI=False - run: name: Build C# Project command: | + cd gpt4all-bindings/csharp dotnet build Gpt4All --configuration Release - persist_to_workspace: root: gpt4all-bindings/csharp/runtimes/linux-x64/native From ce7e02388dfca015b0039a80a29e86ac1bba3524 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:30:42 +0100 Subject: [PATCH 032/198] fix dotnet test target --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index a120a813cc3d..cfcbabd6bc08 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -342,7 +342,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test -v n --filter --filter SKIP_ON_CI=False + dotnet test . --filter --filter SKIP_ON_CI=False - run: name: Build C# Project command: | From e554405aef73983af4c6d376bdb13705c7a7739b Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:35:52 +0100 Subject: [PATCH 033/198] remove duplicate --filter --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index cfcbabd6bc08..5bd7b18504f4 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -342,7 +342,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test . --filter --filter SKIP_ON_CI=False + dotnet test . 
--filter SKIP_ON_CI=False - run: name: Build C# Project command: | From d41f993e67349aa17d8bbbae4afa60a8d88332e4 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:50:47 +0100 Subject: [PATCH 034/198] bump net sdk version to 7.0 to support the tests project --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 5bd7b18504f4..e534fb078b05 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -311,7 +311,7 @@ jobs: build-csharp-linux: docker: - - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 + - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 steps: - checkout - restore_cache: From b877cfa3e9062c9cb2bc5f1cf1487c73bb963445 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 20:57:37 +0100 Subject: [PATCH 035/198] revert sdk version bump + specify test project --- .circleci/continue_config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index e534fb078b05..5fc2c1def80f 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -311,7 +311,7 @@ jobs: build-csharp-linux: docker: - - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 + - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 steps: - checkout - restore_cache: @@ -342,7 +342,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test . --filter SKIP_ON_CI=False + dotnet test Gtp4All.Tests --filter SKIP_ON_CI=False - run: name: Build C# Project command: | From 53600a2970cb9b8daebb31e7bb996d7bbc766056 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 21:01:12 +0100 Subject: [PATCH 036/198] fix typo --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 5fc2c1def80f..73087e60979d 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -342,7 +342,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gtp4All.Tests --filter SKIP_ON_CI=False + dotnet test Gpt4All.Tests --filter SKIP_ON_CI=False - run: name: Build C# Project command: | From 7c67134b8c4b02f2b82615f0bc410d3b9dabdcd7 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 21:16:39 +0100 Subject: [PATCH 037/198] try to sort out ci only error on build related to CA2101 --- gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs index 0a6060098f7e..cc43f3662aab 100644 --- a/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs +++ b/gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs @@ -41,7 +41,7 @@ public unsafe partial struct llmodel_prompt_context public float context_erase; } - +#pragma warning disable CA2101 internal static unsafe partial class NativeMethods { [UnmanagedFunctionPointer(CallingConvention.Cdecl)] @@ -105,3 +105,4 @@ public static extern void llmodel_prompt( [return: NativeTypeName("int32_t")] public static extern int llmodel_threadCount([NativeTypeName("llmodel_model")] IntPtr model); } +#pragma warning restore CA2101 From b3f4169466e6539b36f25e9521b3b9d0388e87be Mon Sep 17 00:00:00 2001 From: mvenditto Date: Thu, 15 Jun 2023 
21:24:10 +0100 Subject: [PATCH 038/198] fix persist_to_workspace paths --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 73087e60979d..2554886519e6 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -351,7 +351,7 @@ jobs: - persist_to_workspace: root: gpt4all-bindings/csharp/runtimes/linux-x64/native paths: - - "gpt4all-bindings/csharp/runtimes/linux-x64/native/*.so" + - "*.so" workflows: version: 2 default: From 23af0416738095713dfadaeb16def1b853890bfa Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 13:06:23 +0100 Subject: [PATCH 039/198] add build-csharp-windows (mingw) --- .circleci/continue_config.yml | 54 +++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 2554886519e6..f0dae76db0cd 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -352,6 +352,54 @@ jobs: root: gpt4all-bindings/csharp/runtimes/linux-x64/native paths: - "*.so" + + build-csharp-windows: + executor: + name: win/default + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-bindings + - run: + name: Install MinGW64 + command: choco install -y mingw --force --no-progress + - run: + name: Add MinGW64 to PATH + command: $env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + - run: + name: Install dependencies + command: choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' + - run: + name: Build C library + command: | + git submodule init + git submodule update + ./build_win-mingw.ps1 + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet.exe restore Gpt4All + - save_cache: + paths: + - C:\Users\circleci\.nuget\packages + key: gpt4all-csharp-bindings + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet.exe test Gpt4All.Tests --filter SKIP_ON_CI=False + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet.exe build Gpt4All --configuration Release + - persist_to_workspace: + root: gpt4all-bindings/csharp/runtimes/windows-x64/native + paths: + - "*.dll" + workflows: version: 2 default: @@ -420,6 +468,12 @@ workflows: - hold: type: approval - build-csharp-linux: + filters: + branches: + only: + requires: + - hold + - build-csharp-windows: filters: branches: only: From 5fe4f25d645211819981e20e184404a3692986e6 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 13:15:44 +0100 Subject: [PATCH 040/198] fix curr working directory --- .circleci/continue_config.yml | 1 + gpt4all-bindings/csharp/build_win-mingw.ps1 | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index f0dae76db0cd..bf7bc603209b 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -375,6 +375,7 @@ jobs: command: | git submodule init git submodule update + cd gpt4all-bindings/csharp ./build_win-mingw.ps1 - run: name: "Install project dependencies" diff --git a/gpt4all-bindings/csharp/build_win-mingw.ps1 b/gpt4all-bindings/csharp/build_win-mingw.ps1 index 1e3dd8ef4aed..33b343152699 100644 --- a/gpt4all-bindings/csharp/build_win-mingw.ps1 +++ b/gpt4all-bindings/csharp/build_win-mingw.ps1 @@ -13,5 +13,4 @@ cmake --build $BUILD_DIR --parallel --config Release # copy native dlls cp 
"C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin\*dll" $LIBS_DIR -cp "$BUILD_DIR\bin\*.dll" $LIBS_DIR -mv $LIBS_DIR\llmodel.dll $LIBS_DIR\libllmodel.dll \ No newline at end of file +cp "$BUILD_DIR\bin\*.dll" $LIBS_DIR \ No newline at end of file From ae8bcd9eff440d0afc5661f74b9db066958a68c6 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 13:55:14 +0100 Subject: [PATCH 041/198] try to fix cmake not in path --- .circleci/continue_config.yml | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index bf7bc603209b..67125276d625 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -364,18 +364,18 @@ jobs: - run: name: Install MinGW64 command: choco install -y mingw --force --no-progress - - run: - name: Add MinGW64 to PATH - command: $env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" - run: name: Install dependencies - command: choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' + command: | + choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' - run: name: Build C library command: | git submodule init - git submodule update + git submodule update --recursive cd gpt4all-bindings/csharp + $env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + $env.Path += ";C:\Program Files\CMake\bin" ./build_win-mingw.ps1 - run: name: "Install project dependencies" @@ -401,6 +401,18 @@ jobs: paths: - "*.dll" + store-and-upload-nupkgs: + docker: + - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 + steps: + - setup_remote_docker + - attach_workspace: + at: /tmp/workspace + - run: + name: TEST - list libraries + command: | + ls -R /tmp/workspace + workflows: version: 2 default: @@ -479,4 +491,12 @@ workflows: branches: only: requires: - - hold \ No newline at end of file + - hold + - store-and-upload-nupkgs: + filters: + branches: + only: + requires: + - nuget-hold + - build-py-windows + - build-py-linux \ No newline at end of file From 1e160340bd99aec1a3268209246234ebee32a08e Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 13:56:14 +0100 Subject: [PATCH 042/198] fix naming --- .circleci/continue_config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 67125276d625..8afb3f00d807 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -498,5 +498,5 @@ workflows: only: requires: - nuget-hold - - build-py-windows - - build-py-linux \ No newline at end of file + - build-csharp-windows + - build-csharp-linux \ No newline at end of file From 5b242ba7a9f866e7dd6598b24133be19b624a4ea Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 14:04:51 +0100 Subject: [PATCH 043/198] fix typo --- .circleci/continue_config.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 8afb3f00d807..3b57db1f5332 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -356,6 +356,7 @@ jobs: build-csharp-windows: executor: name: win/default + shell: powershell.exe -ExecutionPolicy Bypass steps: - checkout - restore_cache: @@ -374,8 +375,8 @@ jobs: git submodule init git submodule update --recursive cd gpt4all-bindings/csharp - $env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" - $env.Path += ";C:\Program 
Files\CMake\bin" + $Env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + $Env:Path += ";C:\Program Files\CMake\bin" ./build_win-mingw.ps1 - run: name: "Install project dependencies" From a574d79fb3f06dc388784cd463de43b6499356d0 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 14:17:16 +0100 Subject: [PATCH 044/198] fix mismatchin runtimes folder name --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 3b57db1f5332..416763b710be 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -398,7 +398,7 @@ jobs: cd gpt4all-bindings/csharp dotnet.exe build Gpt4All --configuration Release - persist_to_workspace: - root: gpt4all-bindings/csharp/runtimes/windows-x64/native + root: gpt4all-bindings/csharp/runtimes/win-x64/native paths: - "*.dll" From e40fb67b855737d7c705a345a4c2f993f6de88e0 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 14:22:54 +0100 Subject: [PATCH 045/198] switch to windows.large --- .circleci/continue_config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 416763b710be..10dd0daa732d 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -356,6 +356,7 @@ jobs: build-csharp-windows: executor: name: win/default + resource_class: windows.large shell: powershell.exe -ExecutionPolicy Bypass steps: - checkout From d3831f7dbe61018ad8e3eae3a110a8e147a5fbf4 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 14:38:15 +0100 Subject: [PATCH 046/198] first attempt to store test results --- .circleci/continue_config.yml | 19 ++++++++++++++----- .../csharp/Gpt4All.Tests/ModelFactoryTests.cs | 5 +++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 10dd0daa732d..782766724ac2 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -339,15 +339,24 @@ jobs: - ~/.nuget/packages key: gpt4all-csharp-bindings - run: - name: "Run C# Tests" + name: Build C# Project command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests --filter SKIP_ON_CI=False + dotnet build Gpt4All --configuration Release - run: - name: Build C# Project + name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release + dotnet test Gpt4All.Tests --no-build --filter SKIP_ON_CI!=True --logger "trx" + - run: + name: test results + when: always + command: | + dotnet tool install -g trx2junit + export PATH="$PATH:/root/.dotnet/tools" + trx2junit tests/**/TestResults/*.trx + - store_test_results: + path: tests/TestResults - persist_to_workspace: root: gpt4all-bindings/csharp/runtimes/linux-x64/native paths: @@ -392,7 +401,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests --filter SKIP_ON_CI=False + dotnet.exe test Gpt4All.Tests --filter SKIP_ON_CI!=True - run: name: Build C# Project command: | diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs index d7b0569e3986..bc5b50ed78f1 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs @@ -31,4 +31,9 @@ public void CanLoadMptModel() { using var model = _modelFactory.LoadModel(Constants.MPT_MODEL_PATH); } + + [Fact] + 
public void DummyTest() + { + } } From 444d922ccd9c3503ec7cb9d963908f90db5be953 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 14:41:05 +0100 Subject: [PATCH 047/198] fix executor class --- .circleci/continue_config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 782766724ac2..01293222c622 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -364,8 +364,7 @@ jobs: build-csharp-windows: executor: - name: win/default - resource_class: windows.large + name: win/large shell: powershell.exe -ExecutionPolicy Bypass steps: - checkout From 4c3507db9519c74574425eb19d93aafc2322b54f Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 14:41:51 +0100 Subject: [PATCH 048/198] again fix executor size --- .circleci/continue_config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 01293222c622..47ae359808cb 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -364,7 +364,8 @@ jobs: build-csharp-windows: executor: - name: win/large + name: win/default + size: large shell: powershell.exe -ExecutionPolicy Bypass steps: - checkout From a92fe0a0895190ae396defb6c57eff66eab19536 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 15:00:12 +0100 Subject: [PATCH 049/198] attempt to fix tests --- .circleci/continue_config.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 47ae359808cb..d03a0e873431 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -347,20 +347,21 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet test Gpt4All.Tests --filter SKIP_ON_CI!=True --logger "trx" - run: name: test results when: always command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit export PATH="$PATH:/root/.dotnet/tools" - trx2junit tests/**/TestResults/*.trx + trx2junit TestResults/*.trx - store_test_results: path: tests/TestResults - persist_to_workspace: - root: gpt4all-bindings/csharp/runtimes/linux-x64/native + root: gpt4all-bindings/csharp paths: - - "*.so" + - runtimes/linux-x64/native build-csharp-windows: executor: From a987d0a98fcd56d38c42d569a909821c7f5fdb16 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 15:06:32 +0100 Subject: [PATCH 050/198] fix store_test_results path --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index d03a0e873431..c608e10e060f 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -357,7 +357,7 @@ jobs: export PATH="$PATH:/root/.dotnet/tools" trx2junit TestResults/*.trx - store_test_results: - path: tests/TestResults + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - persist_to_workspace: root: gpt4all-bindings/csharp paths: From 33ead4cbf1dbb4ef2670bee3aa6f7b0416f6d788 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 15:11:07 +0100 Subject: [PATCH 051/198] tests on windows --- .circleci/continue_config.yml | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 
c608e10e060f..deee364c48d8 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -399,19 +399,29 @@ jobs: - C:\Users\circleci\.nuget\packages key: gpt4all-csharp-bindings - run: - name: "Run C# Tests" + name: Build C# Project command: | cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests --filter SKIP_ON_CI!=True + dotnet.exe build Gpt4All --configuration Release - run: - name: Build C# Project + name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe build Gpt4All --configuration Release + dotnet.exe test Gpt4All.Tests --filter SKIP_ON_CI!=True --logger "trx" + - run: + name: test results + when: always + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - persist_to_workspace: - root: gpt4all-bindings/csharp/runtimes/win-x64/native + root: gpt4all-bindings/csharp paths: - - "*.dll" + - runtimes/win-x64/native/ store-and-upload-nupkgs: docker: From 9e77a1bb6fb9abd7b015e616e059ce6feb98ad74 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 17:01:02 +0100 Subject: [PATCH 052/198] fix --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index deee364c48d8..7ca83f79edc0 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -414,7 +414,7 @@ jobs: command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit - export $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" + $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults From 998fea832f8010151bb2d276e77c87ce51fd7759 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:10:35 +0100 Subject: [PATCH 053/198] macos first attempt --- .circleci/continue_config.yml | 74 +++++++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 7ca83f79edc0..974ff9f1f1d6 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -347,7 +347,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests --filter SKIP_ON_CI!=True --logger "trx" + dotnet test Gpt4All.Tests -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" - run: name: test results when: always @@ -407,7 +407,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests --filter SKIP_ON_CI!=True --logger "trx" + dotnet.exe test Gpt4All.Tests -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" - run: name: test results when: always @@ -423,6 +423,67 @@ jobs: paths: - runtimes/win-x64/native/ + build-csharp-macos: + macos: + xcode: "14.2.0" + resource_class: macos.m1.large.gen1 + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-bindings + - run: + name: Install dependencies + command: | + brew install cmake + curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh + ./dotnet_install.sh -Channel 6.0.1xx + - run: + name: Build C library + command: | + git submodule init + git submodule update --recursive + BASE_DIR="runtimes/osx-x64" + NATIVE_DIR="$BASE_DIR/native" + BUILD_DIR="$BASE_DIR/build" 
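+          # CMAKE_OSX_ARCHITECTURES="x86_64;arm64" below asks CMake for
+          # universal (fat) dylibs, so one native library serves both Intel
+          # and Apple Silicon; `lipo -info` on the built dylibs confirms
+          # that both slices are present.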
+ mkdir -p "$NATIVE_DIR" "$BUILD_DIR" + cmake -S ../../gpt4all-backend -B "$BUILD_DIR" -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" + cmake --build "$BUILD_DIR" -j --config Release + cp "$BUILD_DIR"/*.dylib "$NATIVE_DIR" + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet restore Gpt4All + - save_cache: + paths: + - ~/.nuget/packages + key: gpt4all-csharp-bindings + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet build Gpt4All --configuration Release + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet test Gpt4All.Tests -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + - run: + name: test results + when: always + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export PATH="$PATH:/root/.dotnet/tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + - persist_to_workspace: + root: gpt4all-bindings/csharp + paths: + - runtimes/osx-x64/native + store-and-upload-nupkgs: docker: - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 @@ -514,6 +575,12 @@ workflows: only: requires: - hold + - build-csharp-macos: + filters: + branches: + only: + requires: + - hold - store-and-upload-nupkgs: filters: branches: @@ -521,4 +588,5 @@ workflows: requires: - nuget-hold - build-csharp-windows - - build-csharp-linux \ No newline at end of file + - build-csharp-linux + - build-csharp-macos \ No newline at end of file From 8d77d9ad895019b95011abc821305f08ff966560 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:14:13 +0100 Subject: [PATCH 054/198] switch to medium for macos to test on free plan --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 974ff9f1f1d6..0f4a3ffa5216 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -426,7 +426,7 @@ jobs: build-csharp-macos: macos: xcode: "14.2.0" - resource_class: macos.m1.large.gen1 + resource_class: macos.m1.medium.gen1 steps: - checkout - restore_cache: From d290ecee3437280d72cf1f74e60500112aab43d5 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:17:09 +0100 Subject: [PATCH 055/198] try again --- .circleci/continue_config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 0f4a3ffa5216..7fe3dbcfd6c5 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -425,8 +425,7 @@ jobs: build-csharp-macos: macos: - xcode: "14.2.0" - resource_class: macos.m1.medium.gen1 + xcode: "14.0.0" steps: - checkout - restore_cache: From a4cbaa8263345cdfc414c85c4d5bd70020499c9b Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:22:45 +0100 Subject: [PATCH 056/198] fix --- .circleci/continue_config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 7fe3dbcfd6c5..d0571dd917c6 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -347,7 +347,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet test -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" - run: name: test results when: always @@ -407,7 +407,7 
@@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet.exe test -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" - run: name: test results when: always @@ -467,7 +467,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet test -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" - run: name: test results when: always From 53ac1de5a963879da8eec875178037b556757c23 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:37:03 +0100 Subject: [PATCH 057/198] another attempt to fix messed up tests + macos dotnet install --- .circleci/continue_config.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index d0571dd917c6..d06dd42c23ce 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -347,10 +347,9 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet test -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results - when: always command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit @@ -407,10 +406,9 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet.exe test -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results - when: always command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit @@ -436,7 +434,7 @@ jobs: command: | brew install cmake curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh - ./dotnet_install.sh -Channel 6.0.1xx + ./dotnet-install.sh -Channel 6.0.1xx - run: name: Build C library command: | @@ -467,10 +465,9 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test -c Release --no-build --filter SKIP_ON_CI!=True --logger "trx" + dotnet test -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results - when: always command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit From c76e05c84c9e0edb670772d8805d51b5fa8b0efa Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:43:55 +0100 Subject: [PATCH 058/198] try to fix tests build the samples proj --- .circleci/continue_config.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index d06dd42c23ce..576074cb97ef 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -347,7 +347,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test -v n --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results command: | @@ -406,7 +406,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test -v n --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet.exe test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results command: | @@ -434,6 +434,7 @@ jobs: command: | brew install cmake curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o 
dotnet-install.sh + chmod +x dotnet-install.sh ./dotnet-install.sh -Channel 6.0.1xx - run: name: Build C library @@ -465,7 +466,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test -v n --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results command: | From f14b1869d9b3d5f2918d1fb99749acdd772f694c Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 18:56:33 +0100 Subject: [PATCH 059/198] fix cwd on macos build step --- .circleci/continue_config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 576074cb97ef..a0bf558eb2e3 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -441,6 +441,7 @@ jobs: command: | git submodule init git submodule update --recursive + cd gpt4all-bindings/csharp BASE_DIR="runtimes/osx-x64" NATIVE_DIR="$BASE_DIR/native" BUILD_DIR="$BASE_DIR/build" From 79767148e0a184da8f8a6403fbf17ae630c7f532 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 19:06:40 +0100 Subject: [PATCH 060/198] macos try to install dotnet with brew --- .circleci/continue_config.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index a0bf558eb2e3..f8feb50363b9 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -433,9 +433,7 @@ jobs: name: Install dependencies command: | brew install cmake - curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh - chmod +x dotnet-install.sh - ./dotnet-install.sh -Channel 6.0.1xx + brew install dotnet@6 - run: name: Build C library command: | From 3ef1cbef90eaa2f69423706032694e17cbb3f8f5 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 19:10:00 +0100 Subject: [PATCH 061/198] revert --- .circleci/continue_config.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index f8feb50363b9..ef105928e732 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -433,7 +433,10 @@ jobs: name: Install dependencies command: | brew install cmake - brew install dotnet@6 + curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh + chmod +x dotnet-install.sh + ./dotnet-install.sh -Channel 6.0.1xx + dotnet --version - run: name: Build C library command: | From 95651809b3efaf51f8d322e34485f88d1c6badcd Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 19:22:14 +0100 Subject: [PATCH 062/198] try to fix dotnet install on macos --- .circleci/continue_config.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index ef105928e732..d2f81e727094 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -435,8 +435,9 @@ jobs: brew install cmake curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh chmod +x dotnet-install.sh - ./dotnet-install.sh -Channel 6.0.1xx - dotnet --version + ./dotnet-install.sh --channel 6.0.1xx --install-dir $HOME/cli-tools + ls $HOME/cli-tools + $HOME/cli-tools/dotnet --version - run: name: Build C library command: | @@ -454,7 +455,7 @@ jobs: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp - dotnet restore 
Gpt4All + $HOME/cli-tools/dotnet restore Gpt4All - save_cache: paths: - ~/.nuget/packages @@ -463,12 +464,12 @@ jobs: name: Build C# Project command: | cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release + $HOME/cli-tools/dotnet build Gpt4All --configuration Release - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" + $HOME/cli-tools/dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results command: | From 2f59332f9a0dcc1c76067869465a6d6e5f4af625 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 19:37:42 +0100 Subject: [PATCH 063/198] try to fix weird build errors an macos --- .circleci/continue_config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index d2f81e727094..d200e1306208 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -435,9 +435,9 @@ jobs: brew install cmake curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh chmod +x dotnet-install.sh - ./dotnet-install.sh --channel 6.0.1xx --install-dir $HOME/cli-tools + ./dotnet-install.sh --channel 7.0.1xx --install-dir $HOME/cli-tools ls $HOME/cli-tools - $HOME/cli-tools/dotnet --version + $HOME/cli-tools/dotnet --info - run: name: Build C library command: | From 5dcfdc192b7d2b1c465b8a692f5fe439ff892cfe Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 19:54:05 +0100 Subject: [PATCH 064/198] try an alternative build on macos --- .circleci/continue_config.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index d200e1306208..91117fd7c317 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -433,11 +433,8 @@ jobs: name: Install dependencies command: | brew install cmake - curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh - chmod +x dotnet-install.sh - ./dotnet-install.sh --channel 7.0.1xx --install-dir $HOME/cli-tools - ls $HOME/cli-tools - $HOME/cli-tools/dotnet --info + brew install dotnet@6 + brew install dotnet@7 - run: name: Build C library command: | From c3aafc81d9b6dffda216ed133d93daeb53c86900 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 19:54:24 +0100 Subject: [PATCH 065/198] debug --- .circleci/continue_config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 91117fd7c317..4aec410968a9 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -435,6 +435,7 @@ jobs: brew install cmake brew install dotnet@6 brew install dotnet@7 + dotnet --info - run: name: Build C library command: | From 3853560a4475affb19efebb32b6326dcecdf6da5 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 20:04:08 +0100 Subject: [PATCH 066/198] fix --- .circleci/continue_config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 4aec410968a9..10f9ca3c0585 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -453,7 +453,7 @@ jobs: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet restore Gpt4All + dotnet restore Gpt4All - save_cache: paths: - 
~/.nuget/packages @@ -462,12 +462,12 @@ jobs: name: Build C# Project command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet build Gpt4All --configuration Release + dotnet build Gpt4All --configuration Release - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results command: | From 1d570bfe7694e0a30fa5b17aeb60ca33eb021cdf Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 20:12:38 +0100 Subject: [PATCH 067/198] bump test to net 7 --- gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj b/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj index a2918628a0c2..03bc3e3d423f 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj @@ -1,7 +1,7 @@ - net6.0 + net7.0 enable false From 54efa75c7d7e718187dbaeb9fc2bf69c69431e97 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 20:13:53 +0100 Subject: [PATCH 068/198] try bump sdk to 7 --- .circleci/continue_config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 10f9ca3c0585..04b3fd583590 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -311,7 +311,7 @@ jobs: build-csharp-linux: docker: - - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 + - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 steps: - checkout - restore_cache: @@ -433,7 +433,6 @@ jobs: name: Install dependencies command: | brew install cmake - brew install dotnet@6 brew install dotnet@7 dotnet --info - run: From 7805492c4fd6f9ffb3b7c4c85a8a0423dd7472f6 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 22:30:55 +0200 Subject: [PATCH 069/198] try fix dotnet tools path on osx + bugfix --- .circleci/continue_config.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 04b3fd583590..100460a144f4 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -16,7 +16,7 @@ parameters: run-csharp-workflow: type: boolean default: false - + jobs: default-job: docker: @@ -326,7 +326,7 @@ jobs: name: Build C library command: | git submodule init - git submodule update + git submodule update --recursive cd gpt4all-bindings/csharp ./build_linux.sh - run: @@ -353,7 +353,7 @@ jobs: command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit - export PATH="$PATH:/root/.dotnet/tools" + export PATH="$PATH:$HOME/.dotnet/tools" trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults @@ -472,7 +472,7 @@ jobs: command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit - export PATH="$PATH:/root/.dotnet/tools" + export PATH="$PATH:$HOME/.dotnet/tools" trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults From a2d59b09e56bb99ffdbed4ed53d818729ec38abf Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 22:50:20 +0200 Subject: [PATCH 070/198] try fix nuget cache issue on win and macos dotnet tool path --- .circleci/continue_config.yml 
| 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 100460a144f4..518fafbb3634 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -371,7 +371,7 @@ jobs: - checkout - restore_cache: keys: - - gpt4all-csharp-bindings + - gpt4all-csharp-nuget-packages-win - run: name: Install MinGW64 command: choco install -y mingw --force --no-progress @@ -396,7 +396,7 @@ jobs: - save_cache: paths: - C:\Users\circleci\.nuget\packages - key: gpt4all-csharp-bindings + key: gpt4all-csharp-nuget-packages-win - run: name: Build C# Project command: | @@ -433,8 +433,10 @@ jobs: name: Install dependencies command: | brew install cmake - brew install dotnet@7 - dotnet --info + curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh + chmod +x dotnet-install.sh + ./dotnet-install.sh -Channel 7.0.1xx --install-dir $HOME/cli-tools + $HOME/cli-tools/dotnet --info - run: name: Build C library command: | @@ -452,7 +454,7 @@ jobs: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp - dotnet restore Gpt4All + $HOME/cli-tools/dotnet restore Gpt4All - save_cache: paths: - ~/.nuget/packages @@ -461,18 +463,19 @@ jobs: name: Build C# Project command: | cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release + $HOME/cli-tools/dotnet build Gpt4All --configuration Release - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" + $HOME/cli-tools/dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: test results command: | cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit + $HOME/cli-tools/dotnet tool install -g trx2junit export PATH="$PATH:$HOME/.dotnet/tools" + ls $HOME/.dotnet/tools trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults From 6e044e1a89bdc98a9819bbb3d5be2200ad2bb7e3 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 23:07:28 +0200 Subject: [PATCH 071/198] add --nologo to suppress welcome message + cleanup --- .circleci/continue_config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 518fafbb3634..04be89ec1d5a 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -316,7 +316,7 @@ jobs: - checkout - restore_cache: keys: - - gpt4all-csharp-bindings + - gpt4all-csharp-nuget-packages-nix - run: name: Install dependencies command: | @@ -337,19 +337,19 @@ jobs: - save_cache: paths: - ~/.nuget/packages - key: gpt4all-csharp-bindings + key: gpt4all-csharp-nuget-packages-nix - run: name: Build C# Project command: | cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release + dotnet build Gpt4All --configuration Release --nologo - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet test Gpt4All.Tests -v n -c Release --no-build --filter "SKIP_ON_CI!=True" --logger "trx" - run: - name: test results + name: Test results command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit @@ -401,14 +401,14 @@ jobs: name: Build C# Project command: | cd gpt4all-bindings/csharp - dotnet.exe build Gpt4All --configuration Release + dotnet.exe 
build Gpt4All --configuration Release --nologo - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet.exe test Gpt4All.Tests -v n -c Release --no-build --filter "SKIP_ON_CI!=True" --logger "trx" - run: - name: test results + name: Test results command: | cd gpt4all-bindings/csharp/Gpt4All.Tests dotnet tool install -g trx2junit @@ -428,7 +428,7 @@ jobs: - checkout - restore_cache: keys: - - gpt4all-csharp-bindings + - gpt4all-csharp-nuget-packages-nix - run: name: Install dependencies command: | @@ -458,24 +458,24 @@ jobs: - save_cache: paths: - ~/.nuget/packages - key: gpt4all-csharp-bindings + key: gpt4all-csharp-nuget-packages-nix - run: name: Build C# Project command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet build Gpt4All --configuration Release + $HOME/cli-tools/dotnet build Gpt4All --configuration Release --nologo - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet test Gpt4All.Tests -v n --filter "SKIP_ON_CI!=True" --logger "trx" + $HOME/cli-tools/dotnet test Gpt4All.Tests -v n -c Release --no-build --filter "SKIP_ON_CI!=True" --logger "trx" - run: - name: test results + name: Test results command: | cd gpt4all-bindings/csharp/Gpt4All.Tests $HOME/cli-tools/dotnet tool install -g trx2junit export PATH="$PATH:$HOME/.dotnet/tools" - ls $HOME/.dotnet/tools + export PATH="$PATH:$HOME/cli-tools" trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults From d9fab97e838cfb39b2d6a840c47165b5f09dd50d Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 23:12:09 +0200 Subject: [PATCH 072/198] remove --no-build from test --- .circleci/continue_config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 04be89ec1d5a..48ccecc3530f 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -347,7 +347,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --no-build --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: Test results command: | @@ -406,7 +406,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -v n -c Release --no-build --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: Test results command: | @@ -468,7 +468,7 @@ jobs: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet test Gpt4All.Tests -v n -c Release --no-build --filter "SKIP_ON_CI!=True" --logger "trx" + $HOME/cli-tools/dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: Test results command: | From ddf124bb7628065e5ed8d0ee441c7d0983b39d29 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Fri, 16 Jun 2023 23:45:22 +0200 Subject: [PATCH 073/198] fix dotnet install version on macos --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 48ccecc3530f..b837a2e78c0b 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -435,7 +435,7 @@ jobs: brew install cmake curl 
https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh chmod +x dotnet-install.sh - ./dotnet-install.sh -Channel 7.0.1xx --install-dir $HOME/cli-tools + ./dotnet-install.sh -Channel 7.0 --install-dir $HOME/cli-tools $HOME/cli-tools/dotnet --info - run: name: Build C library From 7e92f9c4012559be00141e98068d8a54f4e2685c Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sat, 17 Jun 2023 00:07:23 +0200 Subject: [PATCH 074/198] macos again --- .circleci/continue_config.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index b837a2e78c0b..56f7b8f2f9e7 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -433,10 +433,11 @@ jobs: name: Install dependencies command: | brew install cmake - curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh - chmod +x dotnet-install.sh - ./dotnet-install.sh -Channel 7.0 --install-dir $HOME/cli-tools - $HOME/cli-tools/dotnet --info + brew install --cask dotnet-sdk + # curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh + # chmod +x dotnet-install.sh + # ./dotnet-install.sh -Channel 7.0 --install-dir $HOME/cli-tools + # $HOME/cli-tools/dotnet --info - run: name: Build C library command: | @@ -454,7 +455,7 @@ jobs: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet restore Gpt4All + dotnet restore Gpt4All - save_cache: paths: - ~/.nuget/packages @@ -463,19 +464,18 @@ jobs: name: Build C# Project command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet build Gpt4All --configuration Release --nologo + dotnet build Gpt4All --configuration Release --nologo - run: name: "Run C# Tests" command: | cd gpt4all-bindings/csharp - $HOME/cli-tools/dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - run: name: Test results command: | cd gpt4all-bindings/csharp/Gpt4All.Tests - $HOME/cli-tools/dotnet tool install -g trx2junit + dotnet tool install -g trx2junit export PATH="$PATH:$HOME/.dotnet/tools" - export PATH="$PATH:$HOME/cli-tools" trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults From c3ad76dcd1f85cc49e310138af77b16014edd2de Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sat, 17 Jun 2023 00:18:57 +0200 Subject: [PATCH 075/198] update deps for test project --- .../csharp/Gpt4All.Tests/Gpt4All.Tests.csproj | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj b/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj index 03bc3e3d423f..c76eb1b5166a 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/Gpt4All.Tests.csproj @@ -8,13 +8,13 @@ - - - + + + runtime; build; native; contentfiles; analyzers; buildtransitive all - + runtime; build; native; contentfiles; analyzers; buildtransitive all @@ -41,4 +41,19 @@ + + + + all + runtime; build; native; contentfiles; analyzers + + + all + runtime; build; native; contentfiles; analyzers + + + all + runtime; build; native; contentfiles; analyzers + + From 4697b968a8f0126b47aed2f68044aa9c3a53b154 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 16:40:21 +0200 Subject: [PATCH 076/198] 
better restore cache + some experimentation --- .circleci/continue_config.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 56f7b8f2f9e7..1aba95e6fa04 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -328,12 +328,22 @@ jobs: git submodule init git submodule update --recursive cd gpt4all-bindings/csharp - ./build_linux.sh + git submodule init + git submodule update --recursive + cd gpt4all-bindings/csharp + BASE_DIR="runtimes/linux-x64" + NATIVE_DIR="$BASE_DIR/native" + BUILD_DIR="$BASE_DIR/build" + mkdir -p "$NATIVE_DIR" "$BUILD_DIR" + cmake -S ../../gpt4all-backend -B "$BUILD_DIR" + cmake --build "$BUILD_DIR" -j --config Release + cp "$BUILD_DIR"/*.so "$NATIVE_DIR" - run: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp dotnet restore Gpt4All + dotnet restore Gpt4All.Tests - save_cache: paths: - ~/.nuget/packages @@ -388,11 +398,16 @@ jobs: $Env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" $Env:Path += ";C:\Program Files\CMake\bin" ./build_win-mingw.ps1 + - run: + name: TEST bash under windows + command: echo $(uname) + shell: bash.exe - run: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp dotnet.exe restore Gpt4All + dotnet.exe restore Gpt4All.Tests - save_cache: paths: - C:\Users\circleci\.nuget\packages @@ -456,6 +471,7 @@ jobs: command: | cd gpt4all-bindings/csharp dotnet restore Gpt4All + dotnet restore Gpt4All.Tests - save_cache: paths: - ~/.nuget/packages From d151beb8bf285d7a963125df829da6cd24a283be Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 16:45:40 +0200 Subject: [PATCH 077/198] fix --- .circleci/continue_config.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 1aba95e6fa04..952bb8882710 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -325,9 +325,6 @@ jobs: - run: name: Build C library command: | - git submodule init - git submodule update --recursive - cd gpt4all-bindings/csharp git submodule init git submodule update --recursive cd gpt4all-bindings/csharp From 422aecc5ba3cd16f52fdf2108c6644b2be7ec596 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 16:52:16 +0200 Subject: [PATCH 078/198] revert some bad changes --- .circleci/continue_config.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 952bb8882710..42b889c2f0cd 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -340,7 +340,6 @@ jobs: command: | cd gpt4all-bindings/csharp dotnet restore Gpt4All - dotnet restore Gpt4All.Tests - save_cache: paths: - ~/.nuget/packages @@ -395,16 +394,11 @@ jobs: $Env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" $Env:Path += ";C:\Program Files\CMake\bin" ./build_win-mingw.ps1 - - run: - name: TEST bash under windows - command: echo $(uname) - shell: bash.exe - run: name: "Install project dependencies" command: | cd gpt4all-bindings/csharp dotnet.exe restore Gpt4All - dotnet.exe restore Gpt4All.Tests - save_cache: paths: - C:\Users\circleci\.nuget\packages @@ -468,7 +462,6 @@ jobs: command: | cd gpt4all-bindings/csharp dotnet restore Gpt4All - dotnet restore Gpt4All.Tests - save_cache: paths: - ~/.nuget/packages From ec9148f52c7d3240e69e110a4c5d554513c4c676 Mon Sep 17 00:00:00 2001 From: mvenditto 
Date: Sun, 18 Jun 2023 22:55:32 +0200 Subject: [PATCH 079/198] further tests --- .circleci/config.yml | 1 + .circleci/continue_config.yml | 143 +++++++++++++++++++++++++++++++++- 2 files changed, 143 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index dc76e62fe268..541bb916a843 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,6 +11,7 @@ workflows: base-revision: main config-path: .circleci/continue_config.yml mapping: | + gpt4all-bindigs/* run-bindings-workflow true gpt4all-bindings/python/.* run-python-workflow true gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-backend/.* run-chat-workflow true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 42b889c2f0cd..e2ae3ee125c0 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -13,10 +13,13 @@ parameters: run-chat-workflow: type: boolean default: false + run-bindings-backend-workflow: + type: boolean + default: false run-csharp-workflow: type: boolean default: false - + jobs: default-job: docker: @@ -309,6 +312,123 @@ jobs: - store_artifacts: path: /tmp/workspace + build-bindings-backend-linux: + machine: + image: ubuntu-2204:2023.04.2 + steps: + - checkout + - run: + name: Update Submodules + command: | + git submodule sync + git submodule update --init --recursive + - run: + name: Install dependencies + command: | + apt-get update + apt-get install -y cmake build-essential + - run: + name: Build Libraries + command: | + cd gpt4all-backend + mkdir linux-x64 + cd linux-x64 + cmake .. + cmake --build . --parallel --config Release + cp *.so "$NATIVE_DIR" + - persist_to_workspace: + root: gpt4all-backend + paths: + - linux-x64/*.so + + build-bindings-backend-macos: + macos: + xcode: "14.0.0" + steps: + - checkout + - run: + name: Update Submodules + command: | + git submodule sync + git submodule update --init --recursive + - run: + name: Install dependencies + command: | + brew install cmake + - run: + name: Build Libraries + command: | + cd gpt4all-backend + mkdir osx-x64 + cd osx-x64 + cmake .. + cmake --build . --parallel --config Release -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" + cp *.dylib "$NATIVE_DIR" + - persist_to_workspace: + root: gpt4all-backend + paths: + - osx-x64/*.dylib + + build-bindings-backend-windows: + executor: + name: win/default + size: large + shell: powershell.exe -ExecutionPolicy Bypass + steps: + - checkout + - run: + name: Update Submodules + command: | + git submodule sync + git submodule update --init --recursive + - run: + name: Install MinGW64 + command: choco install -y mingw --force --no-progress + - run: + name: Install dependencies + command: | + choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' + - run: + name: Build Libraries + command: | + $Env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + $Env:Path += ";C:\Program Files\CMake\bin" + cd gpt4all-backend + mkdir win-x64 + cd win-x64 + cmake -G "MinGW Makefiles" .. + cmake --build . --parallel --config Release + cp "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin\*dll" . 
+ - persist_to_workspace: + root: gpt4all-backend + paths: + - win-x64/*.dll + + build-bindings-backend-windows-msvc: + machine: + image: 'windows-server-2022-gui:2023.03.1' + resource_class: windows.large + shell: powershell.exe -ExecutionPolicy Bypass + steps: + - checkout + - run: + name: Update Submodules + command: | + git submodule sync + git submodule update --init --recursive + - run: + name: Build Libraries + command: | + cd gpt4all-backend + mkdir win-x64_msvc + cd win-x64_msvc + cmake -G "Visual Studio 17 2022" -A X64 .. + cmake --build . --parallel --config Release + - persist_to_workspace: + root: gpt4all-backend + paths: + - win-x64_msvc/*.dll + build-csharp-linux: docker: - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 @@ -562,6 +682,27 @@ workflows: - build-py-windows - build-py-linux - build-py-macos + build-bindings-backend: + when: << pipeline.parameters.run-bindings-backend-workflow >> + jobs: + - hold: + type: approval + - build-bindings-backend-linux: + filter: + branches: + only: + - build-bindings-backend-macos: + filter: + branches: + only: + - build-bindings-backend-windows: + filter: + branches: + only: + - build-bindings-backend-windows-msvc: + filter: + branches: + only: build-csharp-deploy: when: << pipeline.parameters.run-csharp-workflow >> jobs: From 3c126ffa03b118b9de6ebee72b5861ca3671097f Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 22:56:43 +0200 Subject: [PATCH 080/198] typo --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 541bb916a843..0ed4b6410180 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,7 +11,7 @@ workflows: base-revision: main config-path: .circleci/continue_config.yml mapping: | - gpt4all-bindigs/* run-bindings-workflow true + gpt4all-bindigs/.* run-bindings-workflow true gpt4all-bindings/python/.* run-python-workflow true gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-backend/.* run-chat-workflow true From fec2fd2832b26f82e35a0afa5129363c77446909 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:00:39 +0200 Subject: [PATCH 081/198] try to fix error --- .circleci/continue_config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index e2ae3ee125c0..914b11ee5555 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -13,7 +13,7 @@ parameters: run-chat-workflow: type: boolean default: false - run-bindings-backend-workflow: + run-bindings-workflow: type: boolean default: false run-csharp-workflow: @@ -683,10 +683,10 @@ workflows: - build-py-linux - build-py-macos build-bindings-backend: - when: << pipeline.parameters.run-bindings-backend-workflow >> + when: << pipeline.parameters.run-bindings-workflow >> jobs: - hold: - type: approval + type: approval - build-bindings-backend-linux: filter: branches: From cd3bfea09b68c123e01ae42b8d91cdaa46ac8f55 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:01:34 +0200 Subject: [PATCH 082/198] fix filters --- .circleci/continue_config.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 914b11ee5555..eb36026c35cf 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -688,19 +688,19 @@ workflows: - hold: type: approval - build-bindings-backend-linux: - filter: + filters: branches: only: - 
build-bindings-backend-macos: - filter: + filters: branches: only: - build-bindings-backend-windows: - filter: + filters: branches: only: - build-bindings-backend-windows-msvc: - filter: + filters: branches: only: build-csharp-deploy: From 021a388b385b20e248e80b2cd583156324d4d47b Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:03:13 +0200 Subject: [PATCH 083/198] typo again, should sleep --- .circleci/config.yml | 2 +- .circleci/continue_config.yml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0ed4b6410180..c28fdeaf1383 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,7 +11,7 @@ workflows: base-revision: main config-path: .circleci/continue_config.yml mapping: | - gpt4all-bindigs/.* run-bindings-workflow true + gpt4all-bindings/.* run-bindings-workflow true gpt4all-bindings/python/.* run-python-workflow true gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-backend/.* run-chat-workflow true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index eb36026c35cf..359fbef39568 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -661,6 +661,8 @@ workflows: filters: branches: only: + requires: + - hold - build-py-macos: filters: branches: From 11ac85b01ff55b73cd5d9befa06c3239e9ac11bb Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:04:51 +0200 Subject: [PATCH 084/198] add needed sudo in ubuntu machine scenario --- .circleci/continue_config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 359fbef39568..cdf6334988c8 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -325,8 +325,8 @@ jobs: - run: name: Install dependencies command: | - apt-get update - apt-get install -y cmake build-essential + sudo apt-get update + sudo apt-get install -y cmake build-essential - run: name: Build Libraries command: | From 2e131053e88d9a35178f00da34d2fb8fde437963 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:09:11 +0200 Subject: [PATCH 085/198] fix missing cmake in win msvc job --- .circleci/continue_config.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index cdf6334988c8..e61a86cf82e9 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -416,9 +416,14 @@ jobs: command: | git submodule sync git submodule update --init --recursive + - run: + name: Install dependencies + command: | + choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' - run: name: Build Libraries command: | + $Env:Path += ";C:\Program Files\CMake\bin" cd gpt4all-backend mkdir win-x64_msvc cd win-x64_msvc From ddd087dadb5cbb0681650b1cc51fc9195d0c035d Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:10:12 +0200 Subject: [PATCH 086/198] fix wrong cmake arg in macos job --- .circleci/continue_config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index e61a86cf82e9..5bc507760e63 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -361,8 +361,8 @@ jobs: cd gpt4all-backend mkdir osx-x64 cd osx-x64 - cmake .. - cmake --build . --parallel --config Release -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" + cmake .. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" + cmake --build . 
--parallel --config Release cp *.dylib "$NATIVE_DIR" - persist_to_workspace: root: gpt4all-backend From f4a0fc6cef8e7794e7679cdc89f0eed5f3735ba5 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:11:42 +0200 Subject: [PATCH 087/198] add holds --- .circleci/continue_config.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 5bc507760e63..099f03d17021 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -698,18 +698,26 @@ workflows: filters: branches: only: + requires: + - hold - build-bindings-backend-macos: filters: branches: only: + requires: + - hold - build-bindings-backend-windows: filters: branches: only: + requires: + - hold - build-bindings-backend-windows-msvc: filters: branches: only: + requires: + - hold build-csharp-deploy: when: << pipeline.parameters.run-csharp-workflow >> jobs: From 289c96cdf855ba52d8e6f17ab28a27456b1f240b Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:13:36 +0200 Subject: [PATCH 088/198] remove bad cp --- .circleci/continue_config.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 099f03d17021..7a8f077c2b23 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -335,7 +335,6 @@ jobs: cd linux-x64 cmake .. cmake --build . --parallel --config Release - cp *.so "$NATIVE_DIR" - persist_to_workspace: root: gpt4all-backend paths: @@ -363,7 +362,6 @@ jobs: cd osx-x64 cmake .. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" cmake --build . --parallel --config Release - cp *.dylib "$NATIVE_DIR" - persist_to_workspace: root: gpt4all-backend paths: From 99ca80cf1ac08b132c4695821f644a93d2ed7ddd Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:19:21 +0200 Subject: [PATCH 089/198] change build-bindings-backend when condition --- .circleci/config.yml | 1 - .circleci/continue_config.yml | 44 +++++++++++++++++------------------ 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c28fdeaf1383..dc76e62fe268 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,7 +11,6 @@ workflows: base-revision: main config-path: .circleci/continue_config.yml mapping: | - gpt4all-bindings/.* run-bindings-workflow true gpt4all-bindings/python/.* run-python-workflow true gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-backend/.* run-chat-workflow true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 7a8f077c2b23..c3ed13fc5626 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -13,9 +13,6 @@ parameters: run-chat-workflow: type: boolean default: false - run-bindings-workflow: - type: boolean - default: false run-csharp-workflow: type: boolean default: false @@ -687,66 +684,69 @@ workflows: - build-py-windows - build-py-linux - build-py-macos - build-bindings-backend: - when: << pipeline.parameters.run-bindings-workflow >> + build-csharp-deploy: + when: << pipeline.parameters.run-csharp-workflow >> jobs: + - nuget-hold: + type: approval - hold: type: approval - - build-bindings-backend-linux: + - build-csharp-linux: filters: branches: only: requires: - hold - - build-bindings-backend-macos: + - build-csharp-windows: filters: branches: only: requires: - hold - - build-bindings-backend-windows: + - build-csharp-macos: filters: branches: only: requires: - hold - - build-bindings-backend-windows-msvc: + - 
store-and-upload-nupkgs: filters: branches: only: requires: - - hold - build-csharp-deploy: - when: << pipeline.parameters.run-csharp-workflow >> + - nuget-hold + - build-csharp-windows + - build-csharp-linux + - build-csharp-macos + build-bindings-backend: + when: + or: + - << pipeline.parameters.run-python-workflow >> + - << pipeline.parameters.run-csharp-workflow >> jobs: - - nuget-hold: - type: approval - hold: type: approval - - build-csharp-linux: + - build-bindings-backend-linux: filters: branches: only: requires: - hold - - build-csharp-windows: + - build-bindings-backend-macos: filters: branches: only: requires: - hold - - build-csharp-macos: + - build-bindings-backend-windows: filters: branches: only: requires: - hold - - store-and-upload-nupkgs: + - build-bindings-backend-windows-msvc: filters: branches: only: requires: - - nuget-hold - - build-csharp-windows - - build-csharp-linux - - build-csharp-macos \ No newline at end of file + - hold \ No newline at end of file From 51928cd6c342fe6818f3bfc90566ca6cc2533485 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:22:52 +0200 Subject: [PATCH 090/198] fix msvc putting file in target dir --- .circleci/continue_config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index c3ed13fc5626..3bfc9d20247f 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -427,7 +427,7 @@ jobs: - persist_to_workspace: root: gpt4all-backend paths: - - win-x64_msvc/*.dll + - win-x64_msvc/Release/*.dll build-csharp-linux: docker: From d107cccf182bd1a0a49371a4252f96bf3a7b2f49 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:32:04 +0200 Subject: [PATCH 091/198] msvc dll path fix --- .circleci/continue_config.yml | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 3bfc9d20247f..e9c875dde960 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -424,10 +424,23 @@ jobs: cd win-x64_msvc cmake -G "Visual Studio 17 2022" -A X64 .. cmake --build . --parallel --config Release + cp bin/Release/*.dll . 
- persist_to_workspace: root: gpt4all-backend paths: - - win-x64_msvc/Release/*.dll + - win-x64_msvc/*.dll + + build-bindings-backend-debug: + docker: + - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 + steps: + - setup_remote_docker + - attach_workspace: + at: /tmp/workspace + - run: + name: TEST - list libraries + command: | + ls -R /tmp/workspace build-csharp-linux: docker: @@ -718,6 +731,7 @@ workflows: - build-csharp-windows - build-csharp-linux - build-csharp-macos + build-bindings-backend: when: or: @@ -749,4 +763,14 @@ workflows: branches: only: requires: - - hold \ No newline at end of file + - hold + - build-bindings-backend-debug: + filters: + branches: + only: + requires: + - build-bindings-backend-linux + - build-bindings-backend-macos + - build-bindings-backend-windows + - build-bindings-backend-windows-msvc + \ No newline at end of file From 113c25e4de3120f7e6079eddd90d4f338f26874b Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 18 Jun 2023 23:47:22 +0200 Subject: [PATCH 092/198] fix win mingw dll path --- .circleci/continue_config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index e9c875dde960..66aa3d83f30e 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -394,6 +394,7 @@ jobs: cmake -G "MinGW Makefiles" .. cmake --build . --parallel --config Release cp "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin\*dll" . + cp bin/*.dll . - persist_to_workspace: root: gpt4all-backend paths: From 6d9575e1033d4745bfe85d958086ec0549f8e157 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Tue, 20 Jun 2023 20:55:01 +0200 Subject: [PATCH 093/198] copy only needed mingw dlls --- .circleci/continue_config.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 66aa3d83f30e..0b105f0ac2c9 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -386,14 +386,17 @@ jobs: - run: name: Build Libraries command: | - $Env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + $MinGWBin = "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + $Env:Path += ";$MinGwBin" $Env:Path += ";C:\Program Files\CMake\bin" cd gpt4all-backend mkdir win-x64 cd win-x64 - cmake -G "MinGW Makefiles" .. + cmake -G "MinGW Makefiles" .. && cmake --build . --parallel --config Release - cp "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin\*dll" . + cp "$MinGWBin\libgcc*.dll" . + cp "$MinGWBin\libstdc++*.dll" . + cp "$MinGWBin\libwinpthread*.dll" . cp bin/*.dll . 
- persist_to_workspace: root: gpt4all-backend From 4b7b9975c57a179670e5c1c22aeb31220aaaa97e Mon Sep 17 00:00:00 2001 From: mvenditto Date: Tue, 20 Jun 2023 21:25:11 +0200 Subject: [PATCH 094/198] add lib loading tests + remove dummy test --- .../csharp/Gpt4All.Tests/ModelFactoryTests.cs | 5 -- .../Gpt4All.Tests/NativeLibraryLoaderTests.cs | 56 +++++++++++++++++++ .../PlatformSpecificFactAttribute.cs | 27 +++++++++ gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs | 3 + 4 files changed, 86 insertions(+), 5 deletions(-) create mode 100644 gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs create mode 100644 gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs index bc5b50ed78f1..d7b0569e3986 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/ModelFactoryTests.cs @@ -31,9 +31,4 @@ public void CanLoadMptModel() { using var model = _modelFactory.LoadModel(Constants.MPT_MODEL_PATH); } - - [Fact] - public void DummyTest() - { - } } diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs new file mode 100644 index 000000000000..aaf3517432f6 --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs @@ -0,0 +1,56 @@ +using System.IO; +using Gpt4All.LibraryLoader; +using Xunit; + +namespace Gpt4All.Tests; + +public class NativeLibraryLoaderTests +{ + [Fact] + public void NativeLibraryShouldLoad() + { + var result = NativeLibraryLoader.LoadNativeLibrary(bypassLoading: false); + Assert.True(result.IsSuccess); + } + + private const string LLModelLib = "libllmodel.{0}"; + + [PlatformSpecificFact(Platforms.Windows)] + public void NativeLibraryShouldLoad_Windows() + { + var libraryLoader = new WindowsLibraryLoader(); + + var libraryPath = Path.Combine( + Environment.CurrentDirectory, + string.Format(LLModelLib, "dll")); + + var result = libraryLoader.OpenLibrary(libraryPath); + Assert.True(result.IsSuccess); + } + + [PlatformSpecificFact(Platforms.Linux)] + public void NativeLibraryShouldLoad_Linux() + { + var libraryLoader = new WindowsLibraryLoader(); + + var libraryPath = Path.Combine( + Environment.CurrentDirectory, + string.Format(LLModelLib, "so")); + + var result = libraryLoader.OpenLibrary(libraryPath); + Assert.True(result.IsSuccess); + } + + [PlatformSpecificFact(Platforms.MacOS)] + public void NativeLibraryShouldLoad_MacOS() + { + var libraryLoader = new WindowsLibraryLoader(); + + var libraryPath = Path.Combine( + Environment.CurrentDirectory, + string.Format(LLModelLib, "dylib")); + + var result = libraryLoader.OpenLibrary(libraryPath); + Assert.True(result.IsSuccess); + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs new file mode 100644 index 000000000000..9f322f6a353f --- /dev/null +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/PlatformSpecificFactAttribute.cs @@ -0,0 +1,27 @@ +using Xunit; + +namespace Gpt4All.Tests; + +public static class Platforms +{ + public const string Windows = "windows"; + public const string Linux = "linux"; + public const string MacOS = "macOS"; +} + +///

    +/// This attribute ensures the Fact is only run on the specified platform. +/// +/// +/// for info about the platform string. +/// +public class PlatformSpecificFactAttribute : FactAttribute +{ + public PlatformSpecificFactAttribute(string platform) + { + if (!OperatingSystem.IsOSPlatform(platform)) + { + Skip = $"Test only runs on {platform}."; + } + } +} diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs index 318361a08196..f24f5ba143be 100644 --- a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs +++ b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.cs @@ -1,8 +1,11 @@ using System.Diagnostics; +using System.Runtime.CompilerServices; using Gpt4All.Bindings; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; +[assembly: InternalsVisibleTo("Gpt4All.Tests")] + namespace Gpt4All; public class Gpt4All : IGpt4AllModel From 9eb50cc11589e140dea8aa3e67071cc590a6aa60 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Tue, 20 Jun 2023 21:39:38 +0200 Subject: [PATCH 095/198] refine runtimes persist + c# linux build --- .circleci/continue_config.yml | 46 ++++++++++++++--------------------- 1 file changed, 18 insertions(+), 28 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 0b105f0ac2c9..9c8d2e17493b 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -328,14 +328,14 @@ jobs: name: Build Libraries command: | cd gpt4all-backend - mkdir linux-x64 - cd linux-x64 + mkdir -p runtimes/linux-x64 + cd runtimes/linux-x64 cmake .. cmake --build . --parallel --config Release - persist_to_workspace: root: gpt4all-backend paths: - - linux-x64/*.so + - runtimes/linux-x64/*.so build-bindings-backend-macos: macos: @@ -355,14 +355,14 @@ jobs: name: Build Libraries command: | cd gpt4all-backend - mkdir osx-x64 - cd osx-x64 + mkdir -p runtimes/osx-x64 + cd runtimes/osx-x64 cmake .. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" cmake --build . --parallel --config Release - persist_to_workspace: root: gpt4all-backend paths: - - osx-x64/*.dylib + - runtimes/osx-x64/*.dylib build-bindings-backend-windows: executor: @@ -390,8 +390,8 @@ jobs: $Env:Path += ";$MinGwBin" $Env:Path += ";C:\Program Files\CMake\bin" cd gpt4all-backend - mkdir win-x64 - cd win-x64 + mkdir runtimes/win-x64 + cd runtimes/win-x64 cmake -G "MinGW Makefiles" .. && cmake --build . --parallel --config Release cp "$MinGWBin\libgcc*.dll" . @@ -401,7 +401,7 @@ jobs: - persist_to_workspace: root: gpt4all-backend paths: - - win-x64/*.dll + - runtimes/win-x64/*.dll build-bindings-backend-windows-msvc: machine: @@ -424,15 +424,15 @@ jobs: command: | $Env:Path += ";C:\Program Files\CMake\bin" cd gpt4all-backend - mkdir win-x64_msvc - cd win-x64_msvc + mkdir runtimes/win-x64_msvc + cd runtimes/win-x64_msvc cmake -G "Visual Studio 17 2022" -A X64 .. cmake --build . --parallel --config Release cp bin/Release/*.dll . 
- persist_to_workspace: root: gpt4all-backend paths: - - win-x64_msvc/*.dll + - runtimes/win-x64_msvc/*.dll build-bindings-backend-debug: docker: @@ -444,7 +444,7 @@ jobs: - run: name: TEST - list libraries command: | - ls -R /tmp/workspace + ls -R /tmp/workspace/runtimes build-csharp-linux: docker: @@ -454,24 +454,14 @@ jobs: - restore_cache: keys: - gpt4all-csharp-nuget-packages-nix + - attach_workspace: + at: /tmp/workspace - run: - name: Install dependencies - command: | - apt-get update - apt-get install -y cmake build-essential - - run: - name: Build C library + name: "Prepare Native Libs" command: | - git submodule init - git submodule update --recursive cd gpt4all-bindings/csharp - BASE_DIR="runtimes/linux-x64" - NATIVE_DIR="$BASE_DIR/native" - BUILD_DIR="$BASE_DIR/build" - mkdir -p "$NATIVE_DIR" "$BUILD_DIR" - cmake -S ../../gpt4all-backend -B "$BUILD_DIR" - cmake --build "$BUILD_DIR" -j --config Release - cp "$BUILD_DIR"/*.so "$NATIVE_DIR" + mkdir -p runtimes/linux-x64/native + cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ - run: name: "Install project dependencies" command: | From 0277e8400a6b538ead6ac2c2909ae240a5afb6cd Mon Sep 17 00:00:00 2001 From: mvenditto Date: Tue, 20 Jun 2023 21:40:37 +0200 Subject: [PATCH 096/198] debug ls --- .circleci/continue_config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 9c8d2e17493b..9570e3bacd7a 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -462,6 +462,7 @@ jobs: cd gpt4all-bindings/csharp mkdir -p runtimes/linux-x64/native cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ + ls -R runtimes - run: name: "Install project dependencies" command: | From 380bbcf18f6848a5ea21671464a41ecde66b75b0 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Tue, 20 Jun 2023 21:45:47 +0200 Subject: [PATCH 097/198] fix cmakelist path --- .circleci/continue_config.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 9570e3bacd7a..6173cff665c9 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -330,7 +330,7 @@ jobs: cd gpt4all-backend mkdir -p runtimes/linux-x64 cd runtimes/linux-x64 - cmake .. + cmake ../.. cmake --build . --parallel --config Release - persist_to_workspace: root: gpt4all-backend @@ -357,7 +357,7 @@ jobs: cd gpt4all-backend mkdir -p runtimes/osx-x64 cd runtimes/osx-x64 - cmake .. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" + cmake ../.. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" cmake --build . --parallel --config Release - persist_to_workspace: root: gpt4all-backend @@ -392,7 +392,7 @@ jobs: cd gpt4all-backend mkdir runtimes/win-x64 cd runtimes/win-x64 - cmake -G "MinGW Makefiles" .. && + cmake -G "MinGW Makefiles" ../.. cmake --build . --parallel --config Release cp "$MinGWBin\libgcc*.dll" . cp "$MinGWBin\libstdc++*.dll" . @@ -426,7 +426,7 @@ jobs: cd gpt4all-backend mkdir runtimes/win-x64_msvc cd runtimes/win-x64_msvc - cmake -G "Visual Studio 17 2022" -A X64 .. + cmake -G "Visual Studio 17 2022" -A X64 ../.. cmake --build . --parallel --config Release cp bin/Release/*.dll . 
- persist_to_workspace: From 4a99e6662ace1c1fb0ccb2757f5553eb77a34755 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Tue, 20 Jun 2023 21:55:43 +0200 Subject: [PATCH 098/198] fix csharp jobs deps --- .circleci/continue_config.yml | 55 ++++++++++------------------------- 1 file changed, 15 insertions(+), 40 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 6173cff665c9..21ada228dc6c 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -491,10 +491,6 @@ jobs: trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - - persist_to_workspace: - root: gpt4all-bindings/csharp - paths: - - runtimes/linux-x64/native build-csharp-windows: executor: @@ -506,22 +502,15 @@ jobs: - restore_cache: keys: - gpt4all-csharp-nuget-packages-win + - attach_workspace: + at: /tmp/workspace - run: - name: Install MinGW64 - command: choco install -y mingw --force --no-progress - - run: - name: Install dependencies - command: | - choco install -y cmake --installargs 'ADD_CMAKE_TO_PATH=System' - - run: - name: Build C library + name: "Prepare Native Libs" command: | - git submodule init - git submodule update --recursive cd gpt4all-bindings/csharp - $Env:Path += ";C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" - $Env:Path += ";C:\Program Files\CMake\bin" - ./build_win-mingw.ps1 + mkdir -p runtimes/win-x64/native + cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ + ls -R runtimes - run: name: "Install project dependencies" command: | @@ -550,10 +539,6 @@ jobs: trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - - persist_to_workspace: - root: gpt4all-bindings/csharp - paths: - - runtimes/win-x64/native/ build-csharp-macos: macos: @@ -566,25 +551,16 @@ jobs: - run: name: Install dependencies command: | - brew install cmake brew install --cask dotnet-sdk - # curl https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.sh -o dotnet-install.sh - # chmod +x dotnet-install.sh - # ./dotnet-install.sh -Channel 7.0 --install-dir $HOME/cli-tools - # $HOME/cli-tools/dotnet --info + - attach_workspace: + at: /tmp/workspace - run: - name: Build C library + name: "Prepare Native Libs" command: | - git submodule init - git submodule update --recursive cd gpt4all-bindings/csharp - BASE_DIR="runtimes/osx-x64" - NATIVE_DIR="$BASE_DIR/native" - BUILD_DIR="$BASE_DIR/build" - mkdir -p "$NATIVE_DIR" "$BUILD_DIR" - cmake -S ../../gpt4all-backend -B "$BUILD_DIR" -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" - cmake --build "$BUILD_DIR" -j --config Release - cp "$BUILD_DIR"/*.dylib "$NATIVE_DIR" + mkdir -p runtimes/osx-x64/native + cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/win-x64/native/ + ls -R runtimes - run: name: "Install project dependencies" command: | @@ -613,10 +589,6 @@ jobs: trx2junit TestResults/*.trx - store_test_results: path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults - - persist_to_workspace: - root: gpt4all-bindings/csharp - paths: - - runtimes/osx-x64/native store-and-upload-nupkgs: docker: @@ -705,18 +677,21 @@ workflows: only: requires: - hold + - build-bindings-backend-linux - build-csharp-windows: filters: branches: only: requires: - hold + - build-bindings-backend-windows - build-csharp-macos: filters: branches: only: requires: - hold + - build-bindings-backend-macos - store-and-upload-nupkgs: filters: branches: From cac18c273e4cb841c44f5ba4fdb404cac00e8255 Mon Sep 17 00:00:00 2001 
From: mvenditto Date: Wed, 21 Jun 2023 21:10:12 +0200 Subject: [PATCH 099/198] More experiments --- .circleci/continue_config.yml | 329 +++++++++++++++++----------------- 1 file changed, 165 insertions(+), 164 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 21ada228dc6c..ff4d9aff605c 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -450,47 +450,50 @@ jobs: docker: - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/linux-x64/native - cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + - when: + condition: << pipeline.parameters.run-csharp-workflow >> + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix + - attach_workspace: + at: /tmp/workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes/linux-x64/native + cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ + ls -R runtimes + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet restore Gpt4All + - save_cache: + paths: + - ~/.nuget/packages + key: gpt4all-csharp-nuget-packages-nix + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export PATH="$PATH:$HOME/.dotnet/tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults build-csharp-windows: executor: @@ -498,97 +501,103 @@ jobs: size: large shell: powershell.exe -ExecutionPolicy Bypass steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-win - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/win-x64/native - cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet.exe restore Gpt4All - - save_cache: - paths: - - C:\Users\circleci\.nuget\packages - key: gpt4all-csharp-nuget-packages-win - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet.exe 
build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + - when: + condition: << pipeline.parameters.run-csharp-workflow >> + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-win + - attach_workspace: + at: /tmp/workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes/win-x64/native + cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ + ls -R runtimes + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet.exe restore Gpt4All + - save_cache: + paths: + - C:\Users\circleci\.nuget\packages + key: gpt4all-csharp-nuget-packages-win + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet.exe build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults build-csharp-macos: macos: xcode: "14.0.0" steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: Install dependencies - command: | - brew install --cask dotnet-sdk - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/osx-x64/native - cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/win-x64/native/ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + - when: + condition: << pipeline.parameters.run-csharp-workflow >> + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix + - run: + name: Install dependencies + command: | + brew install --cask dotnet-sdk + - attach_workspace: + at: /tmp/workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes/osx-x64/native + cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/win-x64/native/ + ls -R runtimes + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet restore Gpt4All + - save_cache: + paths: 
+ - ~/.nuget/packages + key: gpt4all-csharp-nuget-packages-nix + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export PATH="$PATH:$HOME/.dotnet/tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults store-and-upload-nupkgs: docker: @@ -664,83 +673,75 @@ workflows: - build-py-windows - build-py-linux - build-py-macos - build-csharp-deploy: - when: << pipeline.parameters.run-csharp-workflow >> + build-bindings: + when: + or: + - << pipeline.parameters.run-python-workflow >> + - << pipeline.parameters.run-csharp-workflow >> jobs: - - nuget-hold: - type: approval - hold: type: approval - - build-csharp-linux: + - nuget-hold: + type: approval + - build-bindings-backend-linux: filters: branches: only: requires: - hold - - build-bindings-backend-linux - - build-csharp-windows: + - build-bindings-backend-macos: filters: branches: only: requires: - hold - - build-bindings-backend-windows - - build-csharp-macos: + - build-bindings-backend-windows: filters: branches: only: requires: - hold - - build-bindings-backend-macos - - store-and-upload-nupkgs: + - build-bindings-backend-windows-msvc: filters: branches: only: requires: - - nuget-hold - - build-csharp-windows - - build-csharp-linux - - build-csharp-macos - - build-bindings-backend: - when: - or: - - << pipeline.parameters.run-python-workflow >> - - << pipeline.parameters.run-csharp-workflow >> - jobs: - - hold: - type: approval - - build-bindings-backend-linux: + - hold + - build-bindings-backend-debug: filters: branches: only: requires: - - hold - - build-bindings-backend-macos: + - build-bindings-backend-linux + - build-bindings-backend-macos + - build-bindings-backend-windows + - build-bindings-backend-windows-msvc + # CSharp Jobs + - build-csharp-linux: filters: branches: only: requires: - - hold - - build-bindings-backend-windows: + - build-bindings-backend-linux + - build-csharp-windows: filters: branches: only: requires: - - hold - - build-bindings-backend-windows-msvc: + - build-bindings-backend-windows + - build-csharp-macos: filters: branches: only: requires: - - hold - - build-bindings-backend-debug: + - build-bindings-backend-macos + - store-and-upload-nupkgs: filters: branches: only: requires: - - build-bindings-backend-linux - - build-bindings-backend-macos - - build-bindings-backend-windows - - build-bindings-backend-windows-msvc + - nuget-hold + - build-csharp-windows + - build-csharp-linux + - build-csharp-macos \ No newline at end of file From 620ccda696098357ad536012f6fe3532ec0c7fd4 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Wed, 21 Jun 2023 21:21:11 +0200 Subject: [PATCH 100/198] try fix --- .circleci/continue_config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index ff4d9aff605c..2483109c08a7 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -444,7 +444,7 @@ jobs: - run: name: TEST - list libraries command: | - ls -R /tmp/workspace/runtimes + ls -R /tmp/workspace build-csharp-linux: docker: @@ -464,6 +464,7 @@ jobs: command: | cd gpt4all-bindings/csharp mkdir -p 
runtimes/linux-x64/native + ls -R /tmp/workspace cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ ls -R runtimes - run: From 2927d11a285962293b29f32e8bb89fce534aea45 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Wed, 21 Jun 2023 21:29:59 +0200 Subject: [PATCH 101/198] fix --- .circleci/continue_config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 2483109c08a7..1700d2b3a6c4 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -454,6 +454,7 @@ jobs: condition: << pipeline.parameters.run-csharp-workflow >> steps: - checkout + - setup_remote_docker - restore_cache: keys: - gpt4all-csharp-nuget-packages-nix @@ -465,7 +466,7 @@ jobs: cd gpt4all-bindings/csharp mkdir -p runtimes/linux-x64/native ls -R /tmp/workspace - cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ + cp /tmp/workspace/runtimes/linux-x64/* runtimes/linux-x64/native/ ls -R runtimes - run: name: "Install project dependencies" From f3b6f49684b47fd71fad0f4cd5eb5e710983b1c6 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sat, 24 Jun 2023 22:13:59 +0200 Subject: [PATCH 102/198] fix workspace symlinks on unix, fix persist_workspace on windows and macos runtimes dir --- .circleci/continue_config.yml | 50 +++++++++++------------------------ 1 file changed, 16 insertions(+), 34 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 1700d2b3a6c4..2c5cb8e93f73 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -328,10 +328,12 @@ jobs: name: Build Libraries command: | cd gpt4all-backend - mkdir -p runtimes/linux-x64 - cd runtimes/linux-x64 + mkdir -p runtimes/build + cd runtimes/build cmake ../.. cmake --build . --parallel --config Release + mkdir ../linux-x64 + cp -L *.so ../linux-x64 # otherwise persist_to_workspace seems to mess symlinks - persist_to_workspace: root: gpt4all-backend paths: @@ -355,10 +357,12 @@ jobs: name: Build Libraries command: | cd gpt4all-backend - mkdir -p runtimes/osx-x64 - cd runtimes/osx-x64 + mkdir -p runtimes/build + cd runtimes/build cmake ../.. -DCMAKE_OSX_ARCHITECTURES="x86_64;arm64" cmake --build . 
--parallel --config Release + mkdir ../osx-x64 + cp -L *.dylib ../osx-x64 - persist_to_workspace: root: gpt4all-backend paths: @@ -434,18 +438,6 @@ jobs: paths: - runtimes/win-x64_msvc/*.dll - build-bindings-backend-debug: - docker: - - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 - steps: - - setup_remote_docker - - attach_workspace: - at: /tmp/workspace - - run: - name: TEST - list libraries - command: | - ls -R /tmp/workspace - build-csharp-linux: docker: - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 @@ -454,10 +446,6 @@ jobs: condition: << pipeline.parameters.run-csharp-workflow >> steps: - checkout - - setup_remote_docker - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - attach_workspace: at: /tmp/workspace - run: @@ -468,6 +456,9 @@ jobs: ls -R /tmp/workspace cp /tmp/workspace/runtimes/linux-x64/* runtimes/linux-x64/native/ ls -R runtimes + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix - run: name: "Install project dependencies" command: | @@ -511,13 +502,13 @@ jobs: keys: - gpt4all-csharp-nuget-packages-win - attach_workspace: - at: /tmp/workspace + at: C:\Users\circleci\workspace - run: name: "Prepare Native Libs" command: | cd gpt4all-bindings/csharp - mkdir -p runtimes/win-x64/native - cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ + mkdir -p runtimes\win-x64\native + cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\ ls -R runtimes - run: name: "Install project dependencies" @@ -569,8 +560,8 @@ jobs: name: "Prepare Native Libs" command: | cd gpt4all-bindings/csharp - mkdir -p runtimes/osx-x64/native - cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/win-x64/native/ + mkdir -p runtimes/osx/native + cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ ls -R runtimes - run: name: "Install project dependencies" @@ -709,15 +700,6 @@ workflows: only: requires: - hold - - build-bindings-backend-debug: - filters: - branches: - only: - requires: - - build-bindings-backend-linux - - build-bindings-backend-macos - - build-bindings-backend-windows - - build-bindings-backend-windows-msvc # CSharp Jobs - build-csharp-linux: filters: From c92c1af697222c1222b06a7603f84c56644c13b4 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 25 Jun 2023 22:52:34 +0200 Subject: [PATCH 103/198] nuget pack and push --- .circleci/continue_config.yml | 26 ++++++++++++++----- .../csharp/Gpt4All/Gpt4All.csproj | 8 ++++++ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 2c5cb8e93f73..3673541875eb 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -453,8 +453,7 @@ jobs: command: | cd gpt4all-bindings/csharp mkdir -p runtimes/linux-x64/native - ls -R /tmp/workspace - cp /tmp/workspace/runtimes/linux-x64/* runtimes/linux-x64/native/ + cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ ls -R runtimes - restore_cache: keys: @@ -596,13 +595,26 @@ jobs: docker: - image: mcr.microsoft.com/dotnet/sdk:6.0-jammy # Ubuntu 22.04 steps: - - setup_remote_docker - attach_workspace: at: /tmp/workspace - - run: - name: TEST - list libraries - command: | - ls -R /tmp/workspace + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix + - run: + name: NuGet Pack + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes/linux-x64/native + cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ + mkdir -p runtimes/win-x64/native + cp 
/tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ + mkdir -p runtimes/osx/native + cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ + dotnet pack ./Gpt4All/Gpt4All.csproj -p:IncludeSymbols=true -p:SymbolPackageFormat=snupkg -c Release + dotnet nuget push ./Gpt4All/bin/Release/Gpt4All.*.nupkg -s $NUGET_URL -k $NUGET_TOKEN --skip-duplicate + - store_artifacts: + path: gpt4all-bindings/csharp/Gpt4All/bin/Release workflows: version: 2 diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj index 728855124dcb..ed4ed8247160 100644 --- a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj +++ b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj @@ -5,6 +5,14 @@ enable true + + + + + + + + From 4d0201ac3366acfa0834f7d01f634a78f34295f5 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 9 Jul 2023 18:02:34 +0200 Subject: [PATCH 104/198] copy metal kernels for macos --- .circleci/continue_config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 3673541875eb..a2877e405f2e 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -363,10 +363,13 @@ jobs: cmake --build . --parallel --config Release mkdir ../osx-x64 cp -L *.dylib ../osx-x64 + cp ../../llama.cpp-mainline/*.metal ../osx-x64 + ls ../osx-x64 - persist_to_workspace: root: gpt4all-backend paths: - runtimes/osx-x64/*.dylib + - runtimes/osx-x64/*.metal build-bindings-backend-windows: executor: From 991b7468c9d4703da758b9101356be3e8b1f0d4a Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 9 Jul 2023 18:09:58 +0200 Subject: [PATCH 105/198] fix native lib loader tests --- .../csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs b/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs index aaf3517432f6..7d5645931c99 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs +++ b/gpt4all-bindings/csharp/Gpt4All.Tests/NativeLibraryLoaderTests.cs @@ -31,7 +31,7 @@ public void NativeLibraryShouldLoad_Windows() [PlatformSpecificFact(Platforms.Linux)] public void NativeLibraryShouldLoad_Linux() { - var libraryLoader = new WindowsLibraryLoader(); + var libraryLoader = new LinuxLibraryLoader(); var libraryPath = Path.Combine( Environment.CurrentDirectory, @@ -44,7 +44,7 @@ public void NativeLibraryShouldLoad_Linux() [PlatformSpecificFact(Platforms.MacOS)] public void NativeLibraryShouldLoad_MacOS() { - var libraryLoader = new WindowsLibraryLoader(); + var libraryLoader = new MacOsLibraryLoader(); var libraryPath = Path.Combine( Environment.CurrentDirectory, From 7efb43c2e4a08e172c8bc29c7063c1efd34e109a Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 9 Jul 2023 19:30:47 +0200 Subject: [PATCH 106/198] copy metal kernels on macos builds --- gpt4all-bindings/csharp/Directory.Build.props | 2 +- gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj | 3 ++- gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/gpt4all-bindings/csharp/Directory.Build.props b/gpt4all-bindings/csharp/Directory.Build.props index 9f7cf5bf4450..00ea66f0912f 100644 --- a/gpt4all-bindings/csharp/Directory.Build.props +++ b/gpt4all-bindings/csharp/Directory.Build.props @@ -5,7 +5,7 @@ en-US - 0.6.1-alpha + 0.6.2-alpha $(VersionSuffix) $(Version)$(VersionSuffix) true diff --git 
a/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj b/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj index 9eb01e146604..543acdc34638 100644 --- a/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj +++ b/gpt4all-bindings/csharp/Gpt4All.Samples/Gpt4All.Samples.csproj @@ -1,4 +1,4 @@ - + Exe @@ -27,5 +27,6 @@ + diff --git a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj index ed4ed8247160..416ad4a8b72d 100644 --- a/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj +++ b/gpt4all-bindings/csharp/Gpt4All/Gpt4All.csproj @@ -12,6 +12,9 @@ + + true + From b96b6ef38f2313020d8c138d30c5c7ea8406d9c3 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 9 Jul 2023 19:46:58 +0200 Subject: [PATCH 107/198] pack metal files nuget --- .circleci/continue_config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index a2877e405f2e..303c441c7a9d 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -564,6 +564,7 @@ jobs: cd gpt4all-bindings/csharp mkdir -p runtimes/osx/native cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ + cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/ ls -R runtimes - run: name: "Install project dependencies" @@ -614,6 +615,7 @@ jobs: cp /tmp/workspace/runtimes/win-x64/*.dll runtimes/win-x64/native/ mkdir -p runtimes/osx/native cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ + cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/ dotnet pack ./Gpt4All/Gpt4All.csproj -p:IncludeSymbols=true -p:SymbolPackageFormat=snupkg -c Release dotnet nuget push ./Gpt4All/bin/Release/Gpt4All.*.nupkg -s $NUGET_URL -k $NUGET_TOKEN --skip-duplicate - store_artifacts: From 8a31239e9013a4b223f256badcc23d5d5679b067 Mon Sep 17 00:00:00 2001 From: mvenditto Date: Sun, 9 Jul 2023 19:48:28 +0200 Subject: [PATCH 108/198] bump version --- gpt4all-bindings/csharp/Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/csharp/Directory.Build.props b/gpt4all-bindings/csharp/Directory.Build.props index 00ea66f0912f..75e32e34ae8c 100644 --- a/gpt4all-bindings/csharp/Directory.Build.props +++ b/gpt4all-bindings/csharp/Directory.Build.props @@ -5,7 +5,7 @@ en-US - 0.6.2-alpha + 0.6.3-alpha $(VersionSuffix) $(Version)$(VersionSuffix) true From e9d42fba358d52eda3c070889e08f1f508e9c8da Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Tue, 11 Jul 2023 18:54:26 -0400 Subject: [PATCH 109/198] Don't show first start more than once. --- gpt4all-chat/main.qml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml index 5bb723bc527c..99ba585325e3 100644 --- a/gpt4all-chat/main.qml +++ b/gpt4all-chat/main.qml @@ -88,6 +88,7 @@ Window { } property bool hasShownModelDownload: false + property bool hasShownFirstStart: false function startupDialogs() { if (!LLM.compatHardware) { @@ -97,8 +98,9 @@ Window { } // check for first time start of this version - if (Download.isFirstStart()) { + if (!hasShownFirstStart && Download.isFirstStart()) { firstStartDialog.open(); + hasShownFirstStart = true; return; } From 13b2d47be54c56ab8675a8180f6b1cfdc5c493d9 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 08:50:21 -0400 Subject: [PATCH 110/198] Provide an error dialog if for any reason we can't access the settings file. 
--- gpt4all-chat/llm.cpp | 9 ++++++++- gpt4all-chat/llm.h | 6 ++---- gpt4all-chat/main.qml | 24 +++++++++++++++++++++++- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/gpt4all-chat/llm.cpp b/gpt4all-chat/llm.cpp index 8a69d6e739b4..f831ea477a32 100644 --- a/gpt4all-chat/llm.cpp +++ b/gpt4all-chat/llm.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include class MyLLM: public LLM { }; @@ -48,7 +49,13 @@ LLM::LLM() #endif m_compatHardware = minimal; - emit compatHardwareChanged(); +} + +bool LLM::hasSettingsAccess() const +{ + QSettings settings; + settings.sync(); + return settings.status() == QSettings::NoError; } bool LLM::checkForUpdates() const diff --git a/gpt4all-chat/llm.h b/gpt4all-chat/llm.h index 8b8894fa2021..8a582e3a23ec 100644 --- a/gpt4all-chat/llm.h +++ b/gpt4all-chat/llm.h @@ -6,12 +6,11 @@ class LLM : public QObject { Q_OBJECT - Q_PROPERTY(bool compatHardware READ compatHardware NOTIFY compatHardwareChanged) - public: static LLM *globalInstance(); - bool compatHardware() const { return m_compatHardware; } + Q_INVOKABLE bool hasSettingsAccess() const; + Q_INVOKABLE bool compatHardware() const { return m_compatHardware; } Q_INVOKABLE bool checkForUpdates() const; Q_INVOKABLE bool directoryExists(const QString &path) const; @@ -22,7 +21,6 @@ class LLM : public QObject Q_SIGNALS: void chatListModelChanged(); void modelListChanged(); - void compatHardwareChanged(); private: bool m_compatHardware; diff --git a/gpt4all-chat/main.qml b/gpt4all-chat/main.qml index 99ba585325e3..6c850805b120 100644 --- a/gpt4all-chat/main.qml +++ b/gpt4all-chat/main.qml @@ -89,14 +89,22 @@ Window { property bool hasShownModelDownload: false property bool hasShownFirstStart: false + property bool hasShownSettingsAccess: false function startupDialogs() { - if (!LLM.compatHardware) { + if (!LLM.compatHardware()) { Network.sendNonCompatHardware(); errorCompatHardware.open(); return; } + // check if we have access to settings and if not show an error + if (!hasShownSettingsAccess && !LLM.hasSettingsAccess()) { + errorSettingsAccess.open(); + hasShownSettingsAccess = true; + return; + } + // check for first time start of this version if (!hasShownFirstStart && Download.isFirstStart()) { firstStartDialog.open(); @@ -135,6 +143,20 @@ Window { + qsTr("https://en.wikipedia.org/wiki/Advanced_Vector_Extensions") } + PopupDialog { + id: errorSettingsAccess + anchors.centerIn: parent + shouldTimeOut: false + shouldShowBusy: false + modal: true + text: qsTr("

    Encountered an error starting up:") + qsTr("\"Inability to access settings file.\"") + qsTr("
    Unfortunately, something is preventing the program from accessing ") + + qsTr("the settings file. This could be caused by incorrect permissions in the local ") + + qsTr("app config directory where the settings file is located. ") + + qsTr("Check out our discord channel for help.") + } + StartupDialog { id: firstStartDialog anchors.centerIn: parent From 0d726b22b8e2783ae712b83f25ed76d51daf4220 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 10:34:10 -0400 Subject: [PATCH 111/198] When we explicitly cancel an operation we shouldn't throw an error. --- gpt4all-chat/chatgpt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/chatgpt.cpp b/gpt4all-chat/chatgpt.cpp index 13e9cd5db0bb..c68da5df75d1 100644 --- a/gpt4all-chat/chatgpt.cpp +++ b/gpt4all-chat/chatgpt.cpp @@ -244,7 +244,7 @@ void ChatGPTWorker::handleReadyRead() void ChatGPTWorker::handleErrorOccurred(QNetworkReply::NetworkError code) { QNetworkReply *reply = qobject_cast(sender()); - if (!reply) { + if (!reply || reply->error() == QNetworkReply::OperationCanceledError /*when we call abort on purpose*/) { emit finished(); return; } From f0faa23ad5c248c0597c81c853bf8bdfe7d43a80 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 10:49:24 -0400 Subject: [PATCH 112/198] cmakelists: always export build commands (#1179) friendly for using editors with clangd integration that don't also manage the build themselves --- gpt4all-backend/CMakeLists.txt | 1 + gpt4all-chat/CMakeLists.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/gpt4all-backend/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt index 80a3f000f14a..5f91a88ecd4d 100644 --- a/gpt4all-backend/CMakeLists.txt +++ b/gpt4all-backend/CMakeLists.txt @@ -1,5 +1,6 @@ cmake_minimum_required(VERSION 3.16) set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) if(APPLE) option(BUILD_UNIVERSAL "Build a Universal binary on macOS" ON) diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index cfd68ae0aa3a..a94eb844f7c3 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -1,5 +1,6 @@ cmake_minimum_required(VERSION 3.16) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) From ad0e7fd01f988c12609439400a8f0b3a307dae1d Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 10:47:13 -0400 Subject: [PATCH 113/198] chatgpt: ensure no extra newline in header --- gpt4all-chat/chatgpt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/chatgpt.cpp b/gpt4all-chat/chatgpt.cpp index c68da5df75d1..11b0be49627f 100644 --- a/gpt4all-chat/chatgpt.cpp +++ b/gpt4all-chat/chatgpt.cpp @@ -155,7 +155,7 @@ void ChatGPTWorker::request(const QString &apiKey, m_ctx = promptCtx; QUrl openaiUrl("https://api.openai.com/v1/chat/completions"); - const QString authorization = QString("Bearer %1").arg(apiKey); + const QString authorization = QString("Bearer %1").arg(apiKey).trimmed(); QNetworkRequest request(openaiUrl); request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); request.setRawHeader("Authorization", authorization.toUtf8()); From 95b8fb312e5df8ce08a583c67f1e6d1e98985a21 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 11:44:08 -0400 Subject: [PATCH 114/198] windows/msvc: use high level processor feature detection API see https://learn.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-isprocessorfeaturepresent --- 
gpt4all-backend/llmodel.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index d7c721e2db3c..24b424c579e6 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -10,6 +10,9 @@ #include #include #include +#ifdef _MSC_VER +#include +#endif std::string s_implementations_search_path = "."; @@ -18,9 +21,7 @@ static bool has_at_least_minimal_hardware() { #ifndef _MSC_VER return __builtin_cpu_supports("avx"); #else - int cpuInfo[4]; - __cpuid(cpuInfo, 1); - return cpuInfo[2] & (1 << 28); + return IsProcessorFeaturePresent(PF_AVX_INSTRUCTIONS_AVAILABLE); #endif #else return true; // Don't know how to handle non-x86_64 @@ -32,9 +33,7 @@ static bool requires_avxonly() { #ifndef _MSC_VER return !__builtin_cpu_supports("avx2"); #else - int cpuInfo[4]; - __cpuidex(cpuInfo, 7, 0); - return !(cpuInfo[1] & (1 << 5)); + return !IsProcessorFeaturePresent(PF_AVX2_INSTRUCTIONS_AVAILABLE); #endif #else return false; // Don't know how to handle non-x86_64 From 432b7ebbd7af4657cf9e7e595dadae69a55b8d32 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 12:04:56 -0400 Subject: [PATCH 115/198] include windows.h just to be safe --- gpt4all-backend/llmodel.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index 24b424c579e6..fdf3597a8b2e 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -11,6 +11,7 @@ #include #include #ifdef _MSC_VER +#include #include #endif From e9897518d1c132f0f75d3ebc1a6d7a4b53b0bbee Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 11:46:40 -0400 Subject: [PATCH 116/198] Show busy if models.json download taking longer than expected. 
--- gpt4all-chat/modellist.cpp | 26 +++++++++++++++++++++- gpt4all-chat/modellist.h | 5 +++++ gpt4all-chat/qml/ModelDownloaderDialog.qml | 10 ++++++++- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index 1a622c0780ad..425c1c85d35b 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -222,6 +222,7 @@ ModelList::ModelList() : QAbstractListModel(nullptr) , m_installedModels(new InstalledModels(this)) , m_downloadableModels(new DownloadableModels(this)) + , m_asyncModelRequestOngoing(false) { m_installedModels->setSourceModel(this); m_downloadableModels->setSourceModel(this); @@ -899,6 +900,9 @@ void ModelList::updateModelsFromJson() void ModelList::updateModelsFromJsonAsync() { + m_asyncModelRequestOngoing = true; + emit asyncModelRequestOngoingChanged(); + #if defined(USE_LOCAL_MODELSJSON) QUrl jsonUrl("file://" + QDir::homePath() + "/dev/large_language_models/gpt4all/gpt4all-chat/metadata/models.json"); #else @@ -911,17 +915,37 @@ void ModelList::updateModelsFromJsonAsync() QNetworkReply *jsonReply = m_networkManager.get(request); connect(qApp, &QCoreApplication::aboutToQuit, jsonReply, &QNetworkReply::abort); connect(jsonReply, &QNetworkReply::finished, this, &ModelList::handleModelsJsonDownloadFinished); + connect(jsonReply, &QNetworkReply::errorOccurred, this, &ModelList::handleModelsJsonDownloadErrorOccurred); } void ModelList::handleModelsJsonDownloadFinished() { QNetworkReply *jsonReply = qobject_cast(sender()); - if (!jsonReply) + if (!jsonReply) { + m_asyncModelRequestOngoing = false; + emit asyncModelRequestOngoingChanged(); return; + } QByteArray jsonData = jsonReply->readAll(); jsonReply->deleteLater(); parseModelsJsonFile(jsonData, true); + m_asyncModelRequestOngoing = false; + emit asyncModelRequestOngoingChanged(); +} + +void ModelList::handleModelsJsonDownloadErrorOccurred(QNetworkReply::NetworkError code) +{ + // TODO: Show what error occured in the GUI + m_asyncModelRequestOngoing = false; + emit asyncModelRequestOngoingChanged(); + + QNetworkReply *reply = qobject_cast(sender()); + if (!reply) + return; + + qWarning() << QString("ERROR: Modellist download failed with error code \"%1-%2\"") + .arg(code).arg(reply->errorString()).toStdString(); } void ModelList::handleSslErrors(QNetworkReply *reply, const QList &errors) diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h index c749254c46f4..b2d403325d97 100644 --- a/gpt4all-chat/modellist.h +++ b/gpt4all-chat/modellist.h @@ -169,6 +169,7 @@ class ModelList : public QAbstractListModel Q_PROPERTY(InstalledModels* installedModels READ installedModels NOTIFY installedModelsChanged) Q_PROPERTY(DownloadableModels* downloadableModels READ downloadableModels NOTIFY downloadableModelsChanged) Q_PROPERTY(QList userDefaultModelList READ userDefaultModelList NOTIFY userDefaultModelListChanged) + Q_PROPERTY(bool asyncModelRequestOngoing READ asyncModelRequestOngoing NOTIFY asyncModelRequestOngoingChanged) public: static ModelList *globalInstance(); @@ -296,12 +297,14 @@ class ModelList : public QAbstractListModel } QString incompleteDownloadPath(const QString &modelFile); + bool asyncModelRequestOngoing() const { return m_asyncModelRequestOngoing; } Q_SIGNALS: void countChanged(); void installedModelsChanged(); void downloadableModelsChanged(); void userDefaultModelListChanged(); + void asyncModelRequestOngoingChanged(); private Q_SLOTS: void updateModelsFromJson(); @@ -310,6 +313,7 @@ private Q_SLOTS: void 
updateModelsFromDirectory(); void updateDataForSettings(); void handleModelsJsonDownloadFinished(); + void handleModelsJsonDownloadErrorOccurred(QNetworkReply::NetworkError code); void handleSslErrors(QNetworkReply *reply, const QList &errors); private: @@ -328,6 +332,7 @@ private Q_SLOTS: QList m_models; QHash m_modelMap; QFileSystemWatcher *m_watcher; + bool m_asyncModelRequestOngoing; private: explicit ModelList(); diff --git a/gpt4all-chat/qml/ModelDownloaderDialog.qml b/gpt4all-chat/qml/ModelDownloaderDialog.qml index 000e05f227b4..4decb63f470d 100644 --- a/gpt4all-chat/qml/ModelDownloaderDialog.qml +++ b/gpt4all-chat/qml/ModelDownloaderDialog.qml @@ -41,7 +41,7 @@ MyDialog { } Label { - visible: !ModelList.downloadableModels.count + visible: !ModelList.downloadableModels.count && !ModelList.asyncModelRequestOngoing Layout.fillWidth: true Layout.fillHeight: true horizontalAlignment: Qt.AlignHCenter @@ -50,6 +50,14 @@ MyDialog { color: theme.mutedTextColor } + MyBusyIndicator { + visible: !ModelList.downloadableModels.count && ModelList.asyncModelRequestOngoing + running: ModelList.asyncModelRequestOngoing + Accessible.role: Accessible.Animation + Accessible.name: qsTr("Busy indicator") + Accessible.description: qsTr("Displayed when the models request is ongoing") + } + ScrollView { id: scrollView ScrollBar.vertical.policy: ScrollBar.AlwaysOn From 10ca2c4475eca31278ce7dba1c72f79d3f6ad692 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 11:51:07 -0400 Subject: [PATCH 117/198] center the spinner --- gpt4all-chat/qml/ModelDownloaderDialog.qml | 1 + 1 file changed, 1 insertion(+) diff --git a/gpt4all-chat/qml/ModelDownloaderDialog.qml b/gpt4all-chat/qml/ModelDownloaderDialog.qml index 4decb63f470d..5100c4490a74 100644 --- a/gpt4all-chat/qml/ModelDownloaderDialog.qml +++ b/gpt4all-chat/qml/ModelDownloaderDialog.qml @@ -54,6 +54,7 @@ MyDialog { visible: !ModelList.downloadableModels.count && ModelList.asyncModelRequestOngoing running: ModelList.asyncModelRequestOngoing Accessible.role: Accessible.Animation + Layout.alignment: Qt.AlignCenter Accessible.name: qsTr("Busy indicator") Accessible.description: qsTr("Displayed when the models request is ongoing") } From 5df4f1bf8cc42e7b281cd4febee6ed3f354c5992 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 12:47:09 -0400 Subject: [PATCH 118/198] codespell --- gpt4all-chat/modellist.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index 425c1c85d35b..dc98ce36cc93 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -936,7 +936,7 @@ void ModelList::handleModelsJsonDownloadFinished() void ModelList::handleModelsJsonDownloadErrorOccurred(QNetworkReply::NetworkError code) { - // TODO: Show what error occured in the GUI + // TODO: Show what error occurred in the GUI m_asyncModelRequestOngoing = false; emit asyncModelRequestOngoingChanged(); From 60627bd41f49c5363c7e2abecc9b3be6ce59570f Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 12:45:08 -0400 Subject: [PATCH 119/198] Prefer 7b models in order of default model load. 
--- gpt4all-chat/modellist.cpp | 39 ++++---------------------------------- gpt4all-chat/modellist.h | 1 - 2 files changed, 4 insertions(+), 36 deletions(-) diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index dc98ce36cc93..5f43e0fdc81b 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -161,16 +161,6 @@ int InstalledModels::count() const return rowCount(); } -QString InstalledModels::firstId() const -{ - if (rowCount() > 0) { - QModelIndex firstIndex = index(0, 0); - return sourceModel()->data(firstIndex, ModelList::IdRole).toString(); - } else { - return QString(); - } -} - DownloadableModels::DownloadableModels(QObject *parent) : QSortFilterProxyModel(parent) , m_expanded(false) @@ -298,12 +288,9 @@ ModelInfo ModelList::defaultModelInfo() const settings.sync(); // The user default model can be set by the user in the settings dialog. The "default" user - // default model is "Application default" which signals we should use the default model that was - // specified by the models.json file. + // default model is "Application default" which signals we should use the logic here. const QString userDefaultModelName = MySettings::globalInstance()->userDefaultModel(); const bool hasUserDefaultName = !userDefaultModelName.isEmpty() && userDefaultModelName != "Application default"; - const QString defaultModelName = settings.value("defaultModel").toString(); - const bool hasDefaultName = hasUserDefaultName ? false : !defaultModelName.isEmpty(); ModelInfo *defaultModel = nullptr; for (ModelInfo *info : m_models) { @@ -311,12 +298,10 @@ ModelInfo ModelList::defaultModelInfo() const continue; defaultModel = info; - // If we don't have either setting, then just use the first model that is installed - if (!hasUserDefaultName && !hasDefaultName) - break; + const size_t ramrequired = defaultModel->ramrequired; - // If we don't have a user specified default, but *do* have a default setting and match, then use it - if (!hasUserDefaultName && hasDefaultName && (defaultModel->id() == defaultModelName)) + // If we don't have either setting, then just use the first model that requires less than 16GB that is installed + if (!hasUserDefaultName && !info->isChatGPT && ramrequired > 0 && ramrequired < 16) break; // If we have a user specified default and match, then use it @@ -847,14 +832,6 @@ void ModelList::updateModelsFromDirectory() processDirectory(exePath); if (localPath != exePath) processDirectory(localPath); - - if (installedModels()->count()) { - const QString firstModel = - installedModels()->firstId(); - QSettings settings; - settings.setValue("defaultModel", firstModel); - settings.sync(); - } } void ModelList::updateModelsFromJson() @@ -1132,14 +1109,6 @@ void ModelList::parseModelsJsonFile(const QByteArray &jsonData, bool save) updateData(id, ModelList::QuantRole, "NA"); updateData(id, ModelList::TypeRole, "GPT"); } - - if (installedModels()->count()) { - const QString firstModel = - installedModels()->firstId(); - QSettings settings; - settings.setValue("defaultModel", firstModel); - settings.sync(); - } } void ModelList::updateModelsFromSettings() diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h index b2d403325d97..cc70c5f0d2e0 100644 --- a/gpt4all-chat/modellist.h +++ b/gpt4all-chat/modellist.h @@ -127,7 +127,6 @@ class InstalledModels : public QSortFilterProxyModel public: explicit InstalledModels(QObject *parent); int count() const; - QString firstId() const; Q_SIGNALS: void countChanged(); From 
8893db58964a59e2c75c6e9f82f3694e2c8fe8ff Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 14:12:46 -0400 Subject: [PATCH 120/198] Add wizard model and rename orca to be more specific. --- gpt4all-chat/metadata/models.json | 62 ++++++++++++++++++------------- 1 file changed, 37 insertions(+), 25 deletions(-) diff --git a/gpt4all-chat/metadata/models.json b/gpt4all-chat/metadata/models.json index 1b1387e168f0..f0578f71c89c 100644 --- a/gpt4all-chat/metadata/models.json +++ b/gpt4all-chat/metadata/models.json @@ -1,18 +1,15 @@ [ { "order": "a", - "md5sum": "4acc146dd43eb02845c233c29289c7c5", - "name": "Hermes", - "filename": "nous-hermes-13b.ggmlv3.q4_0.bin", - "filesize": "8136777088", - "requires": "2.4.7", + "md5sum": "e8d47924f433bd561cb5244557147793", + "name": "Wizard v1.1", + "filename": "wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin", + "filesize": "7323310848", "ramrequired": "16", "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", - "description": "Best overall model
    • Instruction based • Gives long responses • Curated with 300,000 uncensored instructions • Trained by Nous Research • Cannot be used commercially
    ", - "url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin", - "promptTemplate": "### Instruction:\n%1\n### Response:\n" + "description": "Best overall model
    • Instruction based • Gives very long responses • Finetuned with only 1k of high-quality data • Trained by Microsoft and Peking University • Cannot be used commercially
      Extremely good model • Instruction based • Gives long responses • Curated with 300,000 uncensored instructions • Trained by Nous Research • Cannot be used commercially
      ", + "url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin", + "promptTemplate": "### Instruction:\n%1\n### Response:\n" + }, + { + "order": "e", "md5sum": "81a09a0ddf89690372fc296ff7f625af", "name": "Groovy", "filename": "ggml-gpt4all-j-v1.3-groovy.bin", @@ -42,7 +54,7 @@ "description": "Creative model can be used for commercial purposes
      • Fast responses • Creative responses • Instruction based • Trained by Nomic AI • Licensed for commercial use
      " }, { - "order": "e", + "order": "f", "md5sum": "11d9f060ca24575a2c303bdc39952486", "name": "Snoozy", "filename": "GPT4All-13B-snoozy.ggmlv3.q4_0.bin", @@ -56,7 +68,7 @@ "url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin" }, { - "order": "f", + "order": "g", "md5sum": "756249d3d6abe23bde3b1ae272628640", "name": "MPT Chat", "filename": "ggml-mpt-7b-chat.bin", @@ -71,9 +83,9 @@ "systemPrompt": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>" }, { - "order": "g", + "order": "h", "md5sum": "e64e74375ce9d36a3d0af3db1523fd0a", - "name": "Orca", + "name": "Mini Orca", "filename": "orca-mini-7b.ggmlv3.q4_0.bin", "filesize": "3791749248", "requires": "2.4.7", @@ -87,9 +99,9 @@ "systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n" }, { - "order": "h", + "order": "i", "md5sum": "6a087f7f4598fad0bb70e6cb4023645e", - "name": "Orca (Small)", + "name": "Mini Orca (Small)", "filename": "orca-mini-3b.ggmlv3.q4_0.bin", "filesize": "1928446208", "requires": "2.4.7", @@ -103,9 +115,9 @@ "systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n" }, { - "order": "i", + "order": "j", "md5sum": "959b7f65b2d12fd1e3ff99e7493c7a3a", - "name": "Orca (Large)", + "name": "Mini Orca (Large)", "filename": "orca-mini-13b.ggmlv3.q4_0.bin", "filesize": "7323329152", "requires": "2.4.7", @@ -119,7 +131,7 @@ "systemPrompt": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n" }, { - "order": "j", + "order": "k", "md5sum": "29119f8fa11712704c6b22ac5ab792ea", "name": "Vicuna", "filename": "ggml-vicuna-7b-1.1-q4_2.bin", @@ -131,7 +143,7 @@ "description": "Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
      • Instruction based • Cannot be used commercially
      " }, { - "order": "k", + "order": "l", "md5sum": "95999b7b0699e2070af63bf5d34101a8", "name": "Vicuna (large)", "filename": "ggml-vicuna-13b-1.1-q4_2.bin", @@ -143,7 +155,7 @@ "description": "Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
      • Instruction based • Cannot be used commercially
      " }, { - "order": "l", + "order": "m", "md5sum": "99e6d129745a3f1fb1121abed747b05a", "name": "Wizard", "filename": "ggml-wizardLM-7B.q4_2.bin", @@ -155,7 +167,7 @@ "description": "Good small model - trained by by Microsoft and Peking University
      • Instruction based • Cannot be used commercially
      " }, { - "order": "m", + "order": "n", "md5sum": "6cb4ee297537c9133bddab9692879de0", "name": "Stable Vicuna", "filename": "ggml-stable-vicuna-13B.q4_2.bin", @@ -168,7 +180,7 @@ "systemPrompt": "## Assistant: I am StableVicuna, a large language model created by CarperAI. I am here to chat!\n\n" }, { - "order": "n", + "order": "o", "md5sum": "1cfa4958f489f0a0d1ffdf6b37322809", "name": "MPT Instruct", "filename": "ggml-mpt-7b-instruct.bin", @@ -181,7 +193,7 @@ "description": "Mosaic's instruction model
      • Instruction based • Trained by Mosaic ML • Licensed for commercial use
      " }, { - "order": "o", + "order": "p", "md5sum": "120c32a51d020066288df045ef5d52b9", "name": "MPT Base", "filename": "ggml-mpt-7b-base.bin", @@ -194,7 +206,7 @@ "description": "Trained for text completion with no assistant finetuning
      • Completion based • Trained by Mosaic ML • Licensed for commercial use
      " }, { - "order": "p", + "order": "q", "md5sum": "d5eafd5b0bd0d615cfd5fd763f642dfe", "name": "Nous Vicuna", "filename": "ggml-nous-gpt4-vicuna-13b.bin", @@ -206,7 +218,7 @@ "description": "Trained on ~180,000 instructions
      • Instruction based • Trained by Nous Research • Cannot be used commercially
      " }, { - "order": "q", + "order": "r", "md5sum": "489d21fd48840dcb31e5f92f453f3a20", "name": "Wizard Uncensored", "filename": "wizardLM-13B-Uncensored.ggmlv3.q4_0.bin", @@ -220,7 +232,7 @@ "url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin" }, { - "order": "r", + "order": "s", "md5sum": "615890cb571fcaa0f70b2f8d15ef809e", "disableGUI": "true", "name": "Replit", From 6a8fa27c8d2a1b3e9c075b94415e75483248c096 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Wed, 12 Jul 2023 14:12:41 -0400 Subject: [PATCH 121/198] Correctly find models in subdirs of model dir QDirIterator doesn't seem particular subdir aware, its path() returns the iterated dir. This was the simplest way I found to get this right. --- gpt4all-chat/modellist.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/modellist.cpp b/gpt4all-chat/modellist.cpp index 5f43e0fdc81b..6aacc82d55cf 100644 --- a/gpt4all-chat/modellist.cpp +++ b/gpt4all-chat/modellist.cpp @@ -821,7 +821,7 @@ void ModelList::updateModelsFromDirectory() for (const QString &id : modelsById) { updateData(id, FilenameRole, filename); updateData(id, ChatGPTRole, filename.startsWith("chatgpt-")); - updateData(id, DirpathRole, path); + updateData(id, DirpathRole, info.dir().absolutePath() + "/"); updateData(id, FilesizeRole, toFileSize(info.size())); } } From be395c12cc2c6e1233720f84f9e594d0e41bf2ce Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 14:27:48 -0400 Subject: [PATCH 122/198] Make all system prompts empty by default if model does not include in training data. --- gpt4all-chat/chatllm.cpp | 7 ++++++- gpt4all-chat/metadata/models.json | 13 +++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 088083d56be3..0f5da45f372a 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -788,13 +788,18 @@ void ChatLLM::processSystemPrompt() if (!isModelLoaded() || m_processedSystemPrompt || m_isServer) return; + const std::string systemPrompt = MySettings::globalInstance()->modelSystemPrompt(m_modelInfo).toStdString(); + if (systemPrompt.empty()) { + m_processedSystemPrompt = true; + return; + } + m_stopGenerating = false; auto promptFunc = std::bind(&ChatLLM::handleSystemPrompt, this, std::placeholders::_1); auto responseFunc = std::bind(&ChatLLM::handleSystemResponse, this, std::placeholders::_1, std::placeholders::_2); auto recalcFunc = std::bind(&ChatLLM::handleSystemRecalculate, this, std::placeholders::_1); - const std::string systemPrompt = MySettings::globalInstance()->modelSystemPrompt(m_modelInfo).toStdString(); const int32_t n_predict = MySettings::globalInstance()->modelMaxLength(m_modelInfo); const int32_t top_k = MySettings::globalInstance()->modelTopK(m_modelInfo); const float top_p = MySettings::globalInstance()->modelTopP(m_modelInfo); diff --git a/gpt4all-chat/metadata/models.json b/gpt4all-chat/metadata/models.json index f0578f71c89c..13928a6207fd 100644 --- a/gpt4all-chat/metadata/models.json +++ b/gpt4all-chat/metadata/models.json @@ -9,6 +9,7 @@ "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", + "systemPrompt": " ", "description": "Best overall model
      • Instruction based • Gives very long responses • Finetuned with only 1k of high-quality data • Trained by Microsoft and Peking University • Cannot be used commercially
        Best overall smaller model • Fast responses • Instruction based • Trained by TII • Finetuned by Nomic AI • Licensed for commercial use
        ", "url": "https://huggingface.co/nomic-ai/gpt4all-falcon-ggml/resolve/main/ggml-model-gpt4all-falcon-q4_0.bin", "promptTemplate": "### Instruction:\n%1\n### Response:\n" @@ -37,6 +39,7 @@ "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", + "systemPrompt": " ", "description": "Extremely good model
        • Instruction based • Gives long responses • Curated with 300,000 uncensored instructions • Trained by Nous Research • Cannot be used commercially
        ", "url": "https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin", "promptTemplate": "### Instruction:\n%1\n### Response:\n" @@ -51,6 +54,7 @@ "parameters": "7 billion", "quant": "q4_0", "type": "GPT-J", + "systemPrompt": " ", "description": "Creative model can be used for commercial purposes
        • Fast responses • Creative responses • Instruction based • Trained by Nomic AI • Licensed for commercial use
        " }, { @@ -64,6 +68,7 @@ "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", + "systemPrompt": " ", "description": "Very good overall model
        • Instruction based • Based on the same dataset as Groovy • Slower than Groovy, with higher quality responses • Trained by Nomic AI • Cannot be used commercially
        ", "url": "https://huggingface.co/TheBloke/GPT4All-13B-snoozy-GGML/resolve/main/GPT4All-13B-snoozy.ggmlv3.q4_0.bin" }, @@ -140,6 +145,7 @@ "parameters": "7 billion", "quant": "q4_2", "type": "LLaMA", + "systemPrompt": " ", "description": "Good small model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
        • Instruction based • Cannot be used commercially
        " }, { @@ -152,6 +158,7 @@ "parameters": "13 billion", "quant": "q4_2", "type": "LLaMA", + "systemPrompt": " ", "description": "Good larger model - trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego
        • Instruction based • Cannot be used commercially
        " }, { @@ -164,6 +171,7 @@ "parameters": "7 billion", "quant": "q4_2", "type": "LLaMA", + "systemPrompt": " ", "description": "Good small model - trained by by Microsoft and Peking University
        • Instruction based • Cannot be used commercially
        " }, { @@ -190,6 +198,7 @@ "parameters": "7 billion", "quant": "q4_0", "type": "MPT", + "systemPrompt": " ", "description": "Mosaic's instruction model
        • Instruction based • Trained by Mosaic ML • Licensed for commercial use
        " }, { @@ -203,6 +212,7 @@ "parameters": "7 billion", "quant": "q4_0", "type": "MPT", + "systemPrompt": " ", "description": "Trained for text completion with no assistant finetuning
        • Completion based • Trained by Mosaic ML • Licensed for commercial use
        " }, { @@ -215,6 +225,7 @@ "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", + "systemPrompt": " ", "description": "Trained on ~180,000 instructions
        • Instruction based • Trained by Nous Research • Cannot be used commercially
        " }, { @@ -228,6 +239,7 @@ "parameters": "13 billion", "quant": "q4_0", "type": "LLaMA", + "systemPrompt": " ", "description": "Trained on uncensored assistant data and instruction data
        • Instruction based • Cannot be used commercially
        ", "url": "https://huggingface.co/TheBloke/WizardLM-13B-Uncensored-GGML/resolve/main/wizardLM-13B-Uncensored.ggmlv3.q4_0.bin" }, @@ -243,6 +255,7 @@ "parameters": "3 billion", "quant": "f16", "type": "Replit", + "systemPrompt": " ", "description": "Trained on subset of the Stack
        • Code completion based • Licensed for commercial use
        ", "url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin" } From 8eb08442779d72417a5fe762bd307fe22f446025 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 14:30:11 -0400 Subject: [PATCH 123/198] Check if the trimmed version is empty. --- gpt4all-chat/chatllm.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 0f5da45f372a..181b84528a4b 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -789,7 +789,7 @@ void ChatLLM::processSystemPrompt() return; const std::string systemPrompt = MySettings::globalInstance()->modelSystemPrompt(m_modelInfo).toStdString(); - if (systemPrompt.empty()) { + if (QString::fromStdString(systemPrompt).trimmed().isEmpty()) { m_processedSystemPrompt = true; return; } From e8b19b8e82ab57399830ea079eae93d9bd9c4fb8 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Wed, 12 Jul 2023 14:58:45 -0400 Subject: [PATCH 124/198] Bump version to 2.4.14 and provide release notes. --- gpt4all-chat/CMakeLists.txt | 2 +- gpt4all-chat/metadata/release.json | 34 ++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index a94eb844f7c3..8bd5becc5643 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -18,7 +18,7 @@ endif() set(APP_VERSION_MAJOR 2) set(APP_VERSION_MINOR 4) -set(APP_VERSION_PATCH 13) +set(APP_VERSION_PATCH 14) set(APP_VERSION "${APP_VERSION_MAJOR}.${APP_VERSION_MINOR}.${APP_VERSION_PATCH}") # Include the binary directory for the generated header file diff --git a/gpt4all-chat/metadata/release.json b/gpt4all-chat/metadata/release.json index 7efd7d1e3217..7db1a227d531 100644 --- a/gpt4all-chat/metadata/release.json +++ b/gpt4all-chat/metadata/release.json @@ -416,6 +416,40 @@ * Akarshan Biswas * Adam Treat (Nomic AI) * Community (beta testers, bug reporters) +" + }, + { + "version": "2.4.13", + "notes": +" +* Fix bug with prolonging shutdown with generation +* Fix bug with update model info on deleting chats +* Fix bug with preventing closing of model download dialog +* Allows allow closing the model download dialog +* Fix numerous bugs with download of models.json and provide backup option +* Add json and c# highlighting +* Fix bug with chatgpt crashing +* Fix bug with chatgpt not working for some keys +* Fix bug with mixpanel opt outs not counting +* Fix problem with OOM errors causing crash and then repeating on next start +* Fix default thread setting and provide guardrails +* Fix tap handler in settings dialog for buttons +* Fix color of some text fields on macOS for settings dialog +* Fix problem with startup dialog not closing +* Provide error dialog for settings file not accessible +* Try and fix problems with avx-only detection +* Fix showing error in model downloads unnecessarily +* Prefer 7b models to load by default +* Add Wizard v1.1 to download list +* Rename Orca models to Mini Orca +* Don't use a system prompt unless model was trained with one by default +", + "contributors": +" +* Lakshay Kansal (Nomic AI) +* Aaron Miller (Nomic AI) +* Adam Treat (Nomic AI) +* Community (beta testers, bug reporters) " } ] From 6c4f449b7a0dc5ba8c4b0fa61dd1f9448b65bf2e Mon Sep 17 00:00:00 2001 From: Zach Nussbaum Date: Wed, 12 Jul 2023 15:18:24 -0400 Subject: [PATCH 125/198] fix: update train scripts and configs for other models (#1164) * feat: falcon config * feat: mpt config * chore: 
gitignore * refactor: step calculation * fix: attention mask + shuffle on epoch end * fix: return tensors * fix: wait for everyone * chore: config * chore: ds config * fix: remove ccols * fix: logging and saving * chore: add einops --- .gitignore | 3 ++ .../configs/deepspeed/ds_config_mpt.json | 49 ++++++++++++++++++ .../configs/deepspeed/ds_config_pythia.json | 48 +++++++++++++++++ .../configs/train/finetune_falcon.yaml | 34 +++++++++++++ .../configs/train/finetune_mpt.yaml | 34 +++++++++++++ .../configs/train/finetune_openllama.yaml | 34 +++++++++++++ gpt4all-training/data.py | 19 ++++--- gpt4all-training/requirements.txt | 2 +- gpt4all-training/train.py | 51 +++++++++++-------- 9 files changed, 245 insertions(+), 29 deletions(-) create mode 100644 gpt4all-training/configs/deepspeed/ds_config_mpt.json create mode 100644 gpt4all-training/configs/deepspeed/ds_config_pythia.json create mode 100644 gpt4all-training/configs/train/finetune_falcon.yaml create mode 100644 gpt4all-training/configs/train/finetune_mpt.yaml create mode 100644 gpt4all-training/configs/train/finetune_openllama.yaml diff --git a/.gitignore b/.gitignore index 67cf225f1206..1e8a5c364513 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +*.arrow +squad_* +*sbert_embedded* *.pkl ckpts* .deepspeed_env diff --git a/gpt4all-training/configs/deepspeed/ds_config_mpt.json b/gpt4all-training/configs/deepspeed/ds_config_mpt.json new file mode 100644 index 000000000000..76ed092c9c0c --- /dev/null +++ b/gpt4all-training/configs/deepspeed/ds_config_mpt.json @@ -0,0 +1,49 @@ +{ + "train_batch_size": "auto", + "gradient_accumulation_steps": "auto", + "train_micro_batch_size_per_gpu": "auto", + "fp16": { + "enabled": "auto", + "min_loss_scale": 1, + "loss_scale_window": 1000, + "hysteresis": 2, + "initial_scale_power": 32 + }, + "bf16": { + "enabled": "auto" + }, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 1, + "offload_param": { + "device": "none" + }, + "offload_optimizer": { + "device": "none" + }, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "contiguous_gradients": true + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": [ + 0.9, + 0.999 + ], + "eps": 1e-08 + } + }, + "scheduler": { + "type": "WarmupDecayLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", + "warmup_type": "linear", + "total_num_steps": "auto" + } + } +} \ No newline at end of file diff --git a/gpt4all-training/configs/deepspeed/ds_config_pythia.json b/gpt4all-training/configs/deepspeed/ds_config_pythia.json new file mode 100644 index 000000000000..6f9b29617522 --- /dev/null +++ b/gpt4all-training/configs/deepspeed/ds_config_pythia.json @@ -0,0 +1,48 @@ +{ + "train_batch_size": "auto", + "gradient_accumulation_steps": "auto", + "train_micro_batch_size_per_gpu": "auto", + "fp16": { + "enabled": "auto", + "min_loss_scale": 1, + "loss_scale_window": 1000, + "hysteresis": 2, + "initial_scale_power": 32 + }, + "bf16": { + "enabled": "auto" + }, + "gradient_clipping": 1.0, + "zero_optimization": { + "stage": 2, + "offload_param": { + "device": "none" + }, + "offload_optimizer": { + "device": "none" + }, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "contiguous_gradients": true + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": "auto", + "betas": [ + 0.9, + 0.999 + ], + "eps": 1e-08 + } + }, + "scheduler": { + "type": "WarmupLR", + "params": { + "warmup_min_lr": 0, + "warmup_max_lr": "auto", + "warmup_num_steps": "auto", 
+ "warmup_type": "linear" + } + } +} \ No newline at end of file diff --git a/gpt4all-training/configs/train/finetune_falcon.yaml b/gpt4all-training/configs/train/finetune_falcon.yaml new file mode 100644 index 000000000000..089708bb4c90 --- /dev/null +++ b/gpt4all-training/configs/train/finetune_falcon.yaml @@ -0,0 +1,34 @@ +# model/tokenizer +model_name: "tiiuae/falcon-7b" +tokenizer_name: "tiiuae/falcon-7b" +gradient_checkpointing: true +save_name: "nomic-ai/gpt4all-falcon" + +# dataset +streaming: false +num_proc: 64 +dataset_path: "nomic-ai/gpt4all-j-prompt-generations" +revision: "v1.3-groovy" +max_length: 1024 +batch_size: 32 + +# train dynamics +lr: 2.0e-5 +min_lr: 0 +weight_decay: 0.0 +eval_every: 500 +eval_steps: 105 +save_every: 1000 +log_grads_every: 500 +output_dir: "ckpts/falcon" +checkpoint: "/home/paperspace/gpt4all/ckpts/mpt/step_1000" +lora: false +warmup_steps: 500 +num_epochs: 2 + +# logging +wandb: true +wandb_entity: "gpt4all" +wandb_project_name: "gpt4all" +seed: 42 + diff --git a/gpt4all-training/configs/train/finetune_mpt.yaml b/gpt4all-training/configs/train/finetune_mpt.yaml new file mode 100644 index 000000000000..4e1f36380906 --- /dev/null +++ b/gpt4all-training/configs/train/finetune_mpt.yaml @@ -0,0 +1,34 @@ +# model/tokenizer +model_name: "mosaicml/mpt-7b" +tokenizer_name: "mosaicml/mpt-7b" +gradient_checkpointing: false +save_name: "nomic-ai/mpt-finetuned-round2" + +# dataset +streaming: false +num_proc: 64 +dataset_path: "nomic-ai/gpt4all-j-prompt-generations" +revision: "v1.3-groovy" +max_length: 1024 +batch_size: 8 + +# train dynamics +lr: 2.0e-5 +min_lr: 0 +weight_decay: 0.0 +eval_every: 500 +eval_steps: 105 +save_every: 1000 +log_grads_every: 500 +output_dir: "ckpts/mpt" +checkpoint: null +lora: false +warmup_steps: 500 +num_epochs: 2 + +# logging +wandb: false +wandb_entity: "gpt4all" +wandb_project_name: "gpt4all" +seed: 42 + diff --git a/gpt4all-training/configs/train/finetune_openllama.yaml b/gpt4all-training/configs/train/finetune_openllama.yaml new file mode 100644 index 000000000000..6862f61147a8 --- /dev/null +++ b/gpt4all-training/configs/train/finetune_openllama.yaml @@ -0,0 +1,34 @@ +# model/tokenizer +model_name: "openlm-research/open_llama_7b" +tokenizer_name: "openlm-research/open_llama_7b" +gradient_checkpointing: true +save_name: "nomic-ai/gpt4all-openllama" + +# dataset +streaming: false +num_proc: 64 +dataset_path: "nomic-ai/gpt4all-updated" +revision: null +max_length: 1024 +batch_size: 32 + +# train dynamics +lr: 2.0e-5 +min_lr: 0 +weight_decay: 0.0 +eval_every: 500 +log_every: 10 +save_every: 1000 +log_grads_every: 500 +output_dir: "ckpts/falcon" +checkpoint: null +lora: false +warmup_steps: 500 +num_epochs: 3 + +# logging +wandb: true +wandb_entity: "gpt4all" +wandb_project_name: "gpt4all" +seed: 42 + diff --git a/gpt4all-training/data.py b/gpt4all-training/data.py index 8227de00065b..f10847de5a62 100644 --- a/gpt4all-training/data.py +++ b/gpt4all-training/data.py @@ -12,7 +12,7 @@ def tokenize_inputs(config, tokenizer, examples): # hacky backward compatible different_eos = tokenizer.eos_token != "" - out = {"labels": [], "input_ids": []} + out = {"labels": [], "input_ids": [], "attention_mask": []} for prompt, response in zip(examples["prompt"], examples["response"]): if different_eos: if response.count(" \n") > 0: @@ -49,9 +49,10 @@ def tokenize_inputs(config, tokenizer, examples): print(response) raise - input_tokens = tokenizer.pad({"input_ids": input_tokens}, padding="max_length", max_length=max_length)["input_ids"] + padded 
= tokenizer.pad({"input_ids": input_tokens}, padding="max_length", max_length=max_length, return_tensors="pt") out["labels"].append(labels) - out["input_ids"].append(input_tokens) + out["input_ids"].append(padded["input_ids"]) + out["attention_mask"].append(padded["attention_mask"]) out = {k: torch.stack(v) if isinstance(v, list) else v for k, v in out.items()} @@ -72,7 +73,7 @@ def load_data(config, tokenizer): dataset = load_dataset("json", data_files=files, split="train") else: - dataset = load_dataset(dataset_path, split="train") + dataset = load_dataset(dataset_path, split="train", revision=config["revision"] if "revision" in config else None) dataset = dataset.train_test_split(test_size=.05, seed=config["seed"]) @@ -83,19 +84,23 @@ def load_data(config, tokenizer): else: kwargs = {} + cols_to_keep = ["input_ids", "labels", "attention_mask"] # tokenize inputs and return labels and attention mask train_dataset = train_dataset.map( lambda ele: tokenize_inputs(config, tokenizer, ele), batched=True, - remove_columns=["source", "prompt"], **kwargs ) + remove_cols = [col for col in train_dataset.column_names if col not in cols_to_keep] + train_dataset = train_dataset.remove_columns(remove_cols) + val_dataset = val_dataset.map( lambda ele: tokenize_inputs(config, tokenizer, ele), batched=True, - remove_columns=["source", "prompt"], **kwargs ) + remove_cols = [col for col in val_dataset.column_names if col not in cols_to_keep] + val_dataset = val_dataset.remove_columns(remove_cols) train_dataset = train_dataset.with_format("torch") val_dataset = val_dataset.with_format("torch") @@ -106,12 +111,14 @@ def load_data(config, tokenizer): train_dataset, collate_fn=DefaultDataCollator(), batch_size=config["batch_size"], + shuffle=True, ) val_dataloader = DataLoader( val_dataset, collate_fn=DefaultDataCollator(), batch_size=config["batch_size"], + shuffle=True, ) return train_dataloader, val_dataloader diff --git a/gpt4all-training/requirements.txt b/gpt4all-training/requirements.txt index b38ab36ccce0..110977d231ab 100644 --- a/gpt4all-training/requirements.txt +++ b/gpt4all-training/requirements.txt @@ -1,10 +1,10 @@ accelerate datasets +einops torchmetrics evaluate transformers>=4.28.0 wandb -pip peft nodelist-inflator deepspeed diff --git a/gpt4all-training/train.py b/gpt4all-training/train.py index 69ebce28b4ce..829041f6dcca 100644 --- a/gpt4all-training/train.py +++ b/gpt4all-training/train.py @@ -1,5 +1,5 @@ import os -from transformers import AutoModelForCausalLM, AutoTokenizer, get_scheduler, LlamaForCausalLM +from transformers import AutoModelForCausalLM, AutoTokenizer, get_scheduler import torch from torch.optim import AdamW from argparse import ArgumentParser @@ -42,7 +42,7 @@ def train(accelerator, config): accelerator.print(config) accelerator.print(f"Using {accelerator.num_processes} GPUs") - tokenizer = AutoTokenizer.from_pretrained(config['tokenizer_name'], model_max_length=config['max_length']) + tokenizer = AutoTokenizer.from_pretrained(config['tokenizer_name'], model_max_length=config['max_length'], use_fast=False) # if no pad token, set it to eos if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token @@ -53,6 +53,7 @@ def train(accelerator, config): checkpoint = config["gradient_checkpointing"] + model = AutoModelForCausalLM.from_pretrained(config["model_name"], use_cache=False if checkpoint else True, trust_remote_code=True) @@ -86,7 +87,7 @@ def train(accelerator, config): # decay to min_lr instead of 0 lr_ratio = config["min_lr"] / config["lr"] 
accelerator.print(f"Len of train_dataloader: {len(train_dataloader)}") - total_num_steps = (len(train_dataloader) / gradient_accumulation_steps) * config["num_epochs"] + total_num_steps = (len(train_dataloader) / gradient_accumulation_steps) * (config["num_epochs"]) # instead of decaying to zero, decay to ratio of min_lr / lr total_num_steps += int(total_num_steps * lr_ratio) + config["warmup_steps"] accelerator.print(f"Total training steps: {total_num_steps}") @@ -104,7 +105,7 @@ def train(accelerator, config): ) else: scheduler = DummyScheduler( - optimizer, total_num_steps=config["warmup_steps"], warmup_num_steps=config["warmup_steps"] + optimizer, total_num_steps=total_num_steps, warmup_num_steps=config["warmup_steps"] ) model, optimizer, train_dataloader, val_dataloader, scheduler = accelerator.prepare( @@ -117,26 +118,34 @@ def train(accelerator, config): if config["checkpoint"]: accelerator.load_state(config["checkpoint"]) accelerator.print(f"Resumed from checkpoint: {config['checkpoint']}") - path = os.path.basename(config["train_args"]["resume_from_checkpoint"]) + path = os.path.basename(config["checkpoint"]) training_difference = os.path.splitext(path)[0] resume_step = int(training_difference.replace("step_", "")) - accelerator.skip_first_batches(train_dataloader, resume_step) + train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) accelerator.print(f"Resuming from step {resume_step}") + else: + resume_step = 0 # log gradients if accelerator.is_main_process and config["wandb"]: wandb.watch(model, log_freq=config["log_grads_every"], log="all") - for epoch in range(config["num_epochs"]): + + accelerator.wait_for_everyone() + + for epoch in range(0, config["num_epochs"]): train_loss = MeanMetric(nan_strategy="error").to(model.device) for step, batch in enumerate(tqdm(train_dataloader)): + curr_step = epoch * len(train_dataloader) + step model.train() outputs = model(**batch) loss = outputs.loss # gather loss before backprop in case of gradient accumulation loss_values = accelerator.gather_for_metrics({"loss": loss.detach().float()}) + if config["wandb"]: + accelerator.log({"loss": torch.mean(loss_values["loss"]).item()}, step=curr_step) train_loss.update(loss_values["loss"]) loss = loss / gradient_accumulation_steps @@ -144,9 +153,8 @@ def train(accelerator, config): # get gradient norm of all params # log LR in case something weird happens - if step > 0 and step % (config["eval_every"] // 10) == 0: + if step > 0 and step % (config["log_lr_every"]) == 0: if config["wandb"]: - curr_step = step + epoch * len(train_dataloader) accelerator.log({"lr": scheduler.get_last_lr()[0]}, step=curr_step) if (step + 1) % gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: @@ -156,7 +164,6 @@ def train(accelerator, config): if step > 0 and step % config["save_every"] == 0: - curr_step = step + epoch * len(train_dataloader) accelerator.save_state(f"{config['output_dir']}/step_{curr_step}") if step > 0 and (step % config["eval_every"] == 0 or step == len(train_dataloader) - 1): @@ -170,7 +177,6 @@ def train(accelerator, config): } if config["wandb"]: - curr_step = step + epoch * len(train_dataloader) accelerator.log({**log_train, **log_val}, step=curr_step) accelerator.print(f"Current LR: {scheduler.get_last_lr()[0]}") @@ -181,8 +187,14 @@ def train(accelerator, config): accelerator.print(f"Epoch {epoch} finished") accelerator.print(f"Pushing to HF hub") - accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) + + 
unwrapped_model.save_pretrained( + f"{config['output_dir']}/epoch_{epoch}", + is_main_process=accelerator.is_main_process, + save_function=accelerator.save, + state_dict=accelerator.get_state_dict(model), + ) try: if accelerator.is_main_process: unwrapped_model.push_to_hub(config["save_name"] + f"-epoch_{epoch}", private=True) @@ -191,21 +203,16 @@ def train(accelerator, config): accelerator.print(e) accelerator.print(f"Failed to push to hub") + + if config["num_epochs"] > 1: + accelerator.wait_for_everyone() + unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( - f"{config['output_dir']}/epoch_{epoch}", + f"{config['output_dir']}/final", is_main_process=accelerator.is_main_process, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model), ) - - accelerator.wait_for_everyone() - unwrapped_model = accelerator.unwrap_model(model) - unwrapped_model.save_pretrained( - f"{config['output_dir']}/final", - is_main_process=accelerator.is_main_process, - save_function=accelerator.save, - state_dict=accelerator.get_state_dict(model), - ) accelerator.end_training() From 00a945eaee12e694e8bdafd58b0871524114f330 Mon Sep 17 00:00:00 2001 From: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> Date: Wed, 12 Jul 2023 16:23:04 +0200 Subject: [PATCH 126/198] Update gpt4all_faq.md - Add information about AVX/AVX2. - Update supported architectures. Signed-off-by: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> --- gpt4all-bindings/python/docs/gpt4all_faq.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/gpt4all-bindings/python/docs/gpt4all_faq.md b/gpt4all-bindings/python/docs/gpt4all_faq.md index ab14efab6ae7..f1e84550a7cc 100644 --- a/gpt4all-bindings/python/docs/gpt4all_faq.md +++ b/gpt4all-bindings/python/docs/gpt4all_faq.md @@ -2,11 +2,13 @@ ## What models are supported by the GPT4All ecosystem? -Currently, there are three different model architectures that are supported: +Currently, there are five different model architectures that are supported: -1. GPTJ - Based off of the GPT-J architecture with examples found [here](https://huggingface.co/EleutherAI/gpt-j-6b) -2. LLAMA - Based off of the LLAMA architecture with examples found [here](https://huggingface.co/models?sort=downloads&search=llama) +1. GPT-J - Based off of the GPT-J architecture with examples found [here](https://huggingface.co/EleutherAI/gpt-j-6b) +2. LLaMA - Based off of the LLaMA architecture with examples found [here](https://huggingface.co/models?sort=downloads&search=llama) 3. MPT - Based off of Mosaic ML's MPT architecture with examples found [here](https://huggingface.co/mosaicml/mpt-7b) +4. Replit - Based off of Replit Inc.'s Replit architecture with examples found [here](https://huggingface.co/replit/replit-code-v1-3b) +5. Falcon - Based off of TII's Falcon architecture with examples found [here](https://huggingface.co/tiiuae/falcon-40b) ## Why so many different architectures? What differentiates them? @@ -25,6 +27,10 @@ The upstream [llama.cpp](https://github.com/ggerganov/llama.cpp) project has int Fortunately, we have engineered a submoduling system allowing us to dynamically load different versions of the underlying library so that GPT4All just works. +## What are the system requirements? + +Your CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions) and you need enough RAM to load a model into memory. + ## What about GPU inference? 
In newer versions of llama.cpp, support for NVIDIA GPU inference has been added. We're investigating how to incorporate this into our downloadable installers.

From 18ca8901f08db9835bb07077ad375d50961577aa Mon Sep 17 00:00:00 2001
From: AT
Date: Wed, 12 Jul 2023 16:30:56 -0400
Subject: [PATCH 127/198] Update README.md

Signed-off-by: AT
---
 gpt4all-chat/README.md | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/gpt4all-chat/README.md b/gpt4all-chat/README.md
index 8fe7bf970cba..58e62113d01e 100644
--- a/gpt4all-chat/README.md
+++ b/gpt4all-chat/README.md
@@ -51,19 +51,7 @@ One click installers for macOS, Linux, and Windows at https://gpt4all.io
 If you've already checked out the source code and/or built the program, make sure that when you do a git fetch to get the latest changes, you also do ```git submodule update --init --recursive``` to update the submodules.
 
 ## Manual download of models
-* https://gpt4all.io/models/ggml-mpt-7b-chat.bin (default) (md5sum 756249d3d6abe23bde3b1ae272628640) Current best non-commercially licensable chat model based on MPT and trained by Mosaic ML.
-* https://gpt4all.io/models/ggml-gpt4all-j-v1.3-groovy.bin (default) (md5sum 81a09a0ddf89690372fc296ff7f625af) Current best commercially licensable model based on GPT-J and trained by Nomic AI on the latest curated GPT4All dataset.
-* https://gpt4all.io/models/ggml-gpt4all-l13b-snoozy.bin (md5sum 91f886b68fbce697e9a3cd501951e455) Current best non-commercially licensable model based on Llama 13b and trained by Nomic AI on the latest curated GPT4All dataset.
-* https://gpt4all.io/models/ggml-gpt4all-j-v1.2-jazzy.bin (md5sum 879344aaa9d62fdccbda0be7a09e7976) A commercially licensable model based on GPT-J and trained by Nomic AI on the v2 GPT4All dataset.
-* https://gpt4all.io/models/ggml-gpt4all-j-v1.1-breezy.bin (md5sum 61d48a82cb188cceb14ebb8082bfec37) A commercially licensable model based on GPT-J and trained by Nomic AI on the v1 GPT4All dataset.
-* https://gpt4all.io/models/ggml-gpt4all-j.bin (md5sum 5b5a3f9b858d33b29b52b89692415595) A commercially licensable model based on GPT-J and trained by Nomic AI on the v0 GPT4All dataset.
-* https://gpt4all.io/models/ggml-vicuna-7b-1.1-q4_2.bin (md5sum 29119f8fa11712704c6b22ac5ab792ea) An non-commercially licensable model based on Llama 7b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
-* https://gpt4all.io/models/ggml-vicuna-13b-1.1-q4_2.bin (md5sum 95999b7b0699e2070af63bf5d34101a8) An non-commercially licensable model based on Llama 13b and trained by teams from UC Berkeley, CMU, Stanford, MBZUAI, and UC San Diego.
-* https://gpt4all.io/models/ggml-wizardLM-7B.q4_2.bin (md5sum 99e6d129745a3f1fb1121abed747b05a) An non-commercially licensable model based on Llama 7b and trained by Microsoft and Peking University.
-* https://gpt4all.io/models/ggml-stable-vicuna-13B.q4_2.bin (md5sum 6cb4ee297537c9133bddab9692879de0) An non-commercially licensable model based on Llama 13b and RLHF trained by Stable AI.
-* https://gpt4all.io/models/ggml-mpt-7b-base.bin (md5sum 120c32a51d020066288df045ef5d52b9) A commercially licensable model base pre-trained by Mosaic ML.
-* https://gpt4all.io/models/ggml-nous-gpt4-vicuna-13b.bin (md5sum d5eafd5b0bd0d615cfd5fd763f642dfe) A non-commercially licensable model based on Vicuna 13b, fine-tuned on ~180,000 instructions, trained by Nous Research.
-* https://gpt4all.io/models/ggml-mpt-7b-instruct.bin (md5sum 1cfa4958f489f0a0d1ffdf6b37322809) A commercially licensable instruct model based on MPT and trained by Mosaic ML. +* You can find a 'Model Explorer' on the official website where you can manually download models that we support: https://gpt4all.io/index.html ## Terminal Only Interface with no Qt dependency From a0dae86a957337b20c3a64cc48480126062b9300 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 13:33:07 -0400 Subject: [PATCH 128/198] Add bert to models.json --- gpt4all-chat/metadata/models.json | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/gpt4all-chat/metadata/models.json b/gpt4all-chat/metadata/models.json index 13928a6207fd..af2333f7466a 100644 --- a/gpt4all-chat/metadata/models.json +++ b/gpt4all-chat/metadata/models.json @@ -258,5 +258,20 @@ "systemPrompt": " ", "description": "Trained on subset of the Stack
        • Code completion based
        • Licensed for commercial use
        ", "url": "https://huggingface.co/nomic-ai/ggml-replit-code-v1-3b/resolve/main/ggml-replit-code-v1-3b.bin" + }, + { + "order": "t", + "md5sum": "031bb5d5722c08d13e3e8eaf55c37391", + "disableGUI": "true", + "name": "Bert", + "filename": "ggml-all-MiniLM-L6-v2-f16.bin", + "filesize": "45521167", + "requires": "2.4.14", + "ramrequired": "1", + "parameters": "1 million", + "quant": "f16", + "type": "Bert", + "systemPrompt": " ", + "description": "Sbert
        • For embeddings" } ] From 59cae1132c9409d0533f58264f5f2b33d36652cc Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 13:45:47 -0400 Subject: [PATCH 129/198] Try and unbreak circleci. --- .circleci/continue_config.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 303c441c7a9d..0999c77ba69e 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -717,7 +717,6 @@ workflows: only: requires: - hold - # CSharp Jobs - build-csharp-linux: filters: branches: @@ -745,4 +744,3 @@ workflows: - build-csharp-windows - build-csharp-linux - build-csharp-macos - \ No newline at end of file From b72b409d4040be46d4e25eac66808e0f0c6e49cd Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 13:52:39 -0400 Subject: [PATCH 130/198] try again to unbreak circlci --- .circleci/continue_config.yml | 269 ++++++++++++++++------------------ 1 file changed, 130 insertions(+), 139 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 0999c77ba69e..fd4932b744d8 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -444,156 +444,147 @@ jobs: build-csharp-linux: docker: - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 - steps: - - when: - condition: << pipeline.parameters.run-csharp-workflow >> - steps: - - checkout - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/linux-x64/native - cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ - ls -R runtimes - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes/linux-x64/native + cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ + ls -R runtimes + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet restore Gpt4All + - save_cache: + paths: + - ~/.nuget/packages + key: gpt4all-csharp-nuget-packages-nix + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export PATH="$PATH:$HOME/.dotnet/tools" + trx2junit TestResults/*.trx + - store_test_results: + 
path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults build-csharp-windows: executor: name: win/default size: large shell: powershell.exe -ExecutionPolicy Bypass - steps: - - when: - condition: << pipeline.parameters.run-csharp-workflow >> - steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-win - - attach_workspace: - at: C:\Users\circleci\workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes\win-x64\native - cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet.exe restore Gpt4All - - save_cache: - paths: - - C:\Users\circleci\.nuget\packages - key: gpt4all-csharp-nuget-packages-win - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet.exe build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-win + - attach_workspace: + at: C:\Users\circleci\workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes\win-x64\native + cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\ + ls -R runtimes + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet.exe restore Gpt4All + - save_cache: + paths: + - C:\Users\circleci\.nuget\packages + key: gpt4all-csharp-nuget-packages-win + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet.exe build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults build-csharp-macos: macos: xcode: "14.0.0" steps: - - when: - condition: << pipeline.parameters.run-csharp-workflow >> - steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: Install dependencies - command: | - brew install --cask dotnet-sdk - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/osx/native - cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ - cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd 
gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix + - run: + name: Install dependencies + command: | + brew install --cask dotnet-sdk + - attach_workspace: + at: /tmp/workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes/osx/native + cp /tmp/workspace/runtimes/osx-x64/*.dylib runtimes/osx/native/ + cp /tmp/workspace/runtimes/osx-x64/*.metal runtimes/osx/native/ + ls -R runtimes + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet restore Gpt4All + - save_cache: + paths: + - ~/.nuget/packages + key: gpt4all-csharp-nuget-packages-nix + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export PATH="$PATH:$HOME/.dotnet/tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults store-and-upload-nupkgs: docker: From e59946f05d905f51b143b236f1c43828acb090d8 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 13:55:22 -0400 Subject: [PATCH 131/198] try again to unbreak circleci --- .circleci/continue_config.yml | 84 +++++++++++++++++------------------ 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index fd4932b744d8..0f8b5da5ecde 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -444,48 +444,48 @@ jobs: build-csharp-linux: docker: - image: mcr.microsoft.com/dotnet/sdk:7.0-jammy # Ubuntu 22.04 - steps: - - checkout - - attach_workspace: - at: /tmp/workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes/linux-x64/native - cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ - ls -R runtimes - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-nix - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet restore Gpt4All - - save_cache: - paths: - - ~/.nuget/packages - key: gpt4all-csharp-nuget-packages-nix - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - export PATH="$PATH:$HOME/.dotnet/tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + steps: + - checkout + - attach_workspace: + at: /tmp/workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p 
runtimes/linux-x64/native + cp /tmp/workspace/runtimes/linux-x64/*.so runtimes/linux-x64/native/ + ls -R runtimes + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-nix + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet restore Gpt4All + - save_cache: + paths: + - ~/.nuget/packages + key: gpt4all-csharp-nuget-packages-nix + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + export PATH="$PATH:$HOME/.dotnet/tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults build-csharp-windows: executor: From 64b409e0b853349fb88fd989c3563bc91924fe2c Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 13:57:22 -0400 Subject: [PATCH 132/198] keep trying --- .circleci/continue_config.yml | 84 +++++++++++++++++------------------ 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index 0f8b5da5ecde..f17636fdaff6 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -492,48 +492,48 @@ jobs: name: win/default size: large shell: powershell.exe -ExecutionPolicy Bypass - steps: - - checkout - - restore_cache: - keys: - - gpt4all-csharp-nuget-packages-win - - attach_workspace: - at: C:\Users\circleci\workspace - - run: - name: "Prepare Native Libs" - command: | - cd gpt4all-bindings/csharp - mkdir -p runtimes\win-x64\native - cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\ - ls -R runtimes - - run: - name: "Install project dependencies" - command: | - cd gpt4all-bindings/csharp - dotnet.exe restore Gpt4All - - save_cache: - paths: - - C:\Users\circleci\.nuget\packages - key: gpt4all-csharp-nuget-packages-win - - run: - name: Build C# Project - command: | - cd gpt4all-bindings/csharp - dotnet.exe build Gpt4All --configuration Release --nologo - - run: - name: "Run C# Tests" - command: | - cd gpt4all-bindings/csharp - dotnet.exe test Gpt4All.Tests -v n -c Release --filter "SKIP_ON_CI!=True" --logger "trx" - - run: - name: Test results - command: | - cd gpt4all-bindings/csharp/Gpt4All.Tests - dotnet tool install -g trx2junit - $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" - trx2junit TestResults/*.trx - - store_test_results: - path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults + steps: + - checkout + - restore_cache: + keys: + - gpt4all-csharp-nuget-packages-win + - attach_workspace: + at: C:\Users\circleci\workspace + - run: + name: "Prepare Native Libs" + command: | + cd gpt4all-bindings/csharp + mkdir -p runtimes\win-x64\native + cp C:\Users\circleci\workspace\runtimes\win-x64\*.dll runtimes\win-x64\native\ + ls -R runtimes + - run: + name: "Install project dependencies" + command: | + cd gpt4all-bindings/csharp + dotnet.exe restore Gpt4All + - save_cache: + paths: + - C:\Users\circleci\.nuget\packages + key: gpt4all-csharp-nuget-packages-win + - run: + name: Build C# Project + command: | + cd gpt4all-bindings/csharp + dotnet.exe build Gpt4All --configuration Release --nologo + - run: + name: "Run C# Tests" + command: | + cd gpt4all-bindings/csharp + dotnet.exe test Gpt4All.Tests -v n -c 
Release --filter "SKIP_ON_CI!=True" --logger "trx" + - run: + name: Test results + command: | + cd gpt4all-bindings/csharp/Gpt4All.Tests + dotnet tool install -g trx2junit + $Env:Path += ";$Env:USERPROFILE\.dotnet\tools" + trx2junit TestResults/*.trx + - store_test_results: + path: gpt4all-bindings/csharp/Gpt4All.Tests/TestResults build-csharp-macos: macos: From 33557b1f39e64648ecc2ea1cdc062e39d915e56a Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Fri, 7 Jul 2023 12:34:12 -0400 Subject: [PATCH 133/198] Move the implementation out of llmodel class. --- gpt4all-backend/llmodel.cpp | 22 +++++++------- gpt4all-backend/llmodel.h | 60 ++++++++++++++++++++----------------- 2 files changed, 43 insertions(+), 39 deletions(-) diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index fdf3597a8b2e..5dd33535c308 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -41,7 +41,7 @@ static bool requires_avxonly() { #endif } -LLModel::Implementation::Implementation(Dlhandle &&dlhandle_) : dlhandle(new Dlhandle(std::move(dlhandle_))) { +LLImplementation::LLImplementation(Dlhandle &&dlhandle_) : dlhandle(new Dlhandle(std::move(dlhandle_))) { auto get_model_type = dlhandle->get("get_model_type"); assert(get_model_type); modelType = get_model_type(); @@ -54,7 +54,7 @@ LLModel::Implementation::Implementation(Dlhandle &&dlhandle_) : dlhandle(new Dlh assert(construct_); } -LLModel::Implementation::Implementation(Implementation &&o) +LLImplementation::LLImplementation(LLImplementation &&o) : construct_(o.construct_) , modelType(o.modelType) , buildVariant(o.buildVariant) @@ -63,19 +63,19 @@ LLModel::Implementation::Implementation(Implementation &&o) o.dlhandle = nullptr; } -LLModel::Implementation::~Implementation() { +LLImplementation::~LLImplementation() { if (dlhandle) delete dlhandle; } -bool LLModel::Implementation::isImplementation(const Dlhandle &dl) { +bool LLImplementation::isImplementation(const Dlhandle &dl) { return dl.get("is_g4a_backend_model_implementation"); } -const std::vector &LLModel::implementationList() { +const std::vector &LLModel::implementationList() { // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the // individual models without the cleanup of the static list interfering - static auto* libs = new std::vector([] () { - std::vector fres; + static auto* libs = new std::vector([] () { + std::vector fres; auto search_in_directory = [&](const std::string& paths) { std::stringstream ss(paths); @@ -90,10 +90,10 @@ const std::vector &LLModel::implementationList() { // Add to list if model implementation try { Dlhandle dl(p.string()); - if (!Implementation::isImplementation(dl)) { + if (!LLImplementation::isImplementation(dl)) { continue; } - fres.emplace_back(Implementation(std::move(dl))); + fres.emplace_back(LLImplementation(std::move(dl))); } catch (...) 
{} } } @@ -107,7 +107,7 @@ const std::vector &LLModel::implementationList() { return *libs; } -const LLModel::Implementation* LLModel::implementation(std::ifstream& f, const std::string& buildVariant) { +const LLImplementation* LLModel::implementation(std::ifstream& f, const std::string& buildVariant) { for (const auto& i : implementationList()) { f.seekg(0); if (!i.magicMatch(f)) continue; @@ -126,7 +126,7 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria std::ifstream f(modelPath, std::ios::binary); if (!f) return nullptr; // Get correct implementation - const LLModel::Implementation* impl = nullptr; + const LLImplementation* impl = nullptr; #if defined(__APPLE__) && defined(__arm64__) // FIXME: See if metal works for intel macs if (buildVariant == "auto") { diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h index ce7a6f57bf15..920bc350e2a2 100644 --- a/gpt4all-backend/llmodel.h +++ b/gpt4all-backend/llmodel.h @@ -12,34 +12,11 @@ #define LLMODEL_MAX_PROMPT_BATCH 128 class Dlhandle; - +class LLImplementation; class LLModel { public: using Token = int32_t; - class Implementation { - LLModel *(*construct_)(); - - public: - Implementation(Dlhandle&&); - Implementation(const Implementation&) = delete; - Implementation(Implementation&&); - ~Implementation(); - - static bool isImplementation(const Dlhandle&); - - std::string_view modelType, buildVariant; - bool (*magicMatch)(std::ifstream& f); - Dlhandle *dlhandle; - - // The only way an implementation should be constructed - LLModel *construct() const { - auto fres = construct_(); - fres->m_implementation = this; - return fres; - } - }; - struct PromptContext { std::vector logits; // logits of current context std::vector tokens; // current tokens in the context window @@ -74,12 +51,12 @@ class LLModel { virtual void setThreadCount(int32_t /*n_threads*/) {} virtual int32_t threadCount() const { return 1; } - const Implementation& implementation() const { + const LLImplementation& implementation() const { return *m_implementation; } - static const std::vector& implementationList(); - static const Implementation *implementation(std::ifstream& f, const std::string& buildVariant); + static const std::vector& implementationList(); + static const LLImplementation *implementation(std::ifstream& f, const std::string& buildVariant); static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto"); static void setImplementationsSearchPath(const std::string& path); @@ -99,6 +76,33 @@ class LLModel { // shared by all base classes so it isn't virtual void recalculateContext(PromptContext &promptCtx, std::function recalculate); - const Implementation *m_implementation = nullptr; + const LLImplementation *m_implementation = nullptr; + +private: + friend class LLImplementation; }; + +class LLImplementation { + LLModel *(*construct_)(); + +public: + LLImplementation(Dlhandle&&); + LLImplementation(const LLImplementation&) = delete; + LLImplementation(LLImplementation&&); + ~LLImplementation(); + + static bool isImplementation(const Dlhandle&); + + std::string_view modelType, buildVariant; + bool (*magicMatch)(std::ifstream& f); + Dlhandle *dlhandle; + + // The only way an implementation should be constructed + LLModel *construct() const { + auto fres = construct_(); + fres->m_implementation = this; + return fres; + } +}; + #endif // LLMODEL_H From 1f749d7633a67f3272a7dff088b2e2c7e415fb45 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sat, 8 Jul 2023 10:04:38 -0400 Subject: [PATCH 
134/198] Clean up backend code a bit and hide impl. details. --- gpt4all-backend/llmodel.cpp | 64 ++++++++++++++++-------------- gpt4all-backend/llmodel.h | 51 +++++++++++------------- gpt4all-backend/llmodel_c.cpp | 6 +-- gpt4all-backend/llmodel_shared.cpp | 12 +++--- gpt4all-chat/chatllm.cpp | 8 ++-- gpt4all-chat/llm.cpp | 2 +- 6 files changed, 72 insertions(+), 71 deletions(-) diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index 5dd33535c308..d9300f04064e 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -41,41 +41,42 @@ static bool requires_avxonly() { #endif } -LLImplementation::LLImplementation(Dlhandle &&dlhandle_) : dlhandle(new Dlhandle(std::move(dlhandle_))) { - auto get_model_type = dlhandle->get("get_model_type"); +LLMImplementation::LLMImplementation(Dlhandle &&dlhandle_) + : m_dlhandle(new Dlhandle(std::move(dlhandle_))) { + auto get_model_type = m_dlhandle->get("get_model_type"); assert(get_model_type); - modelType = get_model_type(); - auto get_build_variant = dlhandle->get("get_build_variant"); + m_modelType = get_model_type(); + auto get_build_variant = m_dlhandle->get("get_build_variant"); assert(get_build_variant); - buildVariant = get_build_variant(); - magicMatch = dlhandle->get("magic_match"); + m_buildVariant = get_build_variant(); + m_magicMatch = m_dlhandle->get("magic_match"); assert(magicMatch); - construct_ = dlhandle->get("construct"); + m_construct = m_dlhandle->get("construct"); assert(construct_); } -LLImplementation::LLImplementation(LLImplementation &&o) - : construct_(o.construct_) - , modelType(o.modelType) - , buildVariant(o.buildVariant) - , magicMatch(o.magicMatch) - , dlhandle(o.dlhandle) { - o.dlhandle = nullptr; +LLMImplementation::LLMImplementation(LLMImplementation &&o) + : m_magicMatch(o.m_magicMatch) + , m_construct(o.m_construct) + , m_modelType(o.m_modelType) + , m_buildVariant(o.m_buildVariant) + , m_dlhandle(o.m_dlhandle) { + o.m_dlhandle = nullptr; } -LLImplementation::~LLImplementation() { - if (dlhandle) delete dlhandle; +LLMImplementation::~LLMImplementation() { + if (m_dlhandle) delete m_dlhandle; } -bool LLImplementation::isImplementation(const Dlhandle &dl) { +bool LLMImplementation::isImplementation(const Dlhandle &dl) { return dl.get("is_g4a_backend_model_implementation"); } -const std::vector &LLModel::implementationList() { +const std::vector &LLMImplementation::implementationList() { // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the // individual models without the cleanup of the static list interfering - static auto* libs = new std::vector([] () { - std::vector fres; + static auto* libs = new std::vector([] () { + std::vector fres; auto search_in_directory = [&](const std::string& paths) { std::stringstream ss(paths); @@ -90,10 +91,10 @@ const std::vector &LLModel::implementationList() { // Add to list if model implementation try { Dlhandle dl(p.string()); - if (!LLImplementation::isImplementation(dl)) { + if (!LLMImplementation::isImplementation(dl)) { continue; } - fres.emplace_back(LLImplementation(std::move(dl))); + fres.emplace_back(LLMImplementation(std::move(dl))); } catch (...) 
{} } } @@ -107,17 +108,17 @@ const std::vector &LLModel::implementationList() { return *libs; } -const LLImplementation* LLModel::implementation(std::ifstream& f, const std::string& buildVariant) { +const LLMImplementation* LLMImplementation::implementation(std::ifstream& f, const std::string& buildVariant) { for (const auto& i : implementationList()) { f.seekg(0); - if (!i.magicMatch(f)) continue; - if (buildVariant != i.buildVariant) continue; + if (!i.m_magicMatch(f)) continue; + if (buildVariant != i.m_buildVariant) continue; return &i; } return nullptr; } -LLModel *LLModel::construct(const std::string &modelPath, std::string buildVariant) { +LLModel *LLMImplementation::construct(const std::string &modelPath, std::string buildVariant) { if (!has_at_least_minimal_hardware()) return nullptr; @@ -126,7 +127,7 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria std::ifstream f(modelPath, std::ios::binary); if (!f) return nullptr; // Get correct implementation - const LLImplementation* impl = nullptr; + const LLMImplementation* impl = nullptr; #if defined(__APPLE__) && defined(__arm64__) // FIXME: See if metal works for intel macs if (buildVariant == "auto") { @@ -160,14 +161,17 @@ LLModel *LLModel::construct(const std::string &modelPath, std::string buildVaria if (!impl) return nullptr; } f.close(); + // Construct and return llmodel implementation - return impl->construct(); + auto fres = impl->m_construct(); + fres->m_implementation = impl; + return fres; } -void LLModel::setImplementationsSearchPath(const std::string& path) { +void LLMImplementation::setImplementationsSearchPath(const std::string& path) { s_implementations_search_path = path; } -const std::string& LLModel::implementationsSearchPath() { +const std::string& LLMImplementation::implementationsSearchPath() { return s_implementations_search_path; } diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h index 920bc350e2a2..a5820174bdf3 100644 --- a/gpt4all-backend/llmodel.h +++ b/gpt4all-backend/llmodel.h @@ -12,7 +12,7 @@ #define LLMODEL_MAX_PROMPT_BATCH 128 class Dlhandle; -class LLImplementation; +class LLMImplementation; class LLModel { public: using Token = int32_t; @@ -51,17 +51,10 @@ class LLModel { virtual void setThreadCount(int32_t /*n_threads*/) {} virtual int32_t threadCount() const { return 1; } - const LLImplementation& implementation() const { + const LLMImplementation& implementation() const { return *m_implementation; } - static const std::vector& implementationList(); - static const LLImplementation *implementation(std::ifstream& f, const std::string& buildVariant); - static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto"); - - static void setImplementationsSearchPath(const std::string& path); - static const std::string& implementationsSearchPath(); - protected: // These are pure virtual because subclasses need to implement as the default implementation of // 'prompt' above calls these functions @@ -76,33 +69,37 @@ class LLModel { // shared by all base classes so it isn't virtual void recalculateContext(PromptContext &promptCtx, std::function recalculate); - const LLImplementation *m_implementation = nullptr; + const LLMImplementation *m_implementation = nullptr; private: - friend class LLImplementation; + friend class LLMImplementation; }; -class LLImplementation { - LLModel *(*construct_)(); - +class LLMImplementation { public: - LLImplementation(Dlhandle&&); - LLImplementation(const LLImplementation&) = delete; - 
LLImplementation(LLImplementation&&); - ~LLImplementation(); + LLMImplementation(Dlhandle&&); + LLMImplementation(const LLMImplementation&) = delete; + LLMImplementation(LLMImplementation&&); + ~LLMImplementation(); + + std::string_view modelType() const { return m_modelType; } + std::string_view buildVariant() const { return m_buildVariant; } static bool isImplementation(const Dlhandle&); + static const std::vector& implementationList(); + static const LLMImplementation *implementation(std::ifstream& f, const std::string& buildVariant); + static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto"); + static void setImplementationsSearchPath(const std::string& path); + static const std::string& implementationsSearchPath(); - std::string_view modelType, buildVariant; - bool (*magicMatch)(std::ifstream& f); - Dlhandle *dlhandle; +private: + bool (*m_magicMatch)(std::ifstream& f); + LLModel *(*m_construct)(); - // The only way an implementation should be constructed - LLModel *construct() const { - auto fres = construct_(); - fres->m_implementation = this; - return fres; - } +private: + std::string_view m_modelType; + std::string_view m_buildVariant; + Dlhandle *m_dlhandle; }; #endif // LLMODEL_H diff --git a/gpt4all-backend/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp index 15e5e89166f7..2364e4fa7ffa 100644 --- a/gpt4all-backend/llmodel_c.cpp +++ b/gpt4all-backend/llmodel_c.cpp @@ -29,7 +29,7 @@ llmodel_model llmodel_model_create2(const char *model_path, const char *build_va int error_code = 0; try { - wrapper->llModel = LLModel::construct(model_path, build_variant); + wrapper->llModel = LLMImplementation::construct(model_path, build_variant); } catch (const std::exception& e) { error_code = EINVAL; last_error_message = e.what(); @@ -180,10 +180,10 @@ int32_t llmodel_threadCount(llmodel_model model) void llmodel_set_implementation_search_path(const char *path) { - LLModel::setImplementationsSearchPath(path); + LLMImplementation::setImplementationsSearchPath(path); } const char *llmodel_get_implementation_search_path() { - return LLModel::implementationsSearchPath().c_str(); + return LLMImplementation::implementationsSearchPath().c_str(); } diff --git a/gpt4all-backend/llmodel_shared.cpp b/gpt4all-backend/llmodel_shared.cpp index cd4ace04b692..881ea5ec3c8d 100644 --- a/gpt4all-backend/llmodel_shared.cpp +++ b/gpt4all-backend/llmodel_shared.cpp @@ -33,7 +33,7 @@ void LLModel::prompt(const std::string &prompt, PromptContext &promptCtx) { if (!isModelLoaded()) { - std::cerr << implementation().modelType << " ERROR: prompt won't work with an unloaded model!\n"; + std::cerr << implementation().modelType() << " ERROR: prompt won't work with an unloaded model!\n"; return; } @@ -45,7 +45,7 @@ void LLModel::prompt(const std::string &prompt, if ((int) embd_inp.size() > promptCtx.n_ctx - 4) { responseCallback(-1, "ERROR: The prompt size exceeds the context window size and cannot be processed."); - std::cerr << implementation().modelType << " ERROR: The prompt is" << embd_inp.size() << + std::cerr << implementation().modelType() << " ERROR: The prompt is" << embd_inp.size() << "tokens and the context window is" << promptCtx.n_ctx << "!\n"; return; } @@ -64,7 +64,7 @@ void LLModel::prompt(const std::string &prompt, if (promptCtx.n_past + int32_t(batch.size()) > promptCtx.n_ctx) { const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase; // Erase the first percentage of context from the tokens... 
- std::cerr << implementation().modelType << ": reached the end of the context window so resizing\n"; + std::cerr << implementation().modelType() << ": reached the end of the context window so resizing\n"; promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint); promptCtx.n_past = promptCtx.tokens.size(); recalculateContext(promptCtx, recalculateCallback); @@ -72,7 +72,7 @@ void LLModel::prompt(const std::string &prompt, } if (!evalTokens(promptCtx, batch)) { - std::cerr << implementation().modelType << " ERROR: Failed to process prompt\n"; + std::cerr << implementation().modelType() << " ERROR: Failed to process prompt\n"; return; } @@ -103,7 +103,7 @@ void LLModel::prompt(const std::string &prompt, if (promptCtx.n_past + 1 > promptCtx.n_ctx) { const int32_t erasePoint = promptCtx.n_ctx * promptCtx.contextErase; // Erase the first percentage of context from the tokens... - std::cerr << implementation().modelType << ": reached the end of the context window so resizing\n"; + std::cerr << implementation().modelType() << ": reached the end of the context window so resizing\n"; promptCtx.tokens.erase(promptCtx.tokens.begin(), promptCtx.tokens.begin() + erasePoint); promptCtx.n_past = promptCtx.tokens.size(); recalculateContext(promptCtx, recalculateCallback); @@ -111,7 +111,7 @@ void LLModel::prompt(const std::string &prompt, } if (!evalTokens(promptCtx, { id })) { - std::cerr << implementation().modelType << " ERROR: Failed to predict next token\n"; + std::cerr << implementation().modelType() << " ERROR: Failed to predict next token\n"; return; } diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 181b84528a4b..fa11cdbbdf48 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -240,11 +240,11 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) #if defined(Q_OS_MAC) && defined(__arm__) if (m_forceMetal) - m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "metal"); + m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "metal"); else - m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "auto"); + m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "auto"); #else - m_llModelInfo.model = LLModel::construct(filePath.toStdString(), "auto"); + m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "auto"); #endif if (m_llModelInfo.model) { @@ -258,7 +258,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) m_llModelInfo = LLModelInfo(); emit modelLoadingError(QString("Could not load model due to invalid model file for %1").arg(modelInfo.filename())); } else { - switch (m_llModelInfo.model->implementation().modelType[0]) { + switch (m_llModelInfo.model->implementation().modelType()[0]) { case 'L': m_llModelType = LLModelType::LLAMA_; break; case 'G': m_llModelType = LLModelType::GPTJ_; break; case 'M': m_llModelType = LLModelType::MPT_; break; diff --git a/gpt4all-chat/llm.cpp b/gpt4all-chat/llm.cpp index f831ea477a32..ff62d43e7349 100644 --- a/gpt4all-chat/llm.cpp +++ b/gpt4all-chat/llm.cpp @@ -34,7 +34,7 @@ LLM::LLM() if (directoryExists(frameworksDir)) llmodelSearchPaths += ";" + frameworksDir; #endif - LLModel::setImplementationsSearchPath(llmodelSearchPaths.toStdString()); + LLMImplementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString()); #if defined(__x86_64__) #ifndef _MSC_VER From ae8eb297ac51d9e6336a7e23549ca9ae69264dcd Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Fri, 7 Jul 2023 09:31:34 -0400 Subject: 
[PATCH 135/198] Add sbert backend. --- gpt4all-backend/CMakeLists.txt | 4 + gpt4all-backend/bert.cpp | 979 ++++++++++++++++++++++ gpt4all-backend/bert.h | 71 ++ gpt4all-chat/CMakeLists.txt | 2 + gpt4all-chat/cmake/deploy-qt-mac.cmake.in | 5 +- 5 files changed, 1060 insertions(+), 1 deletion(-) create mode 100644 gpt4all-backend/bert.cpp create mode 100644 gpt4all-backend/bert.h diff --git a/gpt4all-backend/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt index 5f91a88ecd4d..5ca3b688e6eb 100644 --- a/gpt4all-backend/CMakeLists.txt +++ b/gpt4all-backend/CMakeLists.txt @@ -125,6 +125,10 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS) add_library(mpt-${BUILD_VARIANT} SHARED mpt.cpp utils.h utils.cpp llmodel_shared.cpp llmodel_shared.h) prepare_target(mpt ggml-230511) + + add_library(bert-${BUILD_VARIANT} SHARED + bert.cpp utils.h utils.cpp llmodel_shared.cpp llmodel_shared.h) + prepare_target(bert llama-mainline) endif() endforeach() diff --git a/gpt4all-backend/bert.cpp b/gpt4all-backend/bert.cpp new file mode 100644 index 000000000000..318efdc7309e --- /dev/null +++ b/gpt4all-backend/bert.cpp @@ -0,0 +1,979 @@ +#include "bert.h" +#include "ggml.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#define DEBUG_BERT + +// default hparams (all-MiniLM-L6-v2) +struct bert_hparams +{ + int32_t n_vocab = 30522; + int32_t n_max_tokens = 512; + int32_t n_embd = 256; + int32_t n_intermediate = 1536; + int32_t n_head = 12; + int32_t n_layer = 6; + int32_t f16 = 1; +}; + +struct bert_layer +{ + // normalization + struct ggml_tensor *ln_att_w; + struct ggml_tensor *ln_att_b; + + struct ggml_tensor *ln_out_w; + struct ggml_tensor *ln_out_b; + + // attention + struct ggml_tensor *q_w; + struct ggml_tensor *q_b; + struct ggml_tensor *k_w; + struct ggml_tensor *k_b; + struct ggml_tensor *v_w; + struct ggml_tensor *v_b; + + struct ggml_tensor *o_w; + struct ggml_tensor *o_b; + + // ff + struct ggml_tensor *ff_i_w; + struct ggml_tensor *ff_i_b; + + struct ggml_tensor *ff_o_w; + struct ggml_tensor *ff_o_b; +}; + +struct bert_vocab +{ + std::map token_to_id; + std::map subword_token_to_id; + + std::map _id_to_token; + std::map _id_to_subword_token; +}; + +struct bert_model +{ + bert_hparams hparams; + + // embeddings weights + struct ggml_tensor *word_embeddings; + struct ggml_tensor *token_type_embeddings; + struct ggml_tensor *position_embeddings; + struct ggml_tensor *ln_e_w; + struct ggml_tensor *ln_e_b; + + std::vector layers; + + struct ggml_context *ctx; + std::map tensors; +}; + +// Replacement for std::vector that doesn't require zero-initialization. 
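+// (std::vector<uint8_t>::resize() value-initializes, i.e. zeroes, every new byte; for a
+// multi-megabyte compute scratch buffer that ggml immediately overwrites, that memset is
+// pure overhead, hence the raw new[]/delete[] below.)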
+struct bert_buffer { + uint8_t * data = NULL; + size_t size = 0; + + void resize(size_t size) { + delete[] data; + data = new uint8_t[size]; + this->size = size; + } + + ~bert_buffer() { + delete[] data; + } +}; + + +struct bert_ctx +{ + bert_model model; + bert_vocab vocab; + + size_t mem_per_token; + int64_t mem_per_input; + int32_t max_batch_n; + bert_buffer buf_compute; +}; + +int32_t bert_n_embd(bert_ctx * ctx) +{ + return ctx->model.hparams.n_embd; +} + +int32_t bert_n_max_tokens(bert_ctx * ctx) +{ + return ctx->model.hparams.n_max_tokens; +} + +const char* bert_vocab_id_to_token(bert_ctx * ctx, bert_vocab_id id) { + bert_vocab & vocab = ctx->vocab; + auto it = vocab._id_to_token.find(id); + if (it != vocab._id_to_token.end()) + { + return it->second.c_str(); + } + it = vocab._id_to_subword_token.find(id); + if (it != vocab._id_to_subword_token.end()) + { + return it->second.c_str(); + } + return "[UNK TOKEN from bert_vocab]"; +} + +// +// Tokenizing +// + +static size_t utf8_len(char src) +{ + const size_t lookup[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4}; + uint8_t highbits = static_cast(src) >> 4; + return lookup[highbits]; +} + +std::string stripAccents(const std::string &inputString) +{ + std::string resultString; + std::map accentMap = {{"À", 'A'},{"Á", 'A'}, + {"Â", 'A'},{"Ã", 'A'},{"Ä", 'A'},{"Å", 'A'},{"à", 'a'},{"á", 'a'}, + {"â", 'a'},{"ã", 'a'},{"ä", 'a'},{"å", 'a'},{"È", 'E'},{"É", 'E'}, + {"Ê", 'E'},{"Ë", 'E'},{"è", 'e'},{"é", 'e'},{"ê", 'e'},{"ë", 'e'}, + {"Ì", 'I'},{"Í", 'I'},{"Î", 'I'},{"Ï", 'I'},{"ì", 'i'},{"í", 'i'}, + {"î", 'i'},{"ï", 'i'},{"Ò", 'O'},{"Ó", 'O'},{"Ô", 'O'},{"Õ", 'O'}, + {"Ö", 'O'},{"ò", 'o'},{"ó", 'o'},{"ô", 'o'},{"õ", 'o'},{"ö", 'o'}, + {"Ù", 'U'},{"Ú", 'U'},{"Û", 'U'},{"Ü", 'U'},{"ù", 'u'},{"ú", 'u'}, + {"û", 'u'},{"ü", 'u'},{"Ý", 'Y'},{"ý", 'y'},{"Ç", 'C'},{"ç", 'c'}, + {"Ñ", 'N'},{"ñ", 'n'}, + }; + + for (size_t i = 0; i < inputString.length();) + { + int len = utf8_len(inputString[i]); + std::string curChar = inputString.substr(i, len); + auto iter = accentMap.find(curChar); + if (iter != accentMap.end()) + { + resultString += iter->second; + } + else + { + resultString += curChar; + } + i += len; + } + + return resultString; +} + +std::string bert_normalize_prompt(const std::string &text) +{ + // TODO: handle chinese characters? 
https://github.com/huggingface/tokenizers/blob/ef5f50605ddf9f8caef1598c0e4853862b9707a7/tokenizers/src/normalizers/bert.rs#L98 + std::string text2 = stripAccents(text); + for (size_t i = 0; i < text2.size(); i += utf8_len(text2[i])) + { + char c = text2[i]; + if (c >= 'A' && c <= 'Z') + text2[i] = c - 'A' + 'a'; + } + return text2; +} +void bert_tokenize( + struct bert_ctx * ctx, + const char * text, + bert_vocab_id * tokens, + int32_t * n_tokens, + int32_t n_max_tokens) +{ + int cls_tok_id = 101; + int sep_tok_id = 102; + const bert_vocab &vocab = ctx->vocab; + + std::string str = text; + + std::vector words; + // first split the text into words + { + str = bert_normalize_prompt(str); + + std::string pat = R"([[:punct:]]|[[:alpha:]]+|[[:digit:]]+)"; + + std::regex re(pat); + std::smatch m; + + while (std::regex_search(str, m, re)) + { + for (std::string x : m) + { + words.push_back(x); + } + str = m.suffix(); + } + } + + int32_t t = 0; + tokens[t++] = cls_tok_id; + + // find the longest tokens that form the words: + for (const auto &word : words) + { + if (word.size() == 0) + continue; + + int i = 0; + int n = word.size(); + auto *token_map = &vocab.token_to_id; + loop: + while (i < n) + { + if (t >= n_max_tokens - 1) + break; + int j = n; + while (j > i) + { + auto it = token_map->find(word.substr(i, j - i)); + if (it != token_map->end()) + { + tokens[t++] = it->second; + i = j; + token_map = &vocab.subword_token_to_id; + goto loop; + } + --j; + } + if (j == i) + { + fprintf(stderr, "%s: unknown token '%s'\n", __func__, word.substr(i, 1).data()); + token_map = &vocab.subword_token_to_id; + ++i; + } + } + } + tokens[t++] = sep_tok_id; + *n_tokens = t; +} + +// +// Loading and setup +// + +struct bert_ctx * bert_load_from_file(const char *fname) +{ +#if defined(DEBUG_BERT) + printf("%s: loading model from '%s' - please wait ...\n", __func__, fname); +#endif + + auto fin = std::ifstream(fname, std::ios::binary); + if (!fin) + { + fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname); + return nullptr; + } + + // verify magic + { + uint32_t magic; + fin.read((char *)&magic, sizeof(magic)); + if (magic != 0x67676d6c) + { + fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname); + return nullptr; + } + } + + bert_ctx * new_bert = new bert_ctx; + bert_model & model = new_bert->model; + bert_vocab & vocab = new_bert->vocab; + + // load hparams + { + auto &hparams = model.hparams; + + fin.read((char *)&hparams.n_vocab, sizeof(hparams.n_vocab)); + fin.read((char *)&hparams.n_max_tokens, sizeof(hparams.n_max_tokens)); + fin.read((char *)&hparams.n_embd, sizeof(hparams.n_embd)); + fin.read((char *)&hparams.n_intermediate, sizeof(hparams.n_intermediate)); + fin.read((char *)&hparams.n_head, sizeof(hparams.n_head)); + fin.read((char *)&hparams.n_layer, sizeof(hparams.n_layer)); + fin.read((char *)&hparams.f16, sizeof(hparams.f16)); + +#if defined(DEBUG_BERT) + printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab); + printf("%s: n_max_tokens = %d\n", __func__, hparams.n_max_tokens); + printf("%s: n_embd = %d\n", __func__, hparams.n_embd); + printf("%s: n_intermediate = %d\n", __func__, hparams.n_intermediate); + printf("%s: n_head = %d\n", __func__, hparams.n_head); + printf("%s: n_layer = %d\n", __func__, hparams.n_layer); + printf("%s: f16 = %d\n", __func__, hparams.f16); +#endif + } + + // load vocab + { + int32_t n_vocab = model.hparams.n_vocab; + + std::string word; + for (int i = 0; i < n_vocab; i++) + { + uint32_t len; + fin.read((char *)&len, sizeof(len)); + + 
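+            // (each vocab entry is stored as a uint32 byte length followed by the raw
+            // token bytes; entries beginning with "##" are WordPiece continuation pieces
+            // and are routed into the separate subword map below)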
word.resize(len); + fin.read((char *)word.data(), len); + + if (word[0] == '#' && word[1] == '#') + { + vocab.subword_token_to_id[word.substr(2)] = i; + vocab._id_to_subword_token[i] = word; + } + + if (vocab.token_to_id.count(word) == 0) + { + vocab.token_to_id[word] = i; + vocab._id_to_token[i] = word; + } + } + } + + // for the big tensors, we have the option to store the data in 16-bit floats or quantized + // in order to save memory and also to speed up the computation + ggml_type wtype = GGML_TYPE_COUNT; + switch (model.hparams.f16) + { + case 0: + wtype = GGML_TYPE_F32; + break; + case 1: + wtype = GGML_TYPE_F16; + break; + case 2: + wtype = GGML_TYPE_Q4_0; + break; + case 3: + wtype = GGML_TYPE_Q4_1; + break; + default: + { + fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n", + __func__, fname, model.hparams.f16); + bert_free(new_bert); + return nullptr; + } + } + + auto &ctx = model.ctx; + + size_t model_mem_req = 0; + + { + const auto &hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_max_tokens = hparams.n_max_tokens; + const int n_intermediate = hparams.n_intermediate; + const int n_vocab = hparams.n_vocab; + + // Calculate size requirements + + model_mem_req += n_embd * n_vocab * ggml_type_sizef(wtype); // word_embeddings + model_mem_req += n_embd * 2 * ggml_type_sizef(wtype); // token_type_embeddings + model_mem_req += n_embd * n_max_tokens * ggml_type_sizef(wtype); // position_embeddings + + model_mem_req += 2 * n_embd * ggml_type_sizef(GGML_TYPE_F32); // ln_e_* + + model_mem_req += 4 * n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_* + + model_mem_req += 4 * n_layer * (n_embd * n_embd * ggml_type_sizef(wtype)); // kqvo weights + model_mem_req += 4 * n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // kqvo bias + + model_mem_req += 2 * n_layer * (n_embd * n_intermediate * ggml_type_sizef(wtype)); // ff_*_w + model_mem_req += n_layer * (n_intermediate * ggml_type_sizef(GGML_TYPE_F32)); // ff_i_b + model_mem_req += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ff_o_b + + model_mem_req += (5 + 16 * n_layer) * 256; // object overhead + +#if defined(DEBUG_BERT) + printf("%s: ggml ctx size = %6.2f MB\n", __func__, model_mem_req / (1024.0 * 1024.0)); +#endif + } + + // create the ggml context + { + struct ggml_init_params params = { + .mem_size = model_mem_req, + .mem_buffer = NULL, + .no_alloc = false, + }; + + model.ctx = ggml_init(params); + if (!model.ctx) + { + fprintf(stderr, "%s: ggml_init() failed\n", __func__); + bert_free(new_bert); + return nullptr; + } + } + + // prepare memory for the weights + { + const auto &hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_intermediate = hparams.n_intermediate; + const int n_max_tokens = hparams.n_max_tokens; + const int n_vocab = hparams.n_vocab; + + model.layers.resize(n_layer); + + model.word_embeddings = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab); + model.token_type_embeddings = ggml_new_tensor_2d(ctx, wtype, n_embd, 2); + model.position_embeddings = ggml_new_tensor_2d(ctx, wtype, n_embd, n_max_tokens); + + model.ln_e_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + model.ln_e_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + + // map by name + model.tensors["embeddings.word_embeddings.weight"] = model.word_embeddings; + model.tensors["embeddings.token_type_embeddings.weight"] = model.token_type_embeddings; + 
model.tensors["embeddings.position_embeddings.weight"] = model.position_embeddings; + + model.tensors["embeddings.LayerNorm.weight"] = model.ln_e_w; + model.tensors["embeddings.LayerNorm.bias"] = model.ln_e_b; + + for (int i = 0; i < n_layer; ++i) + { + auto &layer = model.layers[i]; + + layer.ln_att_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.ln_att_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.ln_out_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.ln_out_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + + layer.q_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.q_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.k_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.k_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.v_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.v_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + layer.o_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd); + layer.o_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + + layer.ff_i_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_intermediate); + layer.ff_i_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_intermediate); + + layer.ff_o_w = ggml_new_tensor_2d(ctx, wtype, n_intermediate, n_embd); + layer.ff_o_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd); + + // map by name + + model.tensors["encoder.layer." + std::to_string(i) + ".attention.self.query.weight"] = layer.q_w; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.self.query.bias"] = layer.q_b; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.self.key.weight"] = layer.k_w; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.self.key.bias"] = layer.k_b; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.self.value.weight"] = layer.v_w; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.self.value.bias"] = layer.v_b; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.output.LayerNorm.weight"] = layer.ln_att_w; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.output.LayerNorm.bias"] = layer.ln_att_b; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.output.dense.weight"] = layer.o_w; + model.tensors["encoder.layer." + std::to_string(i) + ".attention.output.dense.bias"] = layer.o_b; + + model.tensors["encoder.layer." + std::to_string(i) + ".intermediate.dense.weight"] = layer.ff_i_w; + model.tensors["encoder.layer." + std::to_string(i) + ".intermediate.dense.bias"] = layer.ff_i_b; + + model.tensors["encoder.layer." + std::to_string(i) + ".output.LayerNorm.weight"] = layer.ln_out_w; + model.tensors["encoder.layer." + std::to_string(i) + ".output.LayerNorm.bias"] = layer.ln_out_b; + model.tensors["encoder.layer." + std::to_string(i) + ".output.dense.weight"] = layer.ff_o_w; + model.tensors["encoder.layer." 
+ std::to_string(i) + ".output.dense.bias"] = layer.ff_o_b; + } + } + + // load weights + { + int n_tensors = 0; + size_t total_size = 0; + +#if defined(DEBUG_BERT) + printf("%s: ", __func__); +#endif + + while (true) + { + int32_t n_dims; + int32_t length; + int32_t ftype; + + fin.read(reinterpret_cast(&n_dims), sizeof(n_dims)); + fin.read(reinterpret_cast(&length), sizeof(length)); + fin.read(reinterpret_cast(&ftype), sizeof(ftype)); + + if (fin.eof()) + { + break; + } + + int64_t nelements = 1; + int64_t ne[2] = {1, 1}; + for (int i = 0; i < n_dims; ++i) + { + int32_t ne_cur; + fin.read(reinterpret_cast(&ne_cur), sizeof(ne_cur)); + ne[i] = ne_cur; + nelements *= ne[i]; + } + + std::string name(length, 0); + fin.read(&name[0], length); + + if (model.tensors.find(name.data()) == model.tensors.end()) + { + fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data()); + bert_free(new_bert); + return nullptr; + } + + auto tensor = model.tensors[name.data()]; + if (ggml_nelements(tensor) != nelements) + { + fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data()); + bert_free(new_bert); + return nullptr; + } + + if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) + { + fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%ld, %ld], expected [%ld, %ld]\n", + __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]); + bert_free(new_bert); + return nullptr; + } + +#if defined(DEBUG_BERT) + static const char *ftype_str[] = { + "f32", + "f16", + "q4_0", + "q4_1", + }; + printf("%24s - [%5ld, %5ld], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ftype_str[ftype], ggml_nbytes(tensor) / 1024.0 / 1024.0, ggml_nbytes(tensor)); +#endif + + size_t bpe = 0; + + switch (ftype) + { + case 0: + bpe = ggml_type_size(GGML_TYPE_F32); + break; + case 1: + bpe = ggml_type_size(GGML_TYPE_F16); + break; + case 2: + bpe = ggml_type_size(GGML_TYPE_Q4_0); + assert(ne[0] % 64 == 0); + break; + case 3: + bpe = ggml_type_size(GGML_TYPE_Q4_1); + assert(ne[0] % 64 == 0); + break; + default: + { + fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype); + bert_free(new_bert); + return nullptr; + } + }; + + if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) + { + fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %lu\n", + __func__, name.data(), ggml_nbytes(tensor), nelements * bpe); + bert_free(new_bert); + return nullptr; + } + + fin.read(reinterpret_cast(tensor->data), ggml_nbytes(tensor)); + + // printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0); + total_size += ggml_nbytes(tensor); + + if (++n_tensors % 8 == 0) + { +#if defined(DEBUG_BERT) + printf("."); + fflush(stdout); +#endif + } + } + +#if defined(DEBUG_BERT) + printf(" done\n"); + printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size / 1024.0 / 1024.0, n_tensors); +#endif + } + + fin.close(); + + // Calculate space requirements for setting up context buffers later + { + bert_vocab_id tokens[] = {0, 1, 2, 3}; + // TODO: We set the initial buffer size to 16MB and hope it's enough. Maybe there is a better way to do this? + new_bert->buf_compute.resize(16 * 1024 * 1024); + bert_eval(new_bert, 1, tokens, 4, nullptr); + new_bert->max_batch_n = 0; + + // TODO: Max tokens should be a param? 
+ int32_t N = new_bert->model.hparams.n_max_tokens; + new_bert->mem_per_input = 1.1 * (new_bert->mem_per_token * N); // add 10% to account for ggml object overhead + + } +#if defined(DEBUG_BERT) + printf("%s: mem_per_token %ld KB, mem_per_input %ld MB\n", __func__, new_bert->mem_per_token / (1 << 10), new_bert->mem_per_input / (1 << 20)); +#endif + + return new_bert; +} + +void bert_resize_ctx(bert_ctx * ctx, int32_t new_size) { + int64_t buf_size_new = ctx->mem_per_input * new_size; + + // TODO: Max memory should be a param? Now just 1 GB + int64_t GB = 1 << 30; + //printf("%s: requested_buf_size %ldMB\n", __func__, buf_size_new / (1 << 20)); + if (buf_size_new > GB) { + int32_t adjusted_new_size = GB / ctx->mem_per_input; + if (adjusted_new_size < 1) adjusted_new_size = 1; + //printf("%s: requested batch size %d, actual new batch size %d\n", __func__, new_size, adjusted_new_size); + new_size = adjusted_new_size; + buf_size_new = ctx->mem_per_input * new_size; + } + if (new_size > ctx->max_batch_n) { + ctx->buf_compute.resize(buf_size_new); + ctx->max_batch_n = new_size; + } +} + +void bert_free(bert_ctx * ctx) { + ggml_free(ctx->model.ctx); + delete ctx; +} + +void bert_eval( + struct bert_ctx *ctx, + int32_t n_threads, + bert_vocab_id *tokens, + int32_t n_tokens, + float *embeddings) +{ + bert_eval_batch(ctx, n_threads, 1, &tokens, &n_tokens, embeddings ? &embeddings : nullptr); +} + +void bert_eval_batch( + bert_ctx * ctx, + int32_t n_threads, + int32_t n_batch_size, + bert_vocab_id ** batch_tokens, + int32_t * n_tokens, + float ** batch_embeddings) +{ + const bert_model& model = ctx->model; + bool mem_req_mode = !batch_embeddings; + // batch_embeddings is nullptr for the initial memory requirements run + if (!mem_req_mode && n_batch_size > ctx->max_batch_n) { + bert_resize_ctx(ctx, n_batch_size); + if (n_batch_size > ctx->max_batch_n) { + fprintf(stderr, "%s: tried to increase buffers to batch size %d but failed\n", __func__, n_batch_size); + return; + } + } + + // TODO: implement real batching + for (int ba = 0; ba < n_batch_size; ba++) + { + const int N = n_tokens[ba]; + const auto &tokens = batch_tokens[ba]; + + const auto &hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_max_tokens = hparams.n_max_tokens; + const int n_head = hparams.n_head; + + const int d_head = n_embd / n_head; + + std::vector result; + if (N > n_max_tokens) + { + fprintf(stderr, "Too many tokens, maximum is %d\n", n_max_tokens); + return; + } + + auto & mem_per_token = ctx->mem_per_token; + auto & buf_compute = ctx->buf_compute; + + struct ggml_init_params params = { + .mem_size = buf_compute.size, + .mem_buffer = buf_compute.data, + .no_alloc = false, + }; + + struct ggml_context *ctx0 = ggml_init(params); + struct ggml_cgraph gf = {}; + gf.n_threads = n_threads; + + // Embeddings. 
word_embeddings + token_type_embeddings + position_embeddings + struct ggml_tensor *token_layer = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + memcpy(token_layer->data, tokens, N * ggml_element_size(token_layer)); + + struct ggml_tensor *token_types = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + ggml_set_zero(token_types); + + struct ggml_tensor *positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + for (int i = 0; i < N; i++) + { + ggml_set_i32_1d(positions, i, i); + } + + struct ggml_tensor *inpL = ggml_get_rows(ctx0, model.word_embeddings, token_layer); + + inpL = ggml_add(ctx0, + ggml_get_rows(ctx0, model.token_type_embeddings, token_types), + inpL); + inpL = ggml_add(ctx0, + ggml_get_rows(ctx0, model.position_embeddings, positions), + inpL); + + // embd norm + { + inpL = ggml_norm(ctx0, inpL); + + inpL = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.ln_e_w, inpL), + inpL), + ggml_repeat(ctx0, model.ln_e_b, inpL)); + } + // layers + for (int il = 0; il < n_layer; il++) + { + struct ggml_tensor *cur = inpL; + + // self-attention + { + struct ggml_tensor *Qcur = cur; + Qcur = ggml_reshape_3d(ctx0, + ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].q_b, Qcur), + ggml_mul_mat(ctx0, model.layers[il].q_w, Qcur)), + d_head, n_head, N); + struct ggml_tensor *Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); + + struct ggml_tensor *Kcur = cur; + Kcur = ggml_reshape_3d(ctx0, + ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].k_b, Kcur), + ggml_mul_mat(ctx0, model.layers[il].k_w, Kcur)), + d_head, n_head, N); + struct ggml_tensor *K = ggml_permute(ctx0, Kcur, 0, 2, 1, 3); + + struct ggml_tensor *Vcur = cur; + Vcur = ggml_reshape_3d(ctx0, + ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].v_b, Vcur), + ggml_mul_mat(ctx0, model.layers[il].v_w, Vcur)), + d_head, n_head, N); + struct ggml_tensor *V = ggml_permute(ctx0, Vcur, 0, 2, 1, 3); + + struct ggml_tensor *KQ = ggml_mul_mat(ctx0, K, Q); + // KQ = soft_max(KQ / sqrt(head width)) + KQ = ggml_soft_max(ctx0, + ggml_scale(ctx0, + KQ, + ggml_new_f32(ctx0, 1.0f / sqrt((float)d_head)))); + + V = ggml_cont(ctx0, ggml_transpose(ctx0, V)); + struct ggml_tensor *KQV = ggml_mul_mat(ctx0, V, KQ); + KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + cur = ggml_cpy(ctx0, + KQV, + ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + } + // attention output + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].o_b, cur), + ggml_mul_mat(ctx0, model.layers[il].o_w, cur)); + + // re-add the layer input + cur = ggml_add(ctx0, cur, inpL); + + // attention norm + { + cur = ggml_norm(ctx0, cur); + + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_att_w, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_att_b, cur)); + } + struct ggml_tensor *att_output = cur; + // intermediate_output = self.intermediate(attention_output) + cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].ff_i_b, cur), + cur); + cur = ggml_gelu(ctx0, cur); + + // layer_output = self.output(intermediate_output, attention_output) + cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].ff_o_b, cur), + cur); + // attentions bypass the intermediate layer + cur = ggml_add(ctx0, att_output, cur); + + // output norm + { + cur = ggml_norm(ctx0, cur); + + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_out_w, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_out_b, cur)); + } + inpL = cur; + } + inpL = 
ggml_cont(ctx0, ggml_transpose(ctx0, inpL)); + // pooler + struct ggml_tensor *sum = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, N, 1); + ggml_set_f32(sum, 1.0f / N); + inpL = ggml_mul_mat(ctx0, inpL, sum); + + // normalizer + ggml_tensor *length = ggml_sqrt(ctx0, + ggml_sum(ctx0, ggml_sqr(ctx0, inpL))); + inpL = ggml_scale(ctx0, inpL, ggml_div(ctx0, ggml_new_f32(ctx0, 1.0f), length)); + + ggml_tensor *output = inpL; + // run the computation + ggml_build_forward_expand(&gf, output); + ggml_graph_compute(ctx0, &gf); + + + // float *dat = ggml_get_data_f32(output); + // pretty_print_tensor(dat, output->ne, output->nb, output->n_dims - 1, ""); + + #ifdef GGML_PERF + // print timing information per ggml operation (for debugging purposes) + // requires GGML_PERF to be defined + ggml_graph_print(&gf); + #endif + + if (!mem_req_mode) { + memcpy(batch_embeddings[ba], (float *)ggml_get_data(output), sizeof(float) * n_embd); + } else { + mem_per_token = ggml_used_mem(ctx0) / N; + + // printf("used_mem = %zu KB \n", ggml_used_mem(ctx0) / 1024); + // printf("mem_per_token = %zu KB \n", mem_per_token / 1024); + } + + ggml_free(ctx0); + } +} + +void bert_encode( + struct bert_ctx *ctx, + int32_t n_threads, + const char *texts, + float *embeddings) +{ + bert_encode_batch(ctx, n_threads, 1, 1, &texts, &embeddings); +} + +void bert_encode_batch( + struct bert_ctx *ctx, + int32_t n_threads, + int32_t n_batch_size, + int32_t n_inputs, + const char ** texts, + float **embeddings) +{ + // TODO: Disable batching for now + n_batch_size = 1; + /* + if (n_batch_size > n_inputs) { + n_batch_size = n_inputs; + } + if (n_batch_size > ctx->max_batch_n) { + bert_resize_ctx(ctx, n_batch_size); + n_batch_size = ctx->max_batch_n; + } + */ + + int32_t N = bert_n_max_tokens(ctx); + + std::vector buf_tokens; + // Most of this buffer will be unused in typical case where inputs are not that long. 
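+    // Tokenize every input up front: it_tokens walks through buf_tokens so each
+    // input's ids occupy a contiguous slice, recorded per input in unsorted_tokens.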
+ buf_tokens.resize(N * n_inputs); + std::vector n_tokens = std::vector(n_inputs); + std::vector unsorted_tokens(n_inputs); + bert_vocab_id* it_tokens = buf_tokens.data(); + for (int i = 0; i < n_inputs; i++) { + unsorted_tokens[i] = it_tokens; + bert_tokenize(ctx, texts[i], it_tokens, &n_tokens[i], N); + it_tokens += n_tokens[i]; + } + + if (n_batch_size == n_inputs) { + bert_eval_batch(ctx, n_threads, n_batch_size, unsorted_tokens.data(), n_tokens.data(), embeddings); + } else { + // sort the inputs by tokenized length, batch and eval + + std::vector indices; + indices.reserve(n_inputs); + for (int i = 0; i < n_inputs; i++) + { + indices.push_back(i); + } + + std::vector sorted_n_tokens = std::vector(n_inputs); + + std::vector sorted_tokens(n_inputs); + + std::sort(indices.begin(), indices.end(), [&](int a, int b) + { return n_tokens[a] < n_tokens[b]; }); + + std::vector sorted_embeddings(n_inputs); + memcpy(sorted_embeddings.data(), embeddings, n_inputs * sizeof(float *)); + + for (int i = 0; i < n_inputs; i++) { + sorted_embeddings[i] = embeddings[indices[i]]; + sorted_tokens[i] = unsorted_tokens[indices[i]]; + sorted_n_tokens[i] = n_tokens[indices[i]]; + } + + for (int i = 0; i < n_inputs; i += n_batch_size) + { + if (i + n_batch_size > n_inputs) { + n_batch_size = n_inputs - i; + } + bert_eval_batch(ctx, n_threads, n_batch_size, &sorted_tokens[i], &sorted_n_tokens[i], &sorted_embeddings[i]); + } + } +} diff --git a/gpt4all-backend/bert.h b/gpt4all-backend/bert.h new file mode 100644 index 000000000000..28435edebf0e --- /dev/null +++ b/gpt4all-backend/bert.h @@ -0,0 +1,71 @@ +#ifndef BERT_H +#define BERT_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct bert_ctx; + +typedef int32_t bert_vocab_id; + +struct bert_ctx * bert_load_from_file(const char * fname); +void bert_free(bert_ctx * ctx); + +// Main api, does both tokenizing and evaluation + +void bert_encode( + struct bert_ctx * ctx, + int32_t n_threads, + const char * texts, + float * embeddings); + +// n_batch_size - how many to process at a time +// n_inputs - total size of texts and embeddings arrays +void bert_encode_batch( + struct bert_ctx * ctx, + int32_t n_threads, + int32_t n_batch_size, + int32_t n_inputs, + const char ** texts, + float ** embeddings); + +// Api for separate tokenization & eval + +void bert_tokenize( + struct bert_ctx * ctx, + const char * text, + bert_vocab_id * tokens, + int32_t * n_tokens, + int32_t n_max_tokens); + +void bert_eval( + struct bert_ctx * ctx, + int32_t n_threads, + bert_vocab_id * tokens, + int32_t n_tokens, + float * embeddings); + +// NOTE: for batch processing the longest input must be first +void bert_eval_batch( + struct bert_ctx * ctx, + int32_t n_threads, + int32_t n_batch_size, + bert_vocab_id ** batch_tokens, + int32_t * n_tokens, + float ** batch_embeddings); + +int32_t bert_n_embd(bert_ctx * ctx); +int32_t bert_n_max_tokens(bert_ctx * ctx); + +const char* bert_vocab_id_to_token(bert_ctx * ctx, bert_vocab_id id); + +#ifdef __cplusplus +} +#endif + +#endif // BERT_H diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt index 8bd5becc5643..c380cccda687 100644 --- a/gpt4all-chat/CMakeLists.txt +++ b/gpt4all-chat/CMakeLists.txt @@ -206,6 +206,8 @@ install(TARGETS replit-mainline-default DESTINATION lib COMPONENT ${COMPONENT_NA if(APPLE) install(TARGETS replit-mainline-metal DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) endif() +install(TARGETS bert-avxonly DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) +install(TARGETS 
bert-default DESTINATION lib COMPONENT ${COMPONENT_NAME_MAIN}) set(CPACK_GENERATOR "IFW") set(CPACK_VERBATIM_VARIABLES YES) diff --git a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in index 1301f28c4837..2420599087d8 100644 --- a/gpt4all-chat/cmake/deploy-qt-mac.cmake.in +++ b/gpt4all-chat/cmake/deploy-qt-mac.cmake.in @@ -7,16 +7,19 @@ file(GLOB MYMPTLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NA file(GLOB MYLLAMALIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllama*) file(GLOB MYREPLITLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libreplit*) file(GLOB MYFALCONLLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libfalcon*) +file(GLOB MYBERTLLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libbert*) file(GLOB MYLLMODELLIBS ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/lib/libllmodel.*) file(COPY ${MYGPTJLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY ${MYMPTLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) +file(COPY ${MYLLAMALIBS} + DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY ${MYREPLITLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY ${MYFALCONLLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) -file(COPY ${MYLLAMALIBS} +file(COPY ${MYBERTLLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) file(COPY ${MYLLMODELLIBS} DESTINATION ${CPACK_TEMPORARY_INSTALL_DIRECTORY}/packages/${COMPONENT_NAME_MAIN}/data/bin/gpt4all.app/Contents/Frameworks) From 315a1f2aa2a2b38c409d6d2aadab5631084e8703 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 11:00:20 -0400 Subject: [PATCH 136/198] Move it back as internal class. 
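
This folds the free-standing LLMImplementation class back into LLModel as the nested
class LLModel::Implementation, so the loader and the models it constructs live in one
scope again and call sites qualify the factory through LLModel. A minimal sketch of
the resulting caller-side usage (the paths here are placeholders, not part of this
change):

    #include "llmodel.h"
    #include <iostream>

    int main() {
        // Point the loader at the directory that holds the backend libraries,
        // then construct an implementation for the model file and load it.
        LLModel::Implementation::setImplementationsSearchPath("/path/to/backend/libs");
        LLModel *model = LLModel::Implementation::construct("/path/to/model.bin", "auto");
        if (model && model->loadModel("/path/to/model.bin"))
            std::cout << "loaded a " << model->implementation().modelType() << " model\n";
        delete model;
        return 0;
    }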
--- gpt4all-backend/llmodel.cpp | 35 +++++++++--------- gpt4all-backend/llmodel.h | 58 +++++++++++++++--------------- gpt4all-backend/llmodel_c.cpp | 6 ++-- gpt4all-backend/llmodel_shared.cpp | 4 +-- gpt4all-chat/chatllm.cpp | 2 +- gpt4all-chat/llm.cpp | 2 +- 6 files changed, 53 insertions(+), 54 deletions(-) diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index d9300f04064e..b0e498087e51 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -41,7 +41,7 @@ static bool requires_avxonly() { #endif } -LLMImplementation::LLMImplementation(Dlhandle &&dlhandle_) +LLModel::Implementation::Implementation(Dlhandle &&dlhandle_) : m_dlhandle(new Dlhandle(std::move(dlhandle_))) { auto get_model_type = m_dlhandle->get("get_model_type"); assert(get_model_type); @@ -50,12 +50,12 @@ LLMImplementation::LLMImplementation(Dlhandle &&dlhandle_) assert(get_build_variant); m_buildVariant = get_build_variant(); m_magicMatch = m_dlhandle->get("magic_match"); - assert(magicMatch); + assert(m_magicMatch); m_construct = m_dlhandle->get("construct"); - assert(construct_); + assert(m_construct); } -LLMImplementation::LLMImplementation(LLMImplementation &&o) +LLModel::Implementation::Implementation(Implementation &&o) : m_magicMatch(o.m_magicMatch) , m_construct(o.m_construct) , m_modelType(o.m_modelType) @@ -64,19 +64,19 @@ LLMImplementation::LLMImplementation(LLMImplementation &&o) o.m_dlhandle = nullptr; } -LLMImplementation::~LLMImplementation() { +LLModel::Implementation::~Implementation() { if (m_dlhandle) delete m_dlhandle; } -bool LLMImplementation::isImplementation(const Dlhandle &dl) { +bool LLModel::Implementation::isImplementation(const Dlhandle &dl) { return dl.get("is_g4a_backend_model_implementation"); } -const std::vector &LLMImplementation::implementationList() { +const std::vector &LLModel::Implementation::implementationList() { // NOTE: allocated on heap so we leak intentionally on exit so we have a chance to clean up the // individual models without the cleanup of the static list interfering - static auto* libs = new std::vector([] () { - std::vector fres; + static auto* libs = new std::vector([] () { + std::vector fres; auto search_in_directory = [&](const std::string& paths) { std::stringstream ss(paths); @@ -91,10 +91,10 @@ const std::vector &LLMImplementation::implementationList() { // Add to list if model implementation try { Dlhandle dl(p.string()); - if (!LLMImplementation::isImplementation(dl)) { + if (!Implementation::isImplementation(dl)) { continue; } - fres.emplace_back(LLMImplementation(std::move(dl))); + fres.emplace_back(Implementation(std::move(dl))); } catch (...) 
{} } } @@ -108,7 +108,7 @@ const std::vector &LLMImplementation::implementationList() { return *libs; } -const LLMImplementation* LLMImplementation::implementation(std::ifstream& f, const std::string& buildVariant) { +const LLModel::Implementation* LLModel::Implementation::implementation(std::ifstream& f, const std::string& buildVariant) { for (const auto& i : implementationList()) { f.seekg(0); if (!i.m_magicMatch(f)) continue; @@ -118,7 +118,7 @@ const LLMImplementation* LLMImplementation::implementation(std::ifstream& f, con return nullptr; } -LLModel *LLMImplementation::construct(const std::string &modelPath, std::string buildVariant) { +LLModel *LLModel::Implementation::construct(const std::string &modelPath, std::string buildVariant) { if (!has_at_least_minimal_hardware()) return nullptr; @@ -127,14 +127,15 @@ LLModel *LLMImplementation::construct(const std::string &modelPath, std::string std::ifstream f(modelPath, std::ios::binary); if (!f) return nullptr; // Get correct implementation - const LLMImplementation* impl = nullptr; + const Implementation* impl = nullptr; #if defined(__APPLE__) && defined(__arm64__) // FIXME: See if metal works for intel macs if (buildVariant == "auto") { size_t total_mem = getSystemTotalRAMInBytes(); impl = implementation(f, "metal"); if(impl) { - LLModel* metalimpl = impl->construct(); + LLModel* metalimpl = impl->m_construct(); + metalimpl->m_implementation = impl; size_t req_mem = metalimpl->requiredMem(modelPath); float req_to_total = (float) req_mem / (float) total_mem; // on a 16GB M2 Mac a 13B q4_0 (0.52) works for me but a 13B q4_K_M (0.55) does not @@ -168,10 +169,10 @@ LLModel *LLMImplementation::construct(const std::string &modelPath, std::string return fres; } -void LLMImplementation::setImplementationsSearchPath(const std::string& path) { +void LLModel::Implementation::setImplementationsSearchPath(const std::string& path) { s_implementations_search_path = path; } -const std::string& LLMImplementation::implementationsSearchPath() { +const std::string& LLModel::Implementation::implementationsSearchPath() { return s_implementations_search_path; } diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h index a5820174bdf3..06f9d618b62c 100644 --- a/gpt4all-backend/llmodel.h +++ b/gpt4all-backend/llmodel.h @@ -12,10 +12,35 @@ #define LLMODEL_MAX_PROMPT_BATCH 128 class Dlhandle; -class LLMImplementation; class LLModel { public: using Token = int32_t; + class Implementation { + public: + Implementation(Dlhandle&&); + Implementation(const Implementation&) = delete; + Implementation(Implementation&&); + ~Implementation(); + + std::string_view modelType() const { return m_modelType; } + std::string_view buildVariant() const { return m_buildVariant; } + + static bool isImplementation(const Dlhandle&); + static const std::vector& implementationList(); + static const Implementation *implementation(std::ifstream& f, const std::string& buildVariant); + static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto"); + static void setImplementationsSearchPath(const std::string& path); + static const std::string& implementationsSearchPath(); + + private: + bool (*m_magicMatch)(std::ifstream& f); + LLModel *(*m_construct)(); + + private: + std::string_view m_modelType; + std::string_view m_buildVariant; + Dlhandle *m_dlhandle; + }; struct PromptContext { std::vector logits; // logits of current context @@ -51,7 +76,7 @@ class LLModel { virtual void setThreadCount(int32_t /*n_threads*/) {} virtual int32_t threadCount() 
const { return 1; } - const LLMImplementation& implementation() const { + const Implementation& implementation() const { return *m_implementation; } @@ -69,37 +94,10 @@ class LLModel { // shared by all base classes so it isn't virtual void recalculateContext(PromptContext &promptCtx, std::function recalculate); - const LLMImplementation *m_implementation = nullptr; + const Implementation *m_implementation = nullptr; private: friend class LLMImplementation; }; -class LLMImplementation { -public: - LLMImplementation(Dlhandle&&); - LLMImplementation(const LLMImplementation&) = delete; - LLMImplementation(LLMImplementation&&); - ~LLMImplementation(); - - std::string_view modelType() const { return m_modelType; } - std::string_view buildVariant() const { return m_buildVariant; } - - static bool isImplementation(const Dlhandle&); - static const std::vector& implementationList(); - static const LLMImplementation *implementation(std::ifstream& f, const std::string& buildVariant); - static LLModel *construct(const std::string &modelPath, std::string buildVariant = "auto"); - static void setImplementationsSearchPath(const std::string& path); - static const std::string& implementationsSearchPath(); - -private: - bool (*m_magicMatch)(std::ifstream& f); - LLModel *(*m_construct)(); - -private: - std::string_view m_modelType; - std::string_view m_buildVariant; - Dlhandle *m_dlhandle; -}; - #endif // LLMODEL_H diff --git a/gpt4all-backend/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp index 2364e4fa7ffa..c7e13f79a24e 100644 --- a/gpt4all-backend/llmodel_c.cpp +++ b/gpt4all-backend/llmodel_c.cpp @@ -29,7 +29,7 @@ llmodel_model llmodel_model_create2(const char *model_path, const char *build_va int error_code = 0; try { - wrapper->llModel = LLMImplementation::construct(model_path, build_variant); + wrapper->llModel = LLModel::Implementation::construct(model_path, build_variant); } catch (const std::exception& e) { error_code = EINVAL; last_error_message = e.what(); @@ -180,10 +180,10 @@ int32_t llmodel_threadCount(llmodel_model model) void llmodel_set_implementation_search_path(const char *path) { - LLMImplementation::setImplementationsSearchPath(path); + LLModel::Implementation::setImplementationsSearchPath(path); } const char *llmodel_get_implementation_search_path() { - return LLMImplementation::implementationsSearchPath().c_str(); + return LLModel::Implementation::implementationsSearchPath().c_str(); } diff --git a/gpt4all-backend/llmodel_shared.cpp b/gpt4all-backend/llmodel_shared.cpp index 881ea5ec3c8d..fe1db76397dd 100644 --- a/gpt4all-backend/llmodel_shared.cpp +++ b/gpt4all-backend/llmodel_shared.cpp @@ -45,8 +45,8 @@ void LLModel::prompt(const std::string &prompt, if ((int) embd_inp.size() > promptCtx.n_ctx - 4) { responseCallback(-1, "ERROR: The prompt size exceeds the context window size and cannot be processed."); - std::cerr << implementation().modelType() << " ERROR: The prompt is" << embd_inp.size() << - "tokens and the context window is" << promptCtx.n_ctx << "!\n"; + std::cerr << implementation().modelType() << " ERROR: The prompt is " << embd_inp.size() << + " tokens and the context window is " << promptCtx.n_ctx << "!\n"; return; } diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index fa11cdbbdf48..37c92d536ec6 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -244,7 +244,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) else m_llModelInfo.model = LLMImplementation::construct(filePath.toStdString(), "auto"); #else - m_llModelInfo.model = 
LLMImplementation::construct(filePath.toStdString(), "auto"); + m_llModelInfo.model = LLModel::Implementation::construct(filePath.toStdString(), "auto"); #endif if (m_llModelInfo.model) { diff --git a/gpt4all-chat/llm.cpp b/gpt4all-chat/llm.cpp index ff62d43e7349..7953b2962793 100644 --- a/gpt4all-chat/llm.cpp +++ b/gpt4all-chat/llm.cpp @@ -34,7 +34,7 @@ LLM::LLM() if (directoryExists(frameworksDir)) llmodelSearchPaths += ";" + frameworksDir; #endif - LLMImplementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString()); + LLModel::Implementation::setImplementationsSearchPath(llmodelSearchPaths.toStdString()); #if defined(__x86_64__) #ifndef _MSC_VER From 0efdbfcffee88b7b1e17b9d902eead654c38585a Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Sun, 9 Jul 2023 11:32:51 -0400 Subject: [PATCH 137/198] Bert --- gpt4all-backend/bert.cpp | 714 ++++++++++-------- gpt4all-backend/bert.h | 71 -- gpt4all-backend/bert_impl.h | 44 ++ gpt4all-backend/falcon_impl.h | 2 + gpt4all-backend/gptj_impl.h | 2 + gpt4all-backend/llamamodel_impl.h | 2 + gpt4all-backend/llmodel.h | 7 + gpt4all-backend/llmodel_c.cpp | 19 + gpt4all-backend/llmodel_c.h | 17 + gpt4all-backend/llmodel_shared.cpp | 16 + gpt4all-backend/mpt_impl.h | 2 + gpt4all-backend/replit_impl.h | 2 + .../scripts/convert_bert_hf_to_ggml.py | 102 +++ gpt4all-bindings/python/gpt4all/__init__.py | 2 +- gpt4all-bindings/python/gpt4all/gpt4all.py | 14 + gpt4all-bindings/python/gpt4all/pyllmodel.py | 24 + .../python/gpt4all/tests/test_gpt4all.py | 10 +- gpt4all-chat/chatgpt.h | 2 + gpt4all-chat/chatllm.cpp | 9 +- gpt4all-chat/chatllm.h | 1 + 20 files changed, 673 insertions(+), 389 deletions(-) delete mode 100644 gpt4all-backend/bert.h create mode 100644 gpt4all-backend/bert_impl.h create mode 100644 gpt4all-backend/scripts/convert_bert_hf_to_ggml.py diff --git a/gpt4all-backend/bert.cpp b/gpt4all-backend/bert.cpp index 318efdc7309e..66ee2515cf55 100644 --- a/gpt4all-backend/bert.cpp +++ b/gpt4all-backend/bert.cpp @@ -1,4 +1,5 @@ -#include "bert.h" +#define BERT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE +#include "bert_impl.h" #include "ggml.h" #include @@ -16,6 +17,12 @@ //#define DEBUG_BERT +namespace { +const char *modelType_ = "Bert"; +} + +typedef int32_t bert_vocab_id; + // default hparams (all-MiniLM-L6-v2) struct bert_hparams { @@ -192,15 +199,11 @@ std::string bert_normalize_prompt(const std::string &text) } return text2; } -void bert_tokenize( + +std::vector bert_tokenize( struct bert_ctx * ctx, - const char * text, - bert_vocab_id * tokens, - int32_t * n_tokens, - int32_t n_max_tokens) + const char * text) { - int cls_tok_id = 101; - int sep_tok_id = 102; const bert_vocab &vocab = ctx->vocab; std::string str = text; @@ -225,10 +228,10 @@ void bert_tokenize( } } - int32_t t = 0; - tokens[t++] = cls_tok_id; - // find the longest tokens that form the words: + std::vector tokens; + int cls_tok_id = 101; + tokens.push_back(cls_tok_id); for (const auto &word : words) { if (word.size() == 0) @@ -237,21 +240,17 @@ void bert_tokenize( int i = 0; int n = word.size(); auto *token_map = &vocab.token_to_id; - loop: while (i < n) { - if (t >= n_max_tokens - 1) - break; int j = n; while (j > i) { auto it = token_map->find(word.substr(i, j - i)); if (it != token_map->end()) { - tokens[t++] = it->second; + tokens.push_back(it->second); i = j; token_map = &vocab.subword_token_to_id; - goto loop; } --j; } @@ -263,14 +262,247 @@ void bert_tokenize( } } } - tokens[t++] = sep_tok_id; - *n_tokens = t; + + return tokens; +} + +void 
bert_resize_ctx(bert_ctx * ctx, int32_t new_size) { + int64_t buf_size_new = ctx->mem_per_input * new_size; + + // TODO: Max memory should be a param? Now just 1 GB + int64_t GB = 1 << 30; +#if defined(DEBUG_BERT) + printf("%s: requested_buf_size %lldMB\n", __func__, buf_size_new / (1 << 20)); +#endif + if (buf_size_new > GB) { + int32_t adjusted_new_size = GB / ctx->mem_per_input; + if (adjusted_new_size < 1) adjusted_new_size = 1; +#if defined(DEBUG_BERT) + printf("%s: requested batch size %d, actual new batch size %d\n", __func__, new_size, adjusted_new_size); +#endif + new_size = adjusted_new_size; + buf_size_new = ctx->mem_per_input * new_size; + } + if (new_size > ctx->max_batch_n) { + ctx->buf_compute.resize(buf_size_new); + ctx->max_batch_n = new_size; + } +} + +void bert_eval( + struct bert_ctx *ctx, + int32_t n_threads, + const bert_vocab_id *raw_tokens, + int32_t n_tokens, + float *embeddings) +{ + const bert_model& model = ctx->model; + bool mem_req_mode = !embeddings; + + // batch_embeddings is nullptr for the initial memory requirements run + if (!mem_req_mode && 1 > ctx->max_batch_n) + bert_resize_ctx(ctx, 1); + + const int N = n_tokens; + const auto &tokens = raw_tokens; + + const auto &hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_layer = hparams.n_layer; + const int n_max_tokens = hparams.n_max_tokens; + const int n_head = hparams.n_head; + + const int d_head = n_embd / n_head; + + std::vector result; + if (N > n_max_tokens) + { + fprintf(stderr, "Too many tokens, maximum is %d\n", n_max_tokens); + return; + } + + auto & mem_per_token = ctx->mem_per_token; + auto & buf_compute = ctx->buf_compute; + + struct ggml_init_params params = { + .mem_size = buf_compute.size, + .mem_buffer = buf_compute.data, + .no_alloc = false, + }; + + struct ggml_context *ctx0 = ggml_init(params); + struct ggml_cgraph gf = {}; + gf.n_threads = n_threads; + + // Embeddings. 
word_embeddings + token_type_embeddings + position_embeddings + struct ggml_tensor *token_layer = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + memcpy(token_layer->data, tokens, N * ggml_element_size(token_layer)); + + struct ggml_tensor *token_types = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + ggml_set_zero(token_types); + + struct ggml_tensor *positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + for (int i = 0; i < N; i++) + { + ggml_set_i32_1d(positions, i, i); + } + + struct ggml_tensor *inpL = ggml_get_rows(ctx0, model.word_embeddings, token_layer); + + inpL = ggml_add(ctx0, + ggml_get_rows(ctx0, model.token_type_embeddings, token_types), + inpL); + inpL = ggml_add(ctx0, + ggml_get_rows(ctx0, model.position_embeddings, positions), + inpL); + + // embd norm + { + inpL = ggml_norm(ctx0, inpL); + + inpL = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.ln_e_w, inpL), + inpL), + ggml_repeat(ctx0, model.ln_e_b, inpL)); + } + // layers + for (int il = 0; il < n_layer; il++) + { + struct ggml_tensor *cur = inpL; + + // self-attention + { + struct ggml_tensor *Qcur = cur; + Qcur = ggml_reshape_3d(ctx0, + ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].q_b, Qcur), + ggml_mul_mat(ctx0, model.layers[il].q_w, Qcur)), + d_head, n_head, N); + struct ggml_tensor *Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); + + struct ggml_tensor *Kcur = cur; + Kcur = ggml_reshape_3d(ctx0, + ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].k_b, Kcur), + ggml_mul_mat(ctx0, model.layers[il].k_w, Kcur)), + d_head, n_head, N); + struct ggml_tensor *K = ggml_permute(ctx0, Kcur, 0, 2, 1, 3); + + struct ggml_tensor *Vcur = cur; + Vcur = ggml_reshape_3d(ctx0, + ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].v_b, Vcur), + ggml_mul_mat(ctx0, model.layers[il].v_w, Vcur)), + d_head, n_head, N); + struct ggml_tensor *V = ggml_permute(ctx0, Vcur, 0, 2, 1, 3); + + struct ggml_tensor *KQ = ggml_mul_mat(ctx0, K, Q); + // KQ = soft_max(KQ / sqrt(head width)) + KQ = ggml_soft_max(ctx0, + ggml_scale(ctx0, + KQ, + ggml_new_f32(ctx0, 1.0f / sqrt((float)d_head)))); + + V = ggml_cont(ctx0, ggml_transpose(ctx0, V)); + struct ggml_tensor *KQV = ggml_mul_mat(ctx0, V, KQ); + KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + cur = ggml_cpy(ctx0, + KQV, + ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + } + // attention output + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].o_b, cur), + ggml_mul_mat(ctx0, model.layers[il].o_w, cur)); + + // re-add the layer input + cur = ggml_add(ctx0, cur, inpL); + + // attention norm + { + cur = ggml_norm(ctx0, cur); + + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_att_w, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_att_b, cur)); + } + struct ggml_tensor *att_output = cur; + // intermediate_output = self.intermediate(attention_output) + cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].ff_i_b, cur), + cur); + cur = ggml_gelu(ctx0, cur); + + // layer_output = self.output(intermediate_output, attention_output) + cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); + cur = ggml_add(ctx0, + ggml_repeat(ctx0, model.layers[il].ff_o_b, cur), + cur); + // attentions bypass the intermediate layer + cur = ggml_add(ctx0, att_output, cur); + + // output norm + { + cur = ggml_norm(ctx0, cur); + + cur = ggml_add(ctx0, + ggml_mul(ctx0, + ggml_repeat(ctx0, model.layers[il].ln_out_w, cur), + cur), + ggml_repeat(ctx0, model.layers[il].ln_out_b, cur)); + } + inpL = cur; + } + inpL = 
ggml_cont(ctx0, ggml_transpose(ctx0, inpL));
+    // pooler
+    struct ggml_tensor *sum = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, N, 1);
+    ggml_set_f32(sum, 1.0f / N);
+    inpL = ggml_mul_mat(ctx0, inpL, sum);
+
+    // normalizer
+    ggml_tensor *length = ggml_sqrt(ctx0,
+                                    ggml_sum(ctx0, ggml_sqr(ctx0, inpL)));
+    inpL = ggml_scale(ctx0, inpL, ggml_div(ctx0, ggml_new_f32(ctx0, 1.0f), length));
+
+    ggml_tensor *output = inpL;
+    // run the computation
+    ggml_build_forward_expand(&gf, output);
+    ggml_graph_compute(ctx0, &gf);
+
+
+    // float *dat = ggml_get_data_f32(output);
+    // pretty_print_tensor(dat, output->ne, output->nb, output->n_dims - 1, "");
+
+    #ifdef GGML_PERF
+    // print timing information per ggml operation (for debugging purposes)
+    // requires GGML_PERF to be defined
+    ggml_graph_print(&gf);
+    #endif
+
+    if (!mem_req_mode) {
+        memcpy(embeddings, (float *)ggml_get_data(output), sizeof(float) * n_embd);
+    } else {
+        mem_per_token = ggml_used_mem(ctx0) / N;
+    }
+
+    // printf("used_mem = %zu KB \n", ggml_used_mem(ctx0) / 1024);
+    // printf("mem_per_token = %zu KB \n", mem_per_token / 1024);
+
+    ggml_free(ctx0);
 }
 
 //
 // Loading and setup
 //
 
+void bert_free(bert_ctx * ctx) {
+    ggml_free(ctx->model.ctx);
+    delete ctx;
+}
+
 struct bert_ctx * bert_load_from_file(const char *fname)
 {
 #if defined(DEBUG_BERT)
@@ -288,7 +520,7 @@ struct bert_ctx * bert_load_from_file(const char *fname)
     {
         uint32_t magic;
         fin.read((char *)&magic, sizeof(magic));
-        if (magic != 0x67676d6c)
+        if (magic != 0x62657274)
         {
             fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname);
             return nullptr;
@@ -506,7 +738,9 @@ struct bert_ctx * bert_load_from_file(const char *fname)
     // load weights
     {
         int n_tensors = 0;
+#if defined(DEBUG_BERT)
         size_t total_size = 0;
+#endif
 
 #if defined(DEBUG_BERT)
         printf("%s: ", __func__);
@@ -609,8 +843,10 @@ struct bert_ctx * bert_load_from_file(const char *fname)
 
             fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
 
+#if defined(DEBUG_BERT)
             // printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
             total_size += ggml_nbytes(tensor);
+#endif
 
             if (++n_tensors % 8 == 0)
             {
@@ -639,7 +875,7 @@ struct bert_ctx * bert_load_from_file(const char *fname)
 
         // TODO: Max tokens should be a param?
         int32_t N = new_bert->model.hparams.n_max_tokens;
-        new_bert->mem_per_input = 1.1 * (new_bert->mem_per_token * N); // add 10% to account for ggml object overhead
+        new_bert->mem_per_input = 1.9 * (new_bert->mem_per_token * N); // add 90% to account for ggml object overhead
 
     }
 #if defined(DEBUG_BERT)
@@ -649,331 +885,183 @@ struct bert_ctx * bert_load_from_file(const char *fname)
     return new_bert;
 }
 
-void bert_resize_ctx(bert_ctx * ctx, int32_t new_size) {
-    int64_t buf_size_new = ctx->mem_per_input * new_size;
+struct BertPrivate {
+    const std::string modelPath;
+    bool modelLoaded;
+    bert_ctx *ctx = nullptr;
+    int64_t n_threads = 0;
+};
 
-    // TODO: Max memory should be a param?
Now just 1 GB - int64_t GB = 1 << 30; - //printf("%s: requested_buf_size %ldMB\n", __func__, buf_size_new / (1 << 20)); - if (buf_size_new > GB) { - int32_t adjusted_new_size = GB / ctx->mem_per_input; - if (adjusted_new_size < 1) adjusted_new_size = 1; - //printf("%s: requested batch size %d, actual new batch size %d\n", __func__, new_size, adjusted_new_size); - new_size = adjusted_new_size; - buf_size_new = ctx->mem_per_input * new_size; - } - if (new_size > ctx->max_batch_n) { - ctx->buf_compute.resize(buf_size_new); - ctx->max_batch_n = new_size; - } +Bert::Bert() : d_ptr(new BertPrivate) { + d_ptr->modelLoaded = false; } -void bert_free(bert_ctx * ctx) { - ggml_free(ctx->model.ctx); - delete ctx; +Bert::~Bert() { + bert_free(d_ptr->ctx); } -void bert_eval( - struct bert_ctx *ctx, - int32_t n_threads, - bert_vocab_id *tokens, - int32_t n_tokens, - float *embeddings) +bool Bert::loadModel(const std::string &modelPath) { - bert_eval_batch(ctx, n_threads, 1, &tokens, &n_tokens, embeddings ? &embeddings : nullptr); + d_ptr->ctx = bert_load_from_file(modelPath.c_str()); + d_ptr->n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency()); + d_ptr->modelLoaded = d_ptr->ctx != nullptr; + fflush(stdout); + return true; } -void bert_eval_batch( - bert_ctx * ctx, - int32_t n_threads, - int32_t n_batch_size, - bert_vocab_id ** batch_tokens, - int32_t * n_tokens, - float ** batch_embeddings) +bool Bert::isModelLoaded() const { - const bert_model& model = ctx->model; - bool mem_req_mode = !batch_embeddings; - // batch_embeddings is nullptr for the initial memory requirements run - if (!mem_req_mode && n_batch_size > ctx->max_batch_n) { - bert_resize_ctx(ctx, n_batch_size); - if (n_batch_size > ctx->max_batch_n) { - fprintf(stderr, "%s: tried to increase buffers to batch size %d but failed\n", __func__, n_batch_size); - return; - } - } - - // TODO: implement real batching - for (int ba = 0; ba < n_batch_size; ba++) - { - const int N = n_tokens[ba]; - const auto &tokens = batch_tokens[ba]; - - const auto &hparams = model.hparams; - - const int n_embd = hparams.n_embd; - const int n_layer = hparams.n_layer; - const int n_max_tokens = hparams.n_max_tokens; - const int n_head = hparams.n_head; - - const int d_head = n_embd / n_head; - - std::vector result; - if (N > n_max_tokens) - { - fprintf(stderr, "Too many tokens, maximum is %d\n", n_max_tokens); - return; - } - - auto & mem_per_token = ctx->mem_per_token; - auto & buf_compute = ctx->buf_compute; - - struct ggml_init_params params = { - .mem_size = buf_compute.size, - .mem_buffer = buf_compute.data, - .no_alloc = false, - }; - - struct ggml_context *ctx0 = ggml_init(params); - struct ggml_cgraph gf = {}; - gf.n_threads = n_threads; - - // Embeddings. 
word_embeddings + token_type_embeddings + position_embeddings - struct ggml_tensor *token_layer = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - memcpy(token_layer->data, tokens, N * ggml_element_size(token_layer)); - - struct ggml_tensor *token_types = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - ggml_set_zero(token_types); - - struct ggml_tensor *positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); - for (int i = 0; i < N; i++) - { - ggml_set_i32_1d(positions, i, i); - } - - struct ggml_tensor *inpL = ggml_get_rows(ctx0, model.word_embeddings, token_layer); - - inpL = ggml_add(ctx0, - ggml_get_rows(ctx0, model.token_type_embeddings, token_types), - inpL); - inpL = ggml_add(ctx0, - ggml_get_rows(ctx0, model.position_embeddings, positions), - inpL); - - // embd norm - { - inpL = ggml_norm(ctx0, inpL); - - inpL = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.ln_e_w, inpL), - inpL), - ggml_repeat(ctx0, model.ln_e_b, inpL)); - } - // layers - for (int il = 0; il < n_layer; il++) - { - struct ggml_tensor *cur = inpL; + return d_ptr->modelLoaded; +} - // self-attention - { - struct ggml_tensor *Qcur = cur; - Qcur = ggml_reshape_3d(ctx0, - ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].q_b, Qcur), - ggml_mul_mat(ctx0, model.layers[il].q_w, Qcur)), - d_head, n_head, N); - struct ggml_tensor *Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); - - struct ggml_tensor *Kcur = cur; - Kcur = ggml_reshape_3d(ctx0, - ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].k_b, Kcur), - ggml_mul_mat(ctx0, model.layers[il].k_w, Kcur)), - d_head, n_head, N); - struct ggml_tensor *K = ggml_permute(ctx0, Kcur, 0, 2, 1, 3); - - struct ggml_tensor *Vcur = cur; - Vcur = ggml_reshape_3d(ctx0, - ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].v_b, Vcur), - ggml_mul_mat(ctx0, model.layers[il].v_w, Vcur)), - d_head, n_head, N); - struct ggml_tensor *V = ggml_permute(ctx0, Vcur, 0, 2, 1, 3); - - struct ggml_tensor *KQ = ggml_mul_mat(ctx0, K, Q); - // KQ = soft_max(KQ / sqrt(head width)) - KQ = ggml_soft_max(ctx0, - ggml_scale(ctx0, - KQ, - ggml_new_f32(ctx0, 1.0f / sqrt((float)d_head)))); - - V = ggml_cont(ctx0, ggml_transpose(ctx0, V)); - struct ggml_tensor *KQV = ggml_mul_mat(ctx0, V, KQ); - KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); - - cur = ggml_cpy(ctx0, - KQV, - ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); - } - // attention output - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].o_b, cur), - ggml_mul_mat(ctx0, model.layers[il].o_w, cur)); +size_t Bert::requiredMem(const std::string &/*modelPath*/) +{ + return 0; +} - // re-add the layer input - cur = ggml_add(ctx0, cur, inpL); +size_t Bert::stateSize() const +{ + return 0; +} - // attention norm - { - cur = ggml_norm(ctx0, cur); +size_t Bert::saveState(uint8_t */*dest*/) const +{ + return 0; +} - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_att_w, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_att_b, cur)); - } - struct ggml_tensor *att_output = cur; - // intermediate_output = self.intermediate(attention_output) - cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].ff_i_b, cur), - cur); - cur = ggml_gelu(ctx0, cur); +size_t Bert::restoreState(const uint8_t */*src*/) +{ + return 0; +} - // layer_output = self.output(intermediate_output, attention_output) - cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); - cur = ggml_add(ctx0, - ggml_repeat(ctx0, model.layers[il].ff_o_b, cur), - cur); - // attentions bypass the intermediate 
layer - cur = ggml_add(ctx0, att_output, cur); +void Bert::setThreadCount(int32_t n_threads) +{ + d_ptr->n_threads = n_threads; +} - // output norm - { - cur = ggml_norm(ctx0, cur); +int32_t Bert::threadCount() const +{ + return d_ptr->n_threads; +} - cur = ggml_add(ctx0, - ggml_mul(ctx0, - ggml_repeat(ctx0, model.layers[il].ln_out_w, cur), - cur), - ggml_repeat(ctx0, model.layers[il].ln_out_b, cur)); - } - inpL = cur; - } - inpL = ggml_cont(ctx0, ggml_transpose(ctx0, inpL)); - // pooler - struct ggml_tensor *sum = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, N, 1); - ggml_set_f32(sum, 1.0f / N); - inpL = ggml_mul_mat(ctx0, inpL, sum); - - // normalizer - ggml_tensor *length = ggml_sqrt(ctx0, - ggml_sum(ctx0, ggml_sqr(ctx0, inpL))); - inpL = ggml_scale(ctx0, inpL, ggml_div(ctx0, ggml_new_f32(ctx0, 1.0f), length)); - - ggml_tensor *output = inpL; - // run the computation - ggml_build_forward_expand(&gf, output); - ggml_graph_compute(ctx0, &gf); - - - // float *dat = ggml_get_data_f32(output); - // pretty_print_tensor(dat, output->ne, output->nb, output->n_dims - 1, ""); - - #ifdef GGML_PERF - // print timing information per ggml operation (for debugging purposes) - // requires GGML_PERF to be defined - ggml_graph_print(&gf); - #endif - - if (!mem_req_mode) { - memcpy(batch_embeddings[ba], (float *)ggml_get_data(output), sizeof(float) * n_embd); +std::vector Bert::embedding(const std::string &text) +{ + const int overlap = 32; + const LLModel::Token clsToken = 101; + const size_t contextLength = bert_n_max_tokens(d_ptr->ctx); + typedef std::vector TokenString; + TokenString tokens = ::bert_tokenize(d_ptr->ctx, text.c_str()); +#if defined(DEBUG_BERT) + std::cerr << "embedding: " << tokens.size() + << " contextLength " << contextLength + << "\n"; +#endif + std::vector embeddingsSum(bert_n_embd(d_ptr->ctx), 0); + int embeddingsSumTotal = 0; + size_t start_pos = 0; + bool isFirstChunk = true; + while (start_pos < tokens.size()) { + TokenString chunk; + if (!isFirstChunk) + chunk.push_back(clsToken); + const size_t l = isFirstChunk ? 
contextLength : contextLength - 1; + if (tokens.size() - start_pos > l) { + chunk.insert(chunk.end(), tokens.begin() + start_pos, tokens.begin() + start_pos + l); + start_pos = start_pos + contextLength - overlap; } else { - mem_per_token = ggml_used_mem(ctx0) / N; - - // printf("used_mem = %zu KB \n", ggml_used_mem(ctx0) / 1024); - // printf("mem_per_token = %zu KB \n", mem_per_token / 1024); + chunk.insert(chunk.end(), tokens.begin() + start_pos, tokens.end()); + start_pos = tokens.size(); } - - ggml_free(ctx0); +#if defined(DEBUG_BERT) + std::cerr << "chunk length: " << chunk.size() + << " embeddingsSumTotal " << embeddingsSumTotal + << " contextLength " << contextLength + << " start_pos " << start_pos + << "\n"; +#endif + embeddingsSumTotal++; + std::vector embeddings(bert_n_embd(d_ptr->ctx)); + bert_eval(d_ptr->ctx, d_ptr->n_threads, chunk.data(), chunk.size(), embeddings.data()); + std::transform(embeddingsSum.begin(), embeddingsSum.end(), embeddings.begin(), embeddingsSum.begin(), std::plus()); + isFirstChunk = false; } + + std::transform(embeddingsSum.begin(), embeddingsSum.end(), embeddingsSum.begin(), [embeddingsSumTotal](float num){ return num / embeddingsSumTotal; }); + std::vector finalEmbeddings(embeddingsSum.begin(), embeddingsSum.end()); + return finalEmbeddings; } -void bert_encode( - struct bert_ctx *ctx, - int32_t n_threads, - const char *texts, - float *embeddings) +std::vector Bert::tokenize(PromptContext &, const std::string &str) const { - bert_encode_batch(ctx, n_threads, 1, 1, &texts, &embeddings); + return ::bert_tokenize(d_ptr->ctx, str.c_str()); } -void bert_encode_batch( - struct bert_ctx *ctx, - int32_t n_threads, - int32_t n_batch_size, - int32_t n_inputs, - const char ** texts, - float **embeddings) +LLModel::Token Bert::sampleToken(PromptContext &/*promptCtx*/) const { - // TODO: Disable batching for now - n_batch_size = 1; - /* - if (n_batch_size > n_inputs) { - n_batch_size = n_inputs; - } - if (n_batch_size > ctx->max_batch_n) { - bert_resize_ctx(ctx, n_batch_size); - n_batch_size = ctx->max_batch_n; - } - */ - - int32_t N = bert_n_max_tokens(ctx); - - std::vector buf_tokens; - // Most of this buffer will be unused in typical case where inputs are not that long. 
- buf_tokens.resize(N * n_inputs); - std::vector n_tokens = std::vector(n_inputs); - std::vector unsorted_tokens(n_inputs); - bert_vocab_id* it_tokens = buf_tokens.data(); - for (int i = 0; i < n_inputs; i++) { - unsorted_tokens[i] = it_tokens; - bert_tokenize(ctx, texts[i], it_tokens, &n_tokens[i], N); - it_tokens += n_tokens[i]; - } + return 999 /*!*/; +} - if (n_batch_size == n_inputs) { - bert_eval_batch(ctx, n_threads, n_batch_size, unsorted_tokens.data(), n_tokens.data(), embeddings); - } else { - // sort the inputs by tokenized length, batch and eval +std::string Bert::tokenToString(Token id) const +{ + return bert_vocab_id_to_token(d_ptr->ctx, id); +} - std::vector indices; - indices.reserve(n_inputs); - for (int i = 0; i < n_inputs; i++) - { - indices.push_back(i); - } +bool Bert::evalTokens(PromptContext &ctx, const std::vector &tokens) const +{ + std::vector embeddings(bert_n_embd(d_ptr->ctx)); + int32_t cls = 101; + const bool useCLS = tokens.front() != cls; + if (useCLS) { + std::vector myTokens; + myTokens.push_back(cls); + myTokens.insert(myTokens.end(), tokens.begin(), tokens.end()); + bert_eval(d_ptr->ctx, d_ptr->n_threads, myTokens.data(), myTokens.size(), embeddings.data()); + } else + bert_eval(d_ptr->ctx, d_ptr->n_threads, tokens.data(), tokens.size(), embeddings.data()); + ctx.n_past = 0; // bert does not store any context + return true; +} + +int32_t Bert::contextLength() const +{ + return bert_n_max_tokens(d_ptr->ctx); +} - std::vector sorted_n_tokens = std::vector(n_inputs); +const std::vector &Bert::endTokens() const +{ + static const std::vector out = { 102 /*sep*/}; + return out; +} - std::vector sorted_tokens(n_inputs); +#if defined(_WIN32) +#define DLL_EXPORT __declspec(dllexport) +#else +#define DLL_EXPORT __attribute__ ((visibility ("default"))) +#endif - std::sort(indices.begin(), indices.end(), [&](int a, int b) - { return n_tokens[a] < n_tokens[b]; }); +extern "C" { +DLL_EXPORT bool is_g4a_backend_model_implementation() { + return true; +} - std::vector sorted_embeddings(n_inputs); - memcpy(sorted_embeddings.data(), embeddings, n_inputs * sizeof(float *)); +DLL_EXPORT const char *get_model_type() { + return modelType_; +} - for (int i = 0; i < n_inputs; i++) { - sorted_embeddings[i] = embeddings[indices[i]]; - sorted_tokens[i] = unsorted_tokens[indices[i]]; - sorted_n_tokens[i] = n_tokens[indices[i]]; - } +DLL_EXPORT const char *get_build_variant() { + return GGML_BUILD_VARIANT; +} - for (int i = 0; i < n_inputs; i += n_batch_size) - { - if (i + n_batch_size > n_inputs) { - n_batch_size = n_inputs - i; - } - bert_eval_batch(ctx, n_threads, n_batch_size, &sorted_tokens[i], &sorted_n_tokens[i], &sorted_embeddings[i]); - } +DLL_EXPORT bool magic_match(std::istream& f) { + uint32_t magic = 0; + f.read(reinterpret_cast(&magic), sizeof(magic)); + if (magic != 0x62657274) { + return false; } + return true; +} + +DLL_EXPORT LLModel *construct() { + return new Bert; } +} \ No newline at end of file diff --git a/gpt4all-backend/bert.h b/gpt4all-backend/bert.h deleted file mode 100644 index 28435edebf0e..000000000000 --- a/gpt4all-backend/bert.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef BERT_H -#define BERT_H - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -struct bert_ctx; - -typedef int32_t bert_vocab_id; - -struct bert_ctx * bert_load_from_file(const char * fname); -void bert_free(bert_ctx * ctx); - -// Main api, does both tokenizing and evaluation - -void bert_encode( - struct bert_ctx * ctx, - int32_t n_threads, - const char * texts, 
- float * embeddings); - -// n_batch_size - how many to process at a time -// n_inputs - total size of texts and embeddings arrays -void bert_encode_batch( - struct bert_ctx * ctx, - int32_t n_threads, - int32_t n_batch_size, - int32_t n_inputs, - const char ** texts, - float ** embeddings); - -// Api for separate tokenization & eval - -void bert_tokenize( - struct bert_ctx * ctx, - const char * text, - bert_vocab_id * tokens, - int32_t * n_tokens, - int32_t n_max_tokens); - -void bert_eval( - struct bert_ctx * ctx, - int32_t n_threads, - bert_vocab_id * tokens, - int32_t n_tokens, - float * embeddings); - -// NOTE: for batch processing the longest input must be first -void bert_eval_batch( - struct bert_ctx * ctx, - int32_t n_threads, - int32_t n_batch_size, - bert_vocab_id ** batch_tokens, - int32_t * n_tokens, - float ** batch_embeddings); - -int32_t bert_n_embd(bert_ctx * ctx); -int32_t bert_n_max_tokens(bert_ctx * ctx); - -const char* bert_vocab_id_to_token(bert_ctx * ctx, bert_vocab_id id); - -#ifdef __cplusplus -} -#endif - -#endif // BERT_H diff --git a/gpt4all-backend/bert_impl.h b/gpt4all-backend/bert_impl.h new file mode 100644 index 000000000000..d1cc99f4ace4 --- /dev/null +++ b/gpt4all-backend/bert_impl.h @@ -0,0 +1,44 @@ +#ifndef BERT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE +#error This file is NOT meant to be included outside of bert.cpp. Doing so is DANGEROUS. Be sure to know what you are doing before proceeding to #define BERT_H_I_KNOW_WHAT_I_AM_DOING_WHEN_INCLUDING_THIS_FILE +#endif +#ifndef BERT_H +#define BERT_H + +#include +#include +#include +#include +#include "llmodel.h" + +struct BertPrivate; +class Bert : public LLModel { +public: + Bert(); + ~Bert(); + + bool supportsEmbedding() const override { return true; } + bool supportsCompletion() const override { return true; } + bool loadModel(const std::string &modelPath) override; + bool isModelLoaded() const override; + size_t requiredMem(const std::string &modelPath) override; + size_t stateSize() const override; + size_t saveState(uint8_t *dest) const override; + size_t restoreState(const uint8_t *src) override; + void setThreadCount(int32_t n_threads) override; + int32_t threadCount() const override; + + std::vector embedding(const std::string &text) override; + +private: + std::unique_ptr d_ptr; + +protected: + std::vector tokenize(PromptContext &, const std::string&) const override; + Token sampleToken(PromptContext &ctx) const override; + std::string tokenToString(Token) const override; + bool evalTokens(PromptContext &ctx, const std::vector &tokens) const override; + int32_t contextLength() const override; + const std::vector& endTokens() const override; +}; + +#endif // BERT_H diff --git a/gpt4all-backend/falcon_impl.h b/gpt4all-backend/falcon_impl.h index 017252ea3bca..2362af9fac60 100644 --- a/gpt4all-backend/falcon_impl.h +++ b/gpt4all-backend/falcon_impl.h @@ -16,6 +16,8 @@ class Falcon : public LLModel { Falcon(); ~Falcon(); + bool supportsEmbedding() const override { return false; } + bool supportsCompletion() const override { return true; } bool loadModel(const std::string &modelPath) override; bool isModelLoaded() const override; size_t requiredMem(const std::string &modelPath) override; diff --git a/gpt4all-backend/gptj_impl.h b/gpt4all-backend/gptj_impl.h index 93e273190507..e2b1826e2c9a 100644 --- a/gpt4all-backend/gptj_impl.h +++ b/gpt4all-backend/gptj_impl.h @@ -15,6 +15,8 @@ class GPTJ : public LLModel { GPTJ(); ~GPTJ(); + bool supportsEmbedding() const override { return false; } + bool 
supportsCompletion() const override { return true; } bool loadModel(const std::string &modelPath) override; bool isModelLoaded() const override; size_t requiredMem(const std::string &modelPath) override; diff --git a/gpt4all-backend/llamamodel_impl.h b/gpt4all-backend/llamamodel_impl.h index 7623f15731ea..e564c44ad2c0 100644 --- a/gpt4all-backend/llamamodel_impl.h +++ b/gpt4all-backend/llamamodel_impl.h @@ -15,6 +15,8 @@ class LLamaModel : public LLModel { LLamaModel(); ~LLamaModel(); + bool supportsEmbedding() const override { return false; } + bool supportsCompletion() const override { return true; } bool loadModel(const std::string &modelPath) override; bool isModelLoaded() const override; size_t requiredMem(const std::string &modelPath) override; diff --git a/gpt4all-backend/llmodel.h b/gpt4all-backend/llmodel.h index 06f9d618b62c..29706697e343 100644 --- a/gpt4all-backend/llmodel.h +++ b/gpt4all-backend/llmodel.h @@ -61,18 +61,25 @@ class LLModel { explicit LLModel() {} virtual ~LLModel() {} + virtual bool supportsEmbedding() const = 0; + virtual bool supportsCompletion() const = 0; virtual bool loadModel(const std::string &modelPath) = 0; virtual bool isModelLoaded() const = 0; virtual size_t requiredMem(const std::string &modelPath) = 0; virtual size_t stateSize() const { return 0; } virtual size_t saveState(uint8_t */*dest*/) const { return 0; } virtual size_t restoreState(const uint8_t */*src*/) { return 0; } + + // This method requires the model to return true from supportsCompletion otherwise it will throw + // an error virtual void prompt(const std::string &prompt, std::function promptCallback, std::function responseCallback, std::function recalculateCallback, PromptContext &ctx); + virtual std::vector embedding(const std::string &text); + virtual void setThreadCount(int32_t /*n_threads*/) {} virtual int32_t threadCount() const { return 1; } diff --git a/gpt4all-backend/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp index c7e13f79a24e..fb916d951d0e 100644 --- a/gpt4all-backend/llmodel_c.cpp +++ b/gpt4all-backend/llmodel_c.cpp @@ -166,6 +166,25 @@ void llmodel_prompt(llmodel_model model, const char *prompt, ctx->context_erase = wrapper->promptContext.contextErase; } +float *llmodel_embedding(llmodel_model model, const char *text, size_t *embedding_size) +{ + LLModelWrapper *wrapper = reinterpret_cast(model); + std::vector embeddingVector = wrapper->llModel->embedding(text); + float *embedding = (float *)malloc(embeddingVector.size() * sizeof(float)); + if(embedding == nullptr) { + *embedding_size = 0; + return nullptr; + } + std::copy(embeddingVector.begin(), embeddingVector.end(), embedding); + *embedding_size = embeddingVector.size(); + return embedding; +} + +void llmodel_free_embedding(float *ptr) +{ + free(ptr); +} + void llmodel_setThreadCount(llmodel_model model, int32_t n_threads) { LLModelWrapper *wrapper = reinterpret_cast(model); diff --git a/gpt4all-backend/llmodel_c.h b/gpt4all-backend/llmodel_c.h index 0d221c7ecf8c..8d582d08cc3d 100644 --- a/gpt4all-backend/llmodel_c.h +++ b/gpt4all-backend/llmodel_c.h @@ -171,6 +171,23 @@ void llmodel_prompt(llmodel_model model, const char *prompt, llmodel_recalculate_callback recalculate_callback, llmodel_prompt_context *ctx); +/** + * Generate an embedding using the model. + * @param model A pointer to the llmodel_model instance. + * @param text A string representing the text to generate an embedding for. 
+ * @param embedding_size A pointer to a size_t that the call sets to the length of the returned
+ * floating point array.
+ * @return A pointer to an array of floating point values; the caller takes ownership of this
+ * memory and must release it with llmodel_free_embedding.
+ */
+float *llmodel_embedding(llmodel_model model, const char *text, size_t *embedding_size);
+
+/**
+ * Frees the memory allocated by the llmodel_embedding function.
+ * @param ptr A pointer to the embedding as returned from llmodel_embedding.
+ */
+void llmodel_free_embedding(float *ptr);
+
 /**
  * Set the number of threads to be used by the model.
  * @param model A pointer to the llmodel_model instance.
diff --git a/gpt4all-backend/llmodel_shared.cpp b/gpt4all-backend/llmodel_shared.cpp
index fe1db76397dd..89ba32b5afca 100644
--- a/gpt4all-backend/llmodel_shared.cpp
+++ b/gpt4all-backend/llmodel_shared.cpp
@@ -37,6 +37,13 @@ void LLModel::prompt(const std::string &prompt,
         return;
     }
 
+    if (!supportsCompletion()) {
+        std::string errorMessage = "ERROR: this model does not support text completion or chat!\n";
+        responseCallback(-1, errorMessage);
+        std::cerr << implementation().modelType() << errorMessage;
+        return;
+    }
+
     // tokenize the prompt
     std::vector<Token> embd_inp = tokenize(promptCtx, prompt);
 
@@ -158,3 +165,12 @@ void LLModel::prompt(const std::string &prompt,
         cachedTokens.clear();
     }
 }
+
+std::vector<float> LLModel::embedding(const std::string &/*text*/)
+{
+    if (!supportsEmbedding()) {
+        std::string errorMessage = "ERROR: this model does not support generating embeddings!\n";
+        std::cerr << implementation().modelType() << errorMessage;
+    }
+    return std::vector<float>();
+}
diff --git a/gpt4all-backend/mpt_impl.h b/gpt4all-backend/mpt_impl.h
index f515683693d6..df7b77184cd7 100644
--- a/gpt4all-backend/mpt_impl.h
+++ b/gpt4all-backend/mpt_impl.h
@@ -15,6 +15,8 @@ class MPT : public LLModel {
     MPT();
     ~MPT();
 
+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string &modelPath) override;
diff --git a/gpt4all-backend/replit_impl.h b/gpt4all-backend/replit_impl.h
index 73a8ea80bf46..f635f30dbab5 100644
--- a/gpt4all-backend/replit_impl.h
+++ b/gpt4all-backend/replit_impl.h
@@ -17,6 +17,8 @@ class Replit : public LLModel {
     Replit();
     ~Replit();
 
+    bool supportsEmbedding() const override { return false; }
+    bool supportsCompletion() const override { return true; }
     bool loadModel(const std::string &modelPath) override;
     bool isModelLoaded() const override;
     size_t requiredMem(const std::string & modelPath) override;
diff --git a/gpt4all-backend/scripts/convert_bert_hf_to_ggml.py b/gpt4all-backend/scripts/convert_bert_hf_to_ggml.py
new file mode 100644
index 000000000000..ba7045ca91aa
--- /dev/null
+++ b/gpt4all-backend/scripts/convert_bert_hf_to_ggml.py
@@ -0,0 +1,102 @@
+import sys
+import struct
+import json
+import torch
+import numpy as np
+
+from transformers import AutoModel, AutoTokenizer
+
+if len(sys.argv) < 2:
+    print("Usage: convert_bert_hf_to_ggml.py dir-model [use-f32]\n")
+    print("  ftype == 0 -> float32")
+    print("  ftype == 1 -> float16")
+    sys.exit(1)
+
+# output in the same directory as the model
+dir_model = sys.argv[1]
+fname_out = sys.argv[1] + "/ggml-model.bin"
+
+with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
+    encoder = json.load(f)
+
+with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
+    hparams = json.load(f)
+
+with open(dir_model + "/vocab.txt", "r", encoding="utf-8") as f:
+    vocab = f.readlines()
+# possible data types
+#   ftype == 0 -> float32
+#   ftype == 1 -> float16
+#
+# map from ftype to string
+ftype_str = ["f32", "f16"]
+
+ftype = 1
+if len(sys.argv) > 2:
+    ftype = int(sys.argv[2])
+    if ftype < 0 or ftype > 1:
+        print("Invalid ftype: " + str(ftype))
+        sys.exit(1)
+    fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
+
+
+tokenizer = AutoTokenizer.from_pretrained(dir_model)
+model = AutoModel.from_pretrained(dir_model, low_cpu_mem_usage=True)
+print(model)
+
+print(tokenizer.encode('I believe the meaning of life is'))
+
+list_vars = model.state_dict()
+for name in list_vars.keys():
+    print(name, list_vars[name].shape, list_vars[name].dtype)
+
+fout = open(fname_out, "wb")
+
+print(hparams)
+
+fout.write(struct.pack("i", 0x62657274)) # magic: "bert" in hex
+fout.write(struct.pack("i", hparams["vocab_size"]))
+fout.write(struct.pack("i", hparams["max_position_embeddings"]))
+fout.write(struct.pack("i", hparams["hidden_size"]))
+fout.write(struct.pack("i", hparams["intermediate_size"]))
+fout.write(struct.pack("i", hparams["num_attention_heads"]))
+fout.write(struct.pack("i", hparams["num_hidden_layers"]))
+fout.write(struct.pack("i", ftype))
+
+for i in range(hparams["vocab_size"]):
+    text = vocab[i][:-1] # strips newline at the end
+    #print(f"{i}:{text}")
+    data = bytes(text, 'utf-8')
+    fout.write(struct.pack("i", len(data)))
+    fout.write(data)
+
+for name in list_vars.keys():
+    data = list_vars[name].squeeze().numpy()
+    if name in ['embeddings.position_ids', 'pooler.dense.weight', 'pooler.dense.bias']:
+        continue
+    print("Processing variable: " + name + " with shape: ", data.shape)
+
+    n_dims = len(data.shape)
+
+    # ftype == 0 -> float32, ftype == 1 -> float16
+    if ftype == 1 and name[-7:] == ".weight" and n_dims == 2:
+        print("  Converting to float16")
+        data = data.astype(np.float16)
+        l_type = 1
+    else:
+        l_type = 0
+
+    # header
+    sname = name.encode('utf-8')
+    fout.write(struct.pack("iii", n_dims, len(sname), l_type))
+    for i in range(n_dims):
+        fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
+    fout.write(sname)
+
+    # data
+    data.tofile(fout)
+
+fout.close()
+
+print("Done. Output file: " + fname_out)
+print("")
diff --git a/gpt4all-bindings/python/gpt4all/__init__.py b/gpt4all-bindings/python/gpt4all/__init__.py
index 4c0cc9e6d0f7..54491d79133b 100644
--- a/gpt4all-bindings/python/gpt4all/__init__.py
+++ b/gpt4all-bindings/python/gpt4all/__init__.py
@@ -1,2 +1,2 @@
-from .gpt4all import GPT4All  # noqa
+from .gpt4all import GPT4All, embed  # noqa
 from .pyllmodel import LLModel  # noqa
diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py
index 7c126b76a362..1eddf2e64457 100644
--- a/gpt4all-bindings/python/gpt4all/gpt4all.py
+++ b/gpt4all-bindings/python/gpt4all/gpt4all.py
@@ -15,6 +15,20 @@
 # TODO: move to config
 DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\")
 
+def embed(
+    text: str
+) -> list[float]:
+    """
+    Generate an embedding for the given text using GPT4All.
+
+    Args:
+        text: The text document to generate an embedding for.
+
+    Returns:
+        An embedding of the given text.
+ """ + model = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin') + return model.model.generate_embedding(text) class GPT4All: """ diff --git a/gpt4all-bindings/python/gpt4all/pyllmodel.py b/gpt4all-bindings/python/gpt4all/pyllmodel.py index 7e091207db1c..8aa33227e7c3 100644 --- a/gpt4all-bindings/python/gpt4all/pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/pyllmodel.py @@ -112,6 +112,19 @@ class LLModelPromptContext(ctypes.Structure): llmodel.llmodel_prompt.restype = None +llmodel.llmodel_embedding.argtypes = [ + ctypes.c_void_p, + ctypes.c_char_p, + ctypes.POINTER(ctypes.c_size_t), +] + +llmodel.llmodel_embedding.restype = ctypes.POINTER(ctypes.c_float) + +llmodel.llmodel_free_embedding.argtypes = [ + ctypes.POINTER(ctypes.c_float) +] +llmodel.llmodel_free_embedding.restype = None + llmodel.llmodel_setThreadCount.argtypes = [ctypes.c_void_p, ctypes.c_int32] llmodel.llmodel_setThreadCount.restype = None @@ -233,6 +246,17 @@ def _set_context( self.context.repeat_last_n = repeat_last_n self.context.context_erase = context_erase + def generate_embedding( + self, + text: str + ) -> list[float]: + embedding_size = ctypes.c_size_t() + c_text = ctypes.c_char_p(text.encode('utf-8')) + embedding_ptr = llmodel.llmodel_embedding(self.model, c_text, ctypes.byref(embedding_size)) + embedding_array = ctypes.cast(embedding_ptr, ctypes.POINTER(ctypes.c_float * embedding_size.value)).contents + llmodel.llmodel_free_embedding(embedding_ptr) + return list(embedding_array) + def prompt_model( self, prompt: str, diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py index df382ed5ce81..dd9aa417ac04 100644 --- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py @@ -1,7 +1,7 @@ import sys from io import StringIO -from gpt4all import GPT4All +from gpt4all import GPT4All, embed def test_inference(): @@ -99,3 +99,11 @@ def test_inference_mpt(): output = model.generate(prompt) assert isinstance(output, str) assert len(output) > 0 + +def test_embedding(): + text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est' + output = embed(text) + #for i, value in enumerate(output): + #print(f'Value at index {i}: {value}') + assert len(output) == 384 + diff --git a/gpt4all-chat/chatgpt.h b/gpt4all-chat/chatgpt.h index b1f322985eb7..0f835bee33ad 100644 --- a/gpt4all-chat/chatgpt.h +++ b/gpt4all-chat/chatgpt.h @@ -46,6 +46,8 @@ class ChatGPT : public QObject, public LLModel { ChatGPT(); virtual ~ChatGPT(); + bool supportsEmbedding() const override { return false; } + bool supportsCompletion() const override { return true; } bool loadModel(const std::string &modelPath) override; bool isModelLoaded() const override; size_t requiredMem(const std::string &modelPath) override; diff --git a/gpt4all-chat/chatllm.cpp b/gpt4all-chat/chatllm.cpp index 37c92d536ec6..809102748526 100644 --- a/gpt4all-chat/chatllm.cpp +++ b/gpt4all-chat/chatllm.cpp @@ -14,6 +14,7 @@ #define REPLIT_INTERNAL_STATE_VERSION 0 #define LLAMA_INTERNAL_STATE_VERSION 0 #define FALCON_INTERNAL_STATE_VERSION 0 +#define BERT_INTERNAL_STATE_VERSION 0 class LLModelStore { public: @@ -264,6 +265,7 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo) case 'M': m_llModelType = LLModelType::MPT_; break; case 'R': m_llModelType = LLModelType::REPLIT_; break; case 'F': m_llModelType = LLModelType::FALCON_; break; + case 'B': m_llModelType = LLModelType::BERT_; break; default: { delete std::exchange(m_llModelInfo.model, nullptr); @@ -628,8 +630,8 @@ bool ChatLLM::handleNameRecalculate(bool isRecalc) qDebug() << "name recalc" << m_llmThread.objectName() << isRecalc; #endif Q_UNUSED(isRecalc); - Q_UNREACHABLE(); - return false; + qt_noop(); + return true; } bool ChatLLM::handleSystemPrompt(int32_t token) @@ -669,7 +671,8 @@ bool ChatLLM::serialize(QDataStream &stream, int version) case MPT_: stream << MPT_INTERNAL_STATE_VERSION; break; case GPTJ_: stream << GPTJ_INTERNAL_STATE_VERSION; break; case LLAMA_: stream << LLAMA_INTERNAL_STATE_VERSION; break; - case FALCON_: stream << LLAMA_INTERNAL_STATE_VERSION; break; + case FALCON_: stream << FALCON_INTERNAL_STATE_VERSION; break; + case BERT_: stream << BERT_INTERNAL_STATE_VERSION; break; default: Q_UNREACHABLE(); } } diff --git a/gpt4all-chat/chatllm.h b/gpt4all-chat/chatllm.h index aad8a0fb5adf..f75d24e26bcf 100644 --- a/gpt4all-chat/chatllm.h +++ b/gpt4all-chat/chatllm.h @@ -16,6 +16,7 @@ enum LLModelType { CHATGPT_, REPLIT_, FALCON_, + BERT_ }; struct LLModelInfo { From 4963db8f4369471a97199c082ea39e0c3f88b244 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 14:20:48 -0400 Subject: [PATCH 138/198] Bump the version numbers for both python and c backend. 
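
The bumped llmodel 0.3 interface is the first release to export llmodel_embedding() and llmodel_free_embedding(). For reference, a minimal sketch of driving the new C entry points from Python over ctypes, mirroring what pyllmodel.py does in the patch above; the library filename and the already-loaded llmodel_model handle are illustrative assumptions, not part of this patch:

    import ctypes

    lib = ctypes.CDLL("./libllmodel.so")  # assumed library name/path

    # Declare the two entry points added by the embedding patch.
    lib.llmodel_embedding.argtypes = [
        ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_size_t)]
    lib.llmodel_embedding.restype = ctypes.POINTER(ctypes.c_float)
    lib.llmodel_free_embedding.argtypes = [ctypes.POINTER(ctypes.c_float)]
    lib.llmodel_free_embedding.restype = None

    def embed_text(model, text: str) -> list[float]:
        # model is assumed to be an already-loaded llmodel_model handle.
        size = ctypes.c_size_t()
        ptr = lib.llmodel_embedding(model, text.encode("utf-8"), ctypes.byref(size))
        if not ptr:  # NULL on allocation failure
            return []
        try:
            return ptr[:size.value]  # copy the floats out of the C buffer
        finally:
            lib.llmodel_free_embedding(ptr)  # the caller owns the buffer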
--- gpt4all-backend/CMakeLists.txt | 2 +- gpt4all-bindings/python/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gpt4all-backend/CMakeLists.txt b/gpt4all-backend/CMakeLists.txt index 5ca3b688e6eb..830a3b867583 100644 --- a/gpt4all-backend/CMakeLists.txt +++ b/gpt4all-backend/CMakeLists.txt @@ -20,7 +20,7 @@ endif() include_directories("${CMAKE_CURRENT_BINARY_DIR}") set(LLMODEL_VERSION_MAJOR 0) -set(LLMODEL_VERSION_MINOR 2) +set(LLMODEL_VERSION_MINOR 3) set(LLMODEL_VERSION_PATCH 0) set(LLMODEL_VERSION "${LLMODEL_VERSION_MAJOR}.${LLMODEL_VERSION_MINOR}.${LLMODEL_VERSION_PATCH}") project(llmodel VERSION ${LLMODEL_VERSION} LANGUAGES CXX C) diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py index 724b80453bc0..77076f639d1b 100644 --- a/gpt4all-bindings/python/setup.py +++ b/gpt4all-bindings/python/setup.py @@ -61,7 +61,7 @@ def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir): setup( name=package_name, - version="1.0.3", + version="1.0.4", description="Python bindings for GPT4All", author="Richard Guo", author_email="richard@nomic.ai", From 620090067799c96d7436a064139941ab9c8d5097 Mon Sep 17 00:00:00 2001 From: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> Date: Thu, 13 Jul 2023 20:44:17 +0200 Subject: [PATCH 139/198] Fix Windows MSVC arch detection (#1194) - in llmodel.cpp to fix AVX-only handling Signed-off-by: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> --- gpt4all-backend/llmodel.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gpt4all-backend/llmodel.cpp b/gpt4all-backend/llmodel.cpp index b0e498087e51..d3ac6c960873 100644 --- a/gpt4all-backend/llmodel.cpp +++ b/gpt4all-backend/llmodel.cpp @@ -18,7 +18,7 @@ std::string s_implementations_search_path = "."; static bool has_at_least_minimal_hardware() { -#ifdef __x86_64__ +#if defined(__x86_64__) || defined(_M_X64) #ifndef _MSC_VER return __builtin_cpu_supports("avx"); #else @@ -30,7 +30,7 @@ static bool has_at_least_minimal_hardware() { } static bool requires_avxonly() { -#ifdef __x86_64__ +#if defined(__x86_64__) || defined(_M_X64) #ifndef _MSC_VER return !__builtin_cpu_supports("avx2"); #else From ee4186d579858c9b727d919ebfa102f9c53d65c1 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Thu, 13 Jul 2023 17:57:48 -0400 Subject: [PATCH 140/198] Fixup bert python bindings. 
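
With the normalizer moved out of the ggml graph and into Bert::embedding() below, returned vectors are unit length, so the cosine similarity of two embeddings reduces to a plain dot product. A minimal usage sketch of the reworked Embedder API, assuming the ggml-all-MiniLM-L6-v2-f16.bin model file is available in the default model directory:

    from gpt4all import Embedder

    embedder = Embedder()  # wraps ggml-all-MiniLM-L6-v2-f16.bin
    a = embedder.embed("The quick brown fox jumps over the lazy dog")
    b = embedder.embed("A fast auburn fox leaps over a sleepy dog")
    assert len(a) == 384  # all-MiniLM-L6-v2 embedding width

    # Embeddings are L2-normalized, so the dot product is the cosine similarity.
    print(sum(x * y for x, y in zip(a, b)))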
---
 gpt4all-backend/bert.cpp                      |  9 +++---
 gpt4all-bindings/python/gpt4all/__init__.py   |  2 +-
 gpt4all-bindings/python/gpt4all/gpt4all.py    | 30 +++++++++++--------
 gpt4all-bindings/python/gpt4all/pyllmodel.py  |  2 +-
 .../python/gpt4all/tests/test_gpt4all.py      | 17 ++++++++---
 5 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/gpt4all-backend/bert.cpp b/gpt4all-backend/bert.cpp
index 66ee2515cf55..7a83289b6984 100644
--- a/gpt4all-backend/bert.cpp
+++ b/gpt4all-backend/bert.cpp
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <numeric>
 
 //#define DEBUG_BERT
 
@@ -462,11 +463,6 @@ void bert_eval(
     ggml_set_f32(sum, 1.0f / N);
     inpL = ggml_mul_mat(ctx0, inpL, sum);
 
-    // normalizer
-    ggml_tensor *length = ggml_sqrt(ctx0,
-        ggml_sum(ctx0, ggml_sqr(ctx0, inpL)));
-    inpL = ggml_scale(ctx0, inpL, ggml_div(ctx0, ggml_new_f32(ctx0, 1.0f), length));
-
     ggml_tensor *output = inpL;
     // run the computation
     ggml_build_forward_expand(&gf, output);
@@ -987,6 +983,9 @@ std::vector<float> Bert::embedding(const std::string &text)
     }
     std::transform(embeddingsSum.begin(), embeddingsSum.end(), embeddingsSum.begin(), [embeddingsSumTotal](float num){ return num / embeddingsSumTotal; });
+    double magnitude = std::sqrt(std::inner_product(embeddingsSum.begin(), embeddingsSum.end(), embeddingsSum.begin(), 0.0));
+    for (auto &value : embeddingsSum)
+        value /= magnitude;
     std::vector<float> finalEmbeddings(embeddingsSum.begin(), embeddingsSum.end());
     return finalEmbeddings;
 }
diff --git a/gpt4all-bindings/python/gpt4all/__init__.py b/gpt4all-bindings/python/gpt4all/__init__.py
index 54491d79133b..0b26abfe2294 100644
--- a/gpt4all-bindings/python/gpt4all/__init__.py
+++ b/gpt4all-bindings/python/gpt4all/__init__.py
@@ -1,2 +1,2 @@
-from .gpt4all import GPT4All, embed  # noqa
+from .gpt4all import GPT4All, Embedder  # noqa
 from .pyllmodel import LLModel  # noqa
diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py
index 1eddf2e64457..cfe9e7ff4ab4 100644
--- a/gpt4all-bindings/python/gpt4all/gpt4all.py
+++ b/gpt4all-bindings/python/gpt4all/gpt4all.py
@@ -15,20 +15,26 @@
 # TODO: move to config
 DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\")
 
-def embed(
-    text: str
-) -> list[float]:
-    """
-    Generate an embedding for the given text using GPT4All.
+class Embedder:
+    def __init__(
+        self
+    ):
+        self.gpt4all = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin', n_threads=8)
+
+    def embed(
+        self,
+        text: str
+    ) -> list[float]:
+        """
+        Generate an embedding for the given text using GPT4All.
 
-    Args:
-        text: The text document to generate an embedding for.
+        Args:
+            text: The text document to generate an embedding for.
 
-    Returns:
-        An embedding of the given text.
+        Returns:
+            An embedding of the given text.
+ """ + return self.gpt4all.model.generate_embedding(text) class GPT4All: """ diff --git a/gpt4all-bindings/python/gpt4all/pyllmodel.py b/gpt4all-bindings/python/gpt4all/pyllmodel.py index 8aa33227e7c3..519d70f3d492 100644 --- a/gpt4all-bindings/python/gpt4all/pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/pyllmodel.py @@ -253,7 +253,7 @@ def generate_embedding( embedding_size = ctypes.c_size_t() c_text = ctypes.c_char_p(text.encode('utf-8')) embedding_ptr = llmodel.llmodel_embedding(self.model, c_text, ctypes.byref(embedding_size)) - embedding_array = ctypes.cast(embedding_ptr, ctypes.POINTER(ctypes.c_float * embedding_size.value)).contents + embedding_array = [embedding_ptr[i] for i in range(embedding_size.value)] llmodel.llmodel_free_embedding(embedding_ptr) return list(embedding_array) diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py index dd9aa417ac04..82c761b57908 100644 --- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py @@ -1,8 +1,8 @@ import sys from io import StringIO -from gpt4all import GPT4All, embed - +from gpt4all import GPT4All, Embedder +import time def test_inference(): model = GPT4All(model_name='orca-mini-3b.ggmlv3.q4_0.bin') @@ -101,9 +101,18 @@ def test_inference_mpt(): assert len(output) > 0 def test_embedding(): - text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id estLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est' - output = embed(text) + text = 'The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox' + start_time = time.time() + embedder = Embedder() + for i in range(1000): + output = embedder.embed(text) + end_time = time.time() + elapsed_time = end_time - start_time + print(f"Time taken: {elapsed_time} tokens/seconds") + #for i, value in enumerate(output): #print(f'Value at index {i}: {value}') assert len(output) == 384 +if __name__ == "__main__": + test_embedding() From 15f1fe544572f9395163850810a28e665cc31fac Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Thu, 13 Jul 2023 18:23:20 -0400 Subject: [PATCH 141/198] rename embedder --- gpt4all-bindings/python/gpt4all/__init__.py | 2 +- gpt4all-bindings/python/gpt4all/gpt4all.py | 2 +- gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gpt4all-bindings/python/gpt4all/__init__.py b/gpt4all-bindings/python/gpt4all/__init__.py index 0b26abfe2294..71480e937a76 100644 --- a/gpt4all-bindings/python/gpt4all/__init__.py +++ 
b/gpt4all-bindings/python/gpt4all/__init__.py @@ -1,2 +1,2 @@ -from .gpt4all import GPT4All, Embedder # noqa +from .gpt4all import GPT4All, Embed4All # noqa from .pyllmodel import LLModel # noqa diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index cfe9e7ff4ab4..f828b572222e 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -15,7 +15,7 @@ # TODO: move to config DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\") -class Embedder: +class Embed4All: def __init__( self ): diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py index 82c761b57908..e9c344703fc6 100644 --- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py @@ -1,7 +1,7 @@ import sys from io import StringIO -from gpt4all import GPT4All, Embedder +from gpt4all import GPT4All, Embed4All import time def test_inference(): From 936dcd2bfce8be5a922dbbbed5c01ac5ad3257d3 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Thu, 13 Jul 2023 18:23:40 -0400 Subject: [PATCH 142/198] use default n_threads --- gpt4all-bindings/python/gpt4all/gpt4all.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index f828b572222e..d4813c1214e7 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -19,7 +19,7 @@ class Embed4All: def __init__( self ): - self.gpt4all = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin', n_threads=8) + self.gpt4all = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin') def embed( self, From 1c4a244291e0e6f141a7041d20846da56025223d Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Thu, 13 Jul 2023 18:24:01 -0400 Subject: [PATCH 143/198] bump mem allocation a bit --- gpt4all-backend/bert.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-backend/bert.cpp b/gpt4all-backend/bert.cpp index 7a83289b6984..29532c4824ec 100644 --- a/gpt4all-backend/bert.cpp +++ b/gpt4all-backend/bert.cpp @@ -871,7 +871,7 @@ struct bert_ctx * bert_load_from_file(const char *fname) // TODO: Max tokens should be a param? 
int32_t N = new_bert->model.hparams.n_max_tokens; - new_bert->mem_per_input = 1.9 * (new_bert->mem_per_token * N); // add 10% to account for ggml object overhead + new_bert->mem_per_input = 2.2 * (new_bert->mem_per_token * N); // add 10% to account for ggml object overhead } #if defined(DEBUG_BERT) From c77ab849c0937d27db7ac2efa3df3393eb247bb3 Mon Sep 17 00:00:00 2001 From: Aaron Miller Date: Thu, 13 Jul 2023 18:24:25 -0400 Subject: [PATCH 144/198] LLModel objects should hold a reference to the library prevents llmodel lib from being gc'd before live model objects --- gpt4all-bindings/python/gpt4all/pyllmodel.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gpt4all-bindings/python/gpt4all/pyllmodel.py b/gpt4all-bindings/python/gpt4all/pyllmodel.py index 519d70f3d492..e8895a9c0c64 100644 --- a/gpt4all-bindings/python/gpt4all/pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/pyllmodel.py @@ -154,10 +154,11 @@ def __init__(self): self.model = None self.model_name = None self.context = None + self.llmodel_lib = llmodel def __del__(self): if self.model is not None: - llmodel.llmodel_model_destroy(self.model) + self.llmodel_lib.llmodel_model_destroy(self.model) def memory_needed(self, model_path: str) -> int: model_path_enc = model_path.encode("utf-8") From bb2b82e1b965fbd5af032188b4903ab3386afc27 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Fri, 14 Jul 2023 09:43:18 -0400 Subject: [PATCH 145/198] Add docs and bump version since we changed python api again. --- gpt4all-bindings/python/gpt4all/gpt4all.py | 8 +++++++- gpt4all-bindings/python/setup.py | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index d4813c1214e7..3b1a27fd0f10 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -16,9 +16,15 @@ DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\") class Embed4All: + """ + Python class that handles embeddings for GPT4All. + """ def __init__( self ): + """ + Constructor + """ self.gpt4all = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin') def embed( @@ -26,7 +32,7 @@ def embed( text: str ) -> list[float]: """ - Generate an embedding for all GPT4All. + Generate an embedding. Args: text: The text document to generate an embedding for. diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py index 77076f639d1b..e43d9ce9480a 100644 --- a/gpt4all-bindings/python/setup.py +++ b/gpt4all-bindings/python/setup.py @@ -61,7 +61,7 @@ def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir): setup( name=package_name, - version="1.0.4", + version="1.0.5", description="Python bindings for GPT4All", author="Richard Guo", author_email="richard@nomic.ai", From 6656f0f41e5191ec40c6292146693b4bd4257584 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Fri, 14 Jul 2023 09:45:42 -0400 Subject: [PATCH 146/198] Fix the test to work and not do timings. 
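For context on the library-reference fix in PATCH 144 above: at interpreter shutdown, CPython may clear module globals before instance __del__ methods run, so a destructor that reaches for a module-level ctypes handle can fail. A minimal sketch of the pattern, not part of this patch series, with a hypothetical library name (llmodel_model_destroy is the entry point the patch itself uses):

```python
import ctypes

llmodel = ctypes.CDLL("libllmodel.so")  # hypothetical path/name for illustration

class Model:
    def __init__(self, handle):
        self.handle = handle
        # Pin the CDLL object on the instance so it stays alive at least as
        # long as this object, even if the module global is torn down first.
        self.llmodel_lib = llmodel

    def __del__(self):
        # Safe: uses the pinned reference instead of the module global.
        if self.handle is not None:
            self.llmodel_lib.llmodel_model_destroy(self.handle)
```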
--- .../python/gpt4all/tests/test_gpt4all.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py index e9c344703fc6..6fdaa6cca722 100644 --- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py @@ -102,17 +102,8 @@ def test_inference_mpt(): def test_embedding(): text = 'The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox jumps over the lazy dog The quick brown fox' - start_time = time.time() - embedder = Embedder() - for i in range(1000): - output = embedder.embed(text) - end_time = time.time() - elapsed_time = end_time - start_time - print(f"Time taken: {elapsed_time} tokens/seconds") - + embedder = Embed4All() + output = embedder.embed(text) #for i, value in enumerate(output): #print(f'Value at index {i}: {value}') assert len(output) == 384 - -if __name__ == "__main__": - test_embedding() From 0c0a4f2c22768c64e44f73eb9d5c895c1cc818c3 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Fri, 14 Jul 2023 10:48:18 -0400 Subject: [PATCH 147/198] Add the docs. 
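One wart in the timing loop removed by PATCH 146 below is worth noting: its print labeled raw elapsed seconds as "tokens/seconds". A hedged sketch of the corrected throughput math, approximating token count by whitespace splitting (which the real tokenizer will not match exactly); PATCH 149 later in this series takes the same approach in a dedicated timing script:

```python
import time

def measure_throughput(embed, text: str, runs: int = 100) -> float:
    n_tokens = len(text.split())  # rough approximation of the token count
    start = time.time()
    for _ in range(runs):
        embed(text)
    elapsed = time.time() - start
    # Throughput is tokens processed per unit time, not the raw elapsed
    # time that the removed print reported.
    return runs * n_tokens / elapsed
```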
--- gpt4all-bindings/python/docs/gpt4all_python.md | 17 +++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/gpt4all-bindings/python/docs/gpt4all_python.md b/gpt4all-bindings/python/docs/gpt4all_python.md index 73f3402b84c2..c95f6b0dd38e 100644 --- a/gpt4all-bindings/python/docs/gpt4all_python.md +++ b/gpt4all-bindings/python/docs/gpt4all_python.md @@ -109,5 +109,22 @@ with model.chat_session(): print(model.current_chat_session) ``` +### Generating embeddings +GPT4All includes a super simple means of generating embeddings for your text documents. The embedding model will automatically be downloaded if not installed. + +=== "Embed4All Example" + ``` py + from gpt4all import GPT4All, Embed4All + text = 'The quick brown fox jumps over the lazy dog' + embedder = Embed4All() + output = embedder.embed(text) + print(output) + ``` +=== "Output" + ``` + [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...] + ``` + +### API documentation ::: gpt4all.gpt4all.GPT4All +::: gpt4all.gpt4all.Embed4All From 6c8669cad30eb4624efd104922a2cc6961c9d2b3 Mon Sep 17 00:00:00 2001 From: Lakshay Kansal Date: Thu, 13 Jul 2023 17:01:08 -0400 Subject: [PATCH 148/198] highlighting rules for html and php and latex --- gpt4all-chat/responsetext.cpp | 154 +++++++++++++++++++++++++++++++++- 1 file changed, 153 insertions(+), 1 deletion(-) diff --git a/gpt4all-chat/responsetext.cpp b/gpt4all-chat/responsetext.cpp index 9ca996a233ea..68fedd1e8bbb 100644 --- a/gpt4all-chat/responsetext.cpp +++ b/gpt4all-chat/responsetext.cpp @@ -18,6 +18,9 @@ enum Language { Go, Json, Csharp, + Latex, + Html, + Php }; static QColor keywordColor = "#2e95d3"; // blue @@ -33,6 +36,11 @@ static QColor commandColor = functionCallColor; static QColor variableColor = numberColor; static QColor keyColor = functionColor; static QColor valueColor = stringColor; +static QColor parameterColor = stringColor; +static QColor attributeNameColor = numberColor; +static QColor attributeValueColor = stringColor; +static QColor specialCharacterColor = functionColor; +static QColor doctypeColor = commentColor; static Language stringToLanguage(const QString &language) { @@ -62,6 +70,12 @@ static Language stringToLanguage(const QString &language) return Go; if (language == "json") return Json; + if (language == "latex") + return Latex; + if (language == "html") + return Html; + if (language == "php") + return Php; return None; } @@ -561,6 +575,135 @@ static QVector<HighlightingRule> bashHighlightingRules() return highlightingRules; } +static QVector<HighlightingRule> latexHighlightingRules() +{ + static QVector<HighlightingRule> highlightingRules; + if (highlightingRules.isEmpty()) { + + HighlightingRule rule; + + QTextCharFormat commandFormat; + commandFormat.setForeground(commandColor); // commandColor needs to be set to your liking + rule.pattern = QRegularExpression("\\\\[A-Za-z]+"); // Pattern for LaTeX commands + rule.format = commandFormat; + highlightingRules.append(rule); + + QTextCharFormat commentFormat; + commentFormat.setForeground(commentColor); // commentColor needs to be set to your liking + rule.pattern = QRegularExpression("%[^\n]*"); // Pattern for LaTeX comments + rule.format = commentFormat; + highlightingRules.append(rule); + } + return highlightingRules; +} + +static QVector<HighlightingRule> htmlHighlightingRules() +{ + static QVector<HighlightingRule> highlightingRules; + if (highlightingRules.isEmpty()) { + + HighlightingRule rule; + + QTextCharFormat attributeNameFormat; + attributeNameFormat.setForeground(attributeNameColor); + rule.pattern = QRegularExpression("\\b(\\w+)\\s*="); + rule.format = attributeNameFormat; + highlightingRules.append(rule); + + QTextCharFormat attributeValueFormat; + attributeValueFormat.setForeground(attributeValueColor); + rule.pattern = QRegularExpression("\".*?\"|'.*?'"); + rule.format = attributeValueFormat; + highlightingRules.append(rule); + + QTextCharFormat commentFormat; + commentFormat.setForeground(commentColor); + rule.pattern = QRegularExpression("<!--.*?-->"); + rule.format = commentFormat; + highlightingRules.append(rule); + + QTextCharFormat specialCharacterFormat; + specialCharacterFormat.setForeground(specialCharacterColor); + rule.pattern = QRegularExpression("&[a-zA-Z0-9#]*;"); + rule.format = specialCharacterFormat; + highlightingRules.append(rule); + + QTextCharFormat doctypeFormat; + doctypeFormat.setForeground(doctypeColor); + rule.pattern = QRegularExpression("<!DOCTYPE[^>]*>"); + rule.format = doctypeFormat; + highlightingRules.append(rule); + } + return highlightingRules; +} + +static QVector<HighlightingRule> phpHighlightingRules() +{ + static QVector<HighlightingRule> highlightingRules; + if (highlightingRules.isEmpty()) { + + HighlightingRule rule; + + QTextCharFormat functionCallFormat; + functionCallFormat.setForeground(functionCallColor); + rule.pattern = QRegularExpression("\\b(\\w+)\\s*(?=\\()"); + rule.format = functionCallFormat; + highlightingRules.append(rule); + + QTextCharFormat functionFormat; + functionFormat.setForeground(functionColor); + rule.pattern = QRegularExpression("\\bfunction\\s+(\\w+)\\b"); + rule.format = functionFormat; + highlightingRules.append(rule); + + QTextCharFormat numberFormat; + numberFormat.setForeground(numberColor); + rule.pattern = QRegularExpression("\\b[0-9]*\\.?[0-9]+\\b"); + rule.format = numberFormat; + highlightingRules.append(rule); + + QTextCharFormat keywordFormat; + keywordFormat.setForeground(keywordColor); + QStringList keywordPatterns = { + "\\bif\\b", "\\belse\\b", "\\belseif\\b", "\\bwhile\\b", "\\bfor\\b", + "\\bforeach\\b", "\\breturn\\b", "\\bprint\\b", "\\binclude\\b", "\\brequire\\b", + "\\binclude_once\\b", "\\brequire_once\\b", "\\btry\\b", "\\bcatch\\b", + "\\bfinally\\b", "\\bcontinue\\b", "\\bbreak\\b", "\\bclass\\b", "\\bfunction\\b", + "\\bnew\\b", "\\bthrow\\b", "\\barray\\b", "\\bpublic\\b", "\\bprivate\\b", + "\\bprotected\\b", "\\bstatic\\b", "\\bglobal\\b", "\\bisset\\b", "\\bunset\\b", + "\\bnull\\b", "\\btrue\\b", "\\bfalse\\b" + }; + + for (const QString &pattern : keywordPatterns) { + rule.pattern = QRegularExpression(pattern); + rule.format = keywordFormat; + highlightingRules.append(rule); + } + + QTextCharFormat stringFormat; + stringFormat.setForeground(stringColor); + rule.pattern = QRegularExpression("\".*?\""); + rule.format = stringFormat; + highlightingRules.append(rule); + + rule.pattern = QRegularExpression("\'.*?\'"); + rule.format = stringFormat; + highlightingRules.append(rule); + + QTextCharFormat commentFormat; + commentFormat.setForeground(commentColor); + rule.pattern = QRegularExpression("//[^\n]*"); + rule.format = commentFormat; + highlightingRules.append(rule); + + rule.pattern = QRegularExpression("/\\*.*?\\*/"); + rule.format = commentFormat; + highlightingRules.append(rule); + } + return highlightingRules; +} + + static QVector<HighlightingRule> jsonHighlightingRules() { static QVector<HighlightingRule> highlightingRules; @@ -616,6 +759,12 @@ void SyntaxHighlighter::highlightBlock(const QString &text) rules = javaHighlightingRules(); else if (block.userState() == Json) rules = jsonHighlightingRules(); + else if (block.userState() == Latex) + rules = latexHighlightingRules(); + else if (block.userState() == Html) + rules
= htmlHighlightingRules(); + else if (block.userState() == Php) + rules = phpHighlightingRules(); for (const HighlightingRule &rule : qAsConst(rules)) { QRegularExpressionMatchIterator matchIterator = rule.pattern.globalMatch(text); @@ -821,7 +970,10 @@ void ResponseText::handleCodeBlocks() || firstWord == "java" || firstWord == "go" || firstWord == "golang" - || firstWord == "json") { + || firstWord == "json" + || firstWord == "latex" + || firstWord == "html" + || firstWord == "php") { codeLanguage = firstWord; capturedText.remove(0, match.captured(0).length()); } From f543affa9af9d52f5b624b2ea348bf40761a4ef1 Mon Sep 17 00:00:00 2001 From: Adam Treat Date: Fri, 14 Jul 2023 14:12:09 -0400 Subject: [PATCH 149/198] Add better docs and threading support to bert. --- .../python/docs/gpt4all_python.md | 20 +---------- .../python/docs/gpt4all_python_embedding.md | 35 +++++++++++++++++++ gpt4all-bindings/python/gpt4all/gpt4all.py | 10 ++++-- .../gpt4all/tests/test_embed_timings.py | 18 ++++++++++ gpt4all-bindings/python/mkdocs.yml | 6 ++-- gpt4all-bindings/python/setup.py | 2 +- 6 files changed, 66 insertions(+), 25 deletions(-) create mode 100644 gpt4all-bindings/python/docs/gpt4all_python_embedding.md create mode 100644 gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py diff --git a/gpt4all-bindings/python/docs/gpt4all_python.md b/gpt4all-bindings/python/docs/gpt4all_python.md index c95f6b0dd38e..0d179b068013 100644 --- a/gpt4all-bindings/python/docs/gpt4all_python.md +++ b/gpt4all-bindings/python/docs/gpt4all_python.md @@ -1,8 +1,7 @@ -# GPT4All Python API +# GPT4All Python Generation API The `GPT4All` python package provides bindings to our C/C++ model backend libraries. The source code and local build instructions can be found [here](https://github.com/nomic-ai/gpt4all/tree/main/gpt4all-bindings/python). - ## Quickstart ```bash @@ -109,22 +108,5 @@ with model.chat_session(): print(model.current_chat_session) ``` -### Generating embeddings -GPT4All includes a super simple means of generating embeddings for your text documents. The embedding model will automatically be downloaded if not installed. - -=== "Embed4All Example" - ``` py - from gpt4all import GPT4All, Embed4All - text = 'The quick brown fox jumps over the lazy dog' - embedder = Embed4All() - output = embedder.embed(text) - print(output) - ``` -=== "Output" - ``` - [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...] - ``` - ### API documentation ::: gpt4all.gpt4all.GPT4All -::: gpt4all.gpt4all.Embed4All diff --git a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md new file mode 100644 index 000000000000..8faaec3f7086 --- /dev/null +++ b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md @@ -0,0 +1,35 @@ +# GPT4All Python Embedding API +GPT4All includes a super simple means of generating embeddings for your text documents. + +## Quickstart + +```bash +pip install gpt4all +``` + +### Generating embeddings +The embedding model will automatically be downloaded if not installed. + +=== "Embed4All Example" + ``` py + from gpt4all import GPT4All, Embed4All + text = 'The quick brown fox jumps over the lazy dog' + embedder = Embed4All() + output = embedder.embed(text) + print(output) + ``` +=== "Output" + ``` + [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...] 
+ ``` +### Speed of embedding generation +The following table lists the generation speed for text documents of N tokens captured on an Intel i913900HX CPU with DDR5 5600 running with 8 threads under stable load. + +| Tokens | 2^7 | 2^9 | 2^11 | 2^13 | 2^14 | +| --------------- | ---- | ---- | ---- | ---- | ---- | +| Wall time (s) | .02 | .08 | .24 | .96 | 1.9 | +| Tokens / Second | 6508 | 6431 | 8622 | 8509 | 8369 | + + +### API documentation +::: gpt4all.gpt4all.Embed4All diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index 3b1a27fd0f10..e00919722626 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -20,12 +20,16 @@ class Embed4All: Python class that handles embeddings for GPT4All. """ def __init__( - self + self, + n_threads: Optional[int] = None, ): """ Constructor + + Args: + n_threads: number of CPU threads used by GPT4All. Default is None, then the number of threads are determined automatically. """ - self.gpt4all = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin') + self.gpt4all = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin', n_threads=n_threads) def embed( self, @@ -65,7 +69,7 @@ def __init__( model_type: Model architecture. This argument currently does not have any functionality and is just used as descriptive identifier for user. Default is None. allow_download: Allow API to download models from gpt4all.io. Default is True. - n_threads: number of CPU threads used by GPT4All. Default is None, than the number of threads are determined automatically. + n_threads: number of CPU threads used by GPT4All. Default is None, then the number of threads are determined automatically. """ self.model_type = model_type self.model = pyllmodel.LLModel() diff --git a/gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py b/gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py new file mode 100644 index 000000000000..01b3f6664965 --- /dev/null +++ b/gpt4all-bindings/python/gpt4all/tests/test_embed_timings.py @@ -0,0 +1,18 @@ +import sys +from io import StringIO + +from gpt4all import GPT4All, Embed4All +import time + +def time_embedding(i, embedder): + text = 'foo bar ' * i + start_time = time.time() + output = embedder.embed(text) + end_time = time.time() + elapsed_time = end_time - start_time + print(f"Time report: {2 * i / elapsed_time} tokens/second with {2 * i} tokens taking {elapsed_time} seconds") + +if __name__ == "__main__": + embedder = Embed4All(n_threads=8) + for i in [2**n for n in range(6, 14)]: + time_embedding(i, embedder) diff --git a/gpt4all-bindings/python/mkdocs.yml b/gpt4all-bindings/python/mkdocs.yml index 675a09717806..60e3961eef1a 100644 --- a/gpt4all-bindings/python/mkdocs.yml +++ b/gpt4all-bindings/python/mkdocs.yml @@ -10,7 +10,9 @@ use_directory_urls: false nav: - 'index.md' - 'Bindings': - - 'GPT4All in Python': 'gpt4all_python.md' + - 'GPT4All in Python': + - 'Generation': 'gpt4all_python.md' + - 'Embedding': 'gpt4all_python_embedding.md' - 'GPT4All Chat Client': 'gpt4all_chat.md' - 'gpt4all_cli.md' # - 'Tutorials': @@ -68,4 +70,4 @@ plugins: #- mkdocs-jupyter: # ignore_h1_titles: True - # show_input: True \ No newline at end of file + # show_input: True diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py index e43d9ce9480a..8fbab0f3079a 100644 --- a/gpt4all-bindings/python/setup.py +++ b/gpt4all-bindings/python/setup.py @@ -61,7 +61,7 @@ def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir): setup( 
name=package_name, - version="1.0.5", + version="1.0.6", description="Python bindings for GPT4All", author="Richard Guo", author_email="richard@nomic.ai", From 89e277bb3cdbbf9ecce92523aef4e0d18aaa6e5f Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Fri, 14 Jul 2023 14:30:14 -0400 Subject: [PATCH 150/198] Update gpt4all_python_embedding.md Signed-off-by: Andriy Mulyar --- gpt4all-bindings/python/docs/gpt4all_python_embedding.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md index 8faaec3f7086..bfa4ade4856d 100644 --- a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md +++ b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md @@ -1,5 +1,5 @@ -# GPT4All Python Embedding API -GPT4All includes a super simple means of generating embeddings for your text documents. +# Embeddings +GPT4All supports generating high quality embeddings of arbitrary length documents of text using a CPU optimized contrastively trained [Sentence Transformer](https://www.sbert.net/). These embeddings are comparable in quality for many tasks with OpenAI. ## Quickstart @@ -25,7 +25,7 @@ The embedding model will automatically be downloaded if not installed. ### Speed of embedding generation The following table lists the generation speed for text documents of N tokens captured on an Intel i913900HX CPU with DDR5 5600 running with 8 threads under stable load. -| Tokens | 2^7 | 2^9 | 2^11 | 2^13 | 2^14 | +| Tokens | 128 | 512 | 2048 | 8129 | 2^14 | | --------------- | ---- | ---- | ---- | ---- | ---- | | Wall time (s) | .02 | .08 | .24 | .96 | 1.9 | | Tokens / Second | 6508 | 6431 | 8622 | 8509 | 8369 | From 306105e62fd4f756f39110943b14b6c1f4474811 Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Fri, 14 Jul 2023 14:54:36 -0400 Subject: [PATCH 151/198] Update gpt4all_python_embedding.md Signed-off-by: Andriy Mulyar --- gpt4all-bindings/python/docs/gpt4all_python_embedding.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md index bfa4ade4856d..3d2071268e61 100644 --- a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md +++ b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md @@ -23,7 +23,7 @@ The embedding model will automatically be downloaded if not installed. [0.034696947783231735, -0.07192722707986832, 0.06923297047615051, ...] ``` ### Speed of embedding generation -The following table lists the generation speed for text documents of N tokens captured on an Intel i913900HX CPU with DDR5 5600 running with 8 threads under stable load. +The following table lists the generation speed for text document captured on an Intel i913900HX CPU with DDR5 5600 running with 8 threads under stable load. 
| Tokens | 128 | 512 | 2048 | 8129 | 2^14 | | --------------- | ---- | ---- | ---- | ---- | ---- | | Wall time (s) | .02 | .08 | .24 | .96 | 1.9 | | Tokens / Second | 6508 | 6431 | 8622 | 8509 | 8369 | From cfd70b69fcf5e587b8e0e3e9b9aaa90e19cbbc51 Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Fri, 14 Jul 2023 14:54:56 -0400 Subject: [PATCH 152/198] Update gpt4all_python_embedding.md Signed-off-by: Andriy Mulyar --- gpt4all-bindings/python/docs/gpt4all_python_embedding.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md index 3d2071268e61..111b0568d2a4 100644 --- a/gpt4all-bindings/python/docs/gpt4all_python_embedding.md +++ b/gpt4all-bindings/python/docs/gpt4all_python_embedding.md @@ -25,7 +25,7 @@ The embedding model will automatically be downloaded if not installed. ### Speed of embedding generation The following table lists the generation speed for text document captured on an Intel i913900HX CPU with DDR5 5600 running with 8 threads under stable load. -| Tokens | 128 | 512 | 2048 | 8129 | 2^14 | +| Tokens | 128 | 512 | 2048 | 8129 | 16,384 | | --------------- | ---- | ---- | ---- | ---- | ---- | | Wall time (s) | .02 | .08 | .24 | .96 | 1.9 | | Tokens / Second | 6508 | 6431 | 8622 | 8509 | 8369 | From 1e74171a7bbfa3a431fd068882c2912364d4ab26 Mon Sep 17 00:00:00 2001 From: Felix Zaslavskiy Date: Sat, 15 Jul 2023 18:07:42 -0400 Subject: [PATCH 153/198] Java binding - Improve error check before loading Model file (#1206) * Java binding - Add check for model file being readable. * Add TODO for Java binding. --------- Co-authored-by: Feliks Zaslavskiy Co-authored-by: felix --- gpt4all-bindings/java/README.md | 6 ++++-- gpt4all-bindings/java/TODO.md | 4 ++++ gpt4all-bindings/java/pom.xml | 2 +- .../src/main/java/com/hexadevlabs/gpt4all/LLModel.java | 7 ++++++- 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/gpt4all-bindings/java/README.md b/gpt4all-bindings/java/README.md index b93008827d72..9f712d760d61 100644 --- a/gpt4all-bindings/java/README.md +++ b/gpt4all-bindings/java/README.md @@ -12,12 +12,12 @@ You can add Java bindings into your Java project by adding the following depende ``` <dependency> <groupId>com.hexadevlabs</groupId> <artifactId>gpt4all-java-binding</artifactId> - <version>1.1.3</version> + <version>1.1.5</version> </dependency> ``` **Gradle** ``` -implementation 'com.hexadevlabs:gpt4all-java-binding:1.1.3' +implementation 'com.hexadevlabs:gpt4all-java-binding:1.1.5' ``` To add the library dependency for another build system see [Maven Central Java bindings](https://central.sonatype.com/artifact/com.hexadevlabs/gpt4all-java-binding/). @@ -121,4 +121,6 @@ If this is the case you can easily download and install the latest x64 Microsoft 3. Version **1.1.4**: - Java bindings is compatible with gpt4all version 2.4.11 - Falcon model support included. +4. Version **1.1.5**: + - Add a check for model file readability before loading model. \ No newline at end of file diff --git a/gpt4all-bindings/java/TODO.md b/gpt4all-bindings/java/TODO.md index 3c85bf7138dd..48342f785cc2 100644 --- a/gpt4all-bindings/java/TODO.md +++ b/gpt4all-bindings/java/TODO.md @@ -1,2 +1,6 @@ +## Needed +1. Integrate with circleci build pipeline like the C# binding. + +## These are just ideas 1. Better Chat completions function. 2. Chat completion that returns result in OpenAI compatible format.
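The readability check that PATCH 153 adds to the Java binding below translates directly to the other bindings; a minimal Python sketch of the same pre-flight validation (a hypothetical helper for illustration, not part of this patch series):

```python
import os

def validate_model_file(model_path: str) -> str:
    # Mirror the Java binding's two-step check: existence first, then
    # readability, each with its own error message.
    if not os.path.exists(model_path):
        raise ValueError(f"Model file does not exist: {model_path}")
    if not os.access(model_path, os.R_OK):
        raise ValueError(f"Model file cannot be read: {model_path}")
    return model_path
```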
diff --git a/gpt4all-bindings/java/pom.xml b/gpt4all-bindings/java/pom.xml index 7bfd9c27ab29..4687aa1a58c7 100644 --- a/gpt4all-bindings/java/pom.xml +++ b/gpt4all-bindings/java/pom.xml @@ -6,7 +6,7 @@ <groupId>com.hexadevlabs</groupId> <artifactId>gpt4all-java-binding</artifactId> - <version>1.1.4</version> + <version>1.1.5</version> <packaging>jar</packaging> diff --git a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java index 367f7ec0edf0..2e51a245751c 100644 --- a/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java +++ b/gpt4all-bindings/java/src/main/java/com/hexadevlabs/gpt4all/LLModel.java @@ -184,11 +184,16 @@ public LLModel(Path modelPath) { throw new IllegalStateException("Model file does not exist: " + modelPathAbs); } + // Check if file is Readable + if(!Files.isReadable(modelPath)){ + throw new IllegalStateException("Model file cannot be read: " + modelPathAbs); + } + // Create Model Struct. Will load dynamically the correct backend based on model type model = library.llmodel_model_create2(modelPathAbs, "auto", error); if(model == null) { - throw new IllegalStateException("Could not load gpt4all backend :" + error.message); + throw new IllegalStateException("Could not load, gpt4all backend returned error: " + error.message); } library.llmodel_loadModel(model, modelPathAbs); From 2d02c65177d81e9945f54d59606394119078b2c4 Mon Sep 17 00:00:00 2001 From: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> Date: Mon, 17 Jul 2023 22:21:03 +0200 Subject: [PATCH 154/198] Handle edge cases when generating embeddings (#1215) * Handle edge cases when generating embeddings * Improve Python handling & add llmodel_c.h note - In the Python bindings fail fast with a ValueError when text is empty - Advise other bindings authors to do likewise in llmodel_c.h --- gpt4all-backend/llmodel_c.cpp | 6 +++++- gpt4all-backend/llmodel_c.h | 2 ++ gpt4all-bindings/python/gpt4all/pyllmodel.py | 2 ++ gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py | 7 +++++++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/gpt4all-backend/llmodel_c.cpp b/gpt4all-backend/llmodel_c.cpp index fb916d951d0e..58fe27f520f7 100644 --- a/gpt4all-backend/llmodel_c.cpp +++ b/gpt4all-backend/llmodel_c.cpp @@ -168,10 +168,14 @@ void llmodel_prompt(llmodel_model model, const char *prompt, float *llmodel_embedding(llmodel_model model, const char *text, size_t *embedding_size) { + if (model == nullptr || text == nullptr || !strlen(text)) { + *embedding_size = 0; + return nullptr; + } LLModelWrapper *wrapper = reinterpret_cast<LLModelWrapper *>(model); std::vector<float> embeddingVector = wrapper->llModel->embedding(text); float *embedding = (float *)malloc(embeddingVector.size() * sizeof(float)); - if(embedding == nullptr) { + if (embedding == nullptr) { *embedding_size = 0; return nullptr; } diff --git a/gpt4all-backend/llmodel_c.h b/gpt4all-backend/llmodel_c.h index 8d582d08cc3d..138a8853582b 100644 --- a/gpt4all-backend/llmodel_c.h +++ b/gpt4all-backend/llmodel_c.h @@ -173,6 +173,8 @@ void llmodel_prompt(llmodel_model model, const char *prompt, /** * Generate an embedding using the model. + * NOTE: If given NULL pointers for the model or text, or an empty text, a NULL pointer will be + * returned. Bindings should signal an error when NULL is the return value. * @param model A pointer to the llmodel_model instance. * @param text A string representing the text to generate an embedding for.
* @param embedding_size A pointer to a size_t type that will be set by the call indicating the length diff --git a/gpt4all-bindings/python/gpt4all/pyllmodel.py b/gpt4all-bindings/python/gpt4all/pyllmodel.py index e8895a9c0c64..91395f538c08 100644 --- a/gpt4all-bindings/python/gpt4all/pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/pyllmodel.py @@ -251,6 +251,8 @@ def generate_embedding( self, text: str ) -> list[float]: + if not text: + raise ValueError("Text must not be None or empty") embedding_size = ctypes.c_size_t() c_text = ctypes.c_char_p(text.encode('utf-8')) embedding_ptr = llmodel.llmodel_embedding(self.model, c_text, ctypes.byref(embedding_size)) diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py index 6fdaa6cca722..fa798c0c3b5e 100644 --- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py @@ -3,6 +3,7 @@ from gpt4all import GPT4All, Embed4All import time +import pytest def test_inference(): model = GPT4All(model_name='orca-mini-3b.ggmlv3.q4_0.bin') @@ -107,3 +108,9 @@ def test_embedding(): #for i, value in enumerate(output): #print(f'Value at index {i}: {value}') assert len(output) == 384 + +def test_empty_embedding(): + text = '' + embedder = Embed4All() + with pytest.raises(ValueError): + output = embedder.embed(text) From 63849d9afcc7ca9ed2b00df2d51809f84b1a9a31 Mon Sep 17 00:00:00 2001 From: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> Date: Sat, 15 Jul 2023 17:04:26 +0200 Subject: [PATCH 155/198] Add AVX/AVX2 requirement to main README.md Signed-off-by: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 30d6ca09551d..98cd1ed93c08 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ Run on an M1 macOS Device (not sped up!)

          ## GPT4All: An ecosystem of open-source on-edge large language models. -GPT4All is an ecosystem to train and deploy **powerful** and **customized** large language models that run locally on consumer grade CPUs. +GPT4All is an ecosystem to train and deploy **powerful** and **customized** large language models that run locally on consumer grade CPUs. Note that your CPU needs to support [AVX or AVX2 instructions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions). Learn more in the [documentation](https://docs.gpt4all.io). From 4974ae917c920da50aa27bda54e98133876726c7 Mon Sep 17 00:00:00 2001 From: AMOGUS <137312610+Amogus8P@users.noreply.github.com> Date: Tue, 18 Jul 2023 05:17:17 +0300 Subject: [PATCH 156/198] Update default TopP to 0.4 TopP 0.1 was found to be somewhat too aggressive, so a more moderate default of 0.4 would be better suited for general use. Signed-off-by: AMOGUS <137312610+Amogus8P@users.noreply.github.com> --- gpt4all-chat/modellist.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-chat/modellist.h b/gpt4all-chat/modellist.h index cc70c5f0d2e0..c9608a2a99c7 100644 --- a/gpt4all-chat/modellist.h +++ b/gpt4all-chat/modellist.h @@ -108,7 +108,7 @@ struct ModelInfo { QString m_name; QString m_filename; double m_temperature = 0.7; - double m_topP = 0.1; + double m_topP = 0.4; int m_topK = 40; int m_maxLength = 4096; int m_promptBatchSize = 128; From 5f0aaf8bdb166ea3b5bfd578c2b19f61b583e6a9 Mon Sep 17 00:00:00 2001 From: AMOGUS <137312610+Amogus8P@users.noreply.github.com> Date: Tue, 18 Jul 2023 05:37:01 +0300 Subject: [PATCH 157/198] python binding's TopP also needs some love Changed the Python binding's TopP from 0.1 to 0.4 Signed-off-by: AMOGUS <137312610+Amogus8P@users.noreply.github.com> --- gpt4all-bindings/python/gpt4all/gpt4all.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index e00919722626..bf409b5174fb 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -212,7 +212,7 @@ def generate( max_tokens: int = 200, temp: float = 0.7, top_k: int = 40, - top_p: float = 0.1, + top_p: float = 0.4, repeat_penalty: float = 1.18, repeat_last_n: int = 64, n_batch: int = 8, From b4dbbd148574bcf7fc42c896387caa18363144a0 Mon Sep 17 00:00:00 2001 From: 385olt <385olt@gmail.com> Date: Thu, 20 Jul 2023 00:36:49 +0200 Subject: [PATCH 158/198] Python bindings: Custom callbacks, chat session improvement, refactoring (#1145) * Added the following features: \n 1) Now prompt_model uses the positional argument callback to return the response tokens. \n 2) Due to the callback argument of prompt_model, prompt_model_streaming only manages the queue and threading now, which reduces duplication of the code. \n 3) Added optional verbose argument to prompt_model which prints out the prompt that is passed to the model. \n 4) Chat sessions can now have a header, i.e. an instruction before the transcript of the conversation. The header is set at the creation of the chat session context. \n 5) generate function now accepts an optional callback. \n 6) When streaming and using chat session, the user doesn't need to save assistant's messages by himself. This is done automatically. 
* added _empty_response_callback so I don't have to check if callback is None * added docs * now if the callback stop generation, the last token is ignored * fixed type hints, reimplemented chat session header as a system prompt, minor refactoring, docs: removed section about manual update of chat session for streaming * forgot to add some type hints! * keep the config of the model in GPT4All class which is taken from models.json if the download is allowed * During chat sessions, the model-specific systemPrompt and promptTemplate are applied. * implemented the changes * Fixed typing. Now the user can set a prompt template that will be applied even outside of a chat session. The template can also have multiple placeholders that can be filled by passing a dictionary to the generate function * reversed some changes concerning the prompt templates and their functionality * fixed some type hints, changed list[float] to List[Float] * fixed type hints, changed List[Float] to List[float] * fix typo in the comment: Pepare => Prepare --------- Signed-off-by: 385olt <385olt@gmail.com> --- .../python/docs/gpt4all_python.md | 18 -- gpt4all-bindings/python/gpt4all/gpt4all.py | 198 ++++++++++++++---- gpt4all-bindings/python/gpt4all/pyllmodel.py | 141 ++++++------- 3 files changed, 212 insertions(+), 145 deletions(-) diff --git a/gpt4all-bindings/python/docs/gpt4all_python.md b/gpt4all-bindings/python/docs/gpt4all_python.md index 0d179b068013..ad3e1c5540d4 100644 --- a/gpt4all-bindings/python/docs/gpt4all_python.md +++ b/gpt4all-bindings/python/docs/gpt4all_python.md @@ -91,22 +91,4 @@ To interact with GPT4All responses as the model generates, use the `streaming = [' Paris', ' is', ' a', ' city', ' that', ' has', ' been', ' a', ' major', ' cultural', ' and', ' economic', ' center', ' for', ' over', ' ', '2', ',', '0', '0'] ``` -#### Streaming and Chat Sessions -When streaming tokens in a chat session, you must manually handle collection and updating of the chat history. - -```python -from gpt4all import GPT4All -model = GPT4All("orca-mini-3b.ggmlv3.q4_0.bin") - -with model.chat_session(): - tokens = list(model.generate(prompt='hello', top_k=1, streaming=True)) - model.current_chat_session.append({'role': 'assistant', 'content': ''.join(tokens)}) - - tokens = list(model.generate(prompt='write me a poem about dogs', top_k=1, streaming=True)) - model.current_chat_session.append({'role': 'assistant', 'content': ''.join(tokens)}) - - print(model.current_chat_session) -``` - -### API documentation ::: gpt4all.gpt4all.GPT4All diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index bf409b5174fb..ed6427312418 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -5,7 +5,7 @@ import time from contextlib import contextmanager from pathlib import Path -from typing import Dict, Iterable, List, Union, Optional +from typing import Any, Dict, Iterable, List, Union, Optional import requests from tqdm import tqdm @@ -13,7 +13,17 @@ from . 
import pyllmodel # TODO: move to config -DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace("\\", "\\\\") +DEFAULT_MODEL_DIRECTORY = os.path.join(str(Path.home()), ".cache", "gpt4all").replace( + "\\", "\\\\" +) + +DEFAULT_MODEL_CONFIG = { + "systemPrompt": "", + "promptTemplate": "### Human: \n{0}\n### Assistant:\n", +} + +ConfigType = Dict[str,str] +MessageType = Dict[str, str] class Embed4All: """ @@ -34,7 +44,7 @@ def __init__( def embed( self, text: str - ) -> list[float]: + ) -> List[float]: """ Generate an embedding. @@ -74,17 +84,20 @@ def __init__( self.model_type = model_type self.model = pyllmodel.LLModel() # Retrieve model and download if allowed - model_dest = self.retrieve_model(model_name, model_path=model_path, allow_download=allow_download) - self.model.load_model(model_dest) + self.config: ConfigType = self.retrieve_model( + model_name, model_path=model_path, allow_download=allow_download + ) + self.model.load_model(self.config["path"]) # Set n_threads if n_threads is not None: self.model.set_thread_count(n_threads) - self._is_chat_session_activated = False - self.current_chat_session = [] + self._is_chat_session_activated: bool = False + self.current_chat_session: List[MessageType] = empty_chat_session() + self._current_prompt_template: str = "{0}" @staticmethod - def list_models() -> Dict: + def list_models() -> List[ConfigType]: """ Fetch model list from https://gpt4all.io/models/models.json. @@ -95,8 +108,11 @@ def list_models() -> Dict: @staticmethod def retrieve_model( - model_name: str, model_path: Optional[str] = None, allow_download: bool = True, verbose: bool = True - ) -> str: + model_name: str, + model_path: Optional[str] = None, + allow_download: bool = True, + verbose: bool = True, + ) -> ConfigType: """ Find model file, and if it doesn't exist, download the model. @@ -108,11 +124,25 @@ def retrieve_model( verbose: If True (default), print debug messages. Returns: - Model file destination. + Model config. 
""" model_filename = append_bin_suffix_if_missing(model_name) + # get the config for the model + config: ConfigType = DEFAULT_MODEL_CONFIG + if allow_download: + available_models = GPT4All.list_models() + + for m in available_models: + if model_filename == m["filename"]: + config.update(m) + config["systemPrompt"] = config["systemPrompt"].strip() + config["promptTemplate"] = config["promptTemplate"].replace( + "%1", "{0}", 1 + ) # change to Python-style formatting + break + # Validate download directory if model_path is None: try: @@ -131,31 +161,34 @@ def retrieve_model( model_dest = os.path.join(model_path, model_filename).replace("\\", "\\\\") if os.path.exists(model_dest): + config.pop("url", None) + config["path"] = model_dest if verbose: print("Found model file at ", model_dest) - return model_dest # If model file does not exist, download elif allow_download: # Make sure valid model filename before attempting download - available_models = GPT4All.list_models() - - selected_model = None - for m in available_models: - if model_filename == m['filename']: - selected_model = m - break - if selected_model is None: + if "url" not in config: raise ValueError(f"Model filename not in model list: {model_filename}") - url = selected_model.pop('url', None) + url = config.pop("url", None) - return GPT4All.download_model(model_filename, model_path, verbose=verbose, url=url) + config["path"] = GPT4All.download_model( + model_filename, model_path, verbose=verbose, url=url + ) else: raise ValueError("Failed to retrieve model") + return config + @staticmethod - def download_model(model_filename: str, model_path: str, verbose: bool = True, url: Optional[str] = None) -> str: + def download_model( + model_filename: str, + model_path: str, + verbose: bool = True, + url: Optional[str] = None, + ) -> str: """ Download model from https://gpt4all.io. @@ -191,7 +224,7 @@ def get_download_url(model_filename): except Exception: if os.path.exists(download_path): if verbose: - print('Cleaning up the interrupted download...') + print("Cleaning up the interrupted download...") os.remove(download_path) raise @@ -218,7 +251,8 @@ def generate( n_batch: int = 8, n_predict: Optional[int] = None, streaming: bool = False, - ) -> Union[str, Iterable]: + callback: pyllmodel.ResponseCallbackType = pyllmodel.empty_response_callback, + ) -> Union[str, Iterable[str]]: """ Generate outputs from any GPT4All model. @@ -233,12 +267,14 @@ def generate( n_batch: Number of prompt tokens processed in parallel. Larger values decrease latency but increase resource requirements. n_predict: Equivalent to max_tokens, exists for backwards compatibility. streaming: If True, this method will instead return a generator that yields tokens as the model generates them. + callback: A function with arguments token_id:int and response:str, which receives the tokens from the model as they are generated and stops the generation by returning False. Returns: Either the entire completion or a generator that yields the completion token by token. """ - generate_kwargs = dict( - prompt=prompt, + + # Preparing the model request + generate_kwargs: Dict[str, Any] = dict( temp=temp, top_k=top_k, top_p=top_p, @@ -249,42 +285,87 @@ def generate( ) if self._is_chat_session_activated: + generate_kwargs["reset_context"] = len(self.current_chat_session) == 1 # check if there is only one message, i.e. 
system prompt self.current_chat_session.append({"role": "user", "content": prompt}) - generate_kwargs['prompt'] = self._format_chat_prompt_template(messages=self.current_chat_session[-1:]) - generate_kwargs['reset_context'] = len(self.current_chat_session) == 1 - else: - generate_kwargs['reset_context'] = True - if streaming: - return self.model.prompt_model_streaming(**generate_kwargs) + prompt = self._format_chat_prompt_template( + messages = self.current_chat_session[-1:], + default_prompt_header = self.current_chat_session[0]["content"] if generate_kwargs["reset_context"] else "", + ) + else: + generate_kwargs["reset_context"] = True - output = self.model.prompt_model(**generate_kwargs) + # Prepare the callback, process the model response + output_collector: List[MessageType] + output_collector = [{"content": ""}] # placeholder for the self.current_chat_session if chat session is not activated if self._is_chat_session_activated: - self.current_chat_session.append({"role": "assistant", "content": output}) + self.current_chat_session.append({"role": "assistant", "content": ""}) + output_collector = self.current_chat_session + + def _callback_wrapper( + callback: pyllmodel.ResponseCallbackType, + output_collector: List[MessageType], + ) -> pyllmodel.ResponseCallbackType: - return output + def _callback(token_id: int, response: str) -> bool: + nonlocal callback, output_collector + + output_collector[-1]["content"] += response + + return callback(token_id, response) + + return _callback + + # Send the request to the model + if streaming: + return self.model.prompt_model_streaming( + prompt=prompt, + callback=_callback_wrapper(callback, output_collector), + **generate_kwargs, + ) + + self.model.prompt_model( + prompt=prompt, + callback=_callback_wrapper(callback, output_collector), + **generate_kwargs, + ) + + return output_collector[-1]["content"] @contextmanager - def chat_session(self): - ''' + def chat_session( + self, + system_prompt: str = "", + prompt_template: str = "", + ): + """ Context manager to hold an inference optimized chat session with a GPT4All model. - ''' + + Args: + system_prompt: An initial instruction for the model. + prompt_template: Template for the prompts with {0} being replaced by the user message. + """ # Code to acquire resource, e.g.: self._is_chat_session_activated = True - self.current_chat_session = [] + self.current_chat_session = empty_chat_session(system_prompt or self.config["systemPrompt"]) + self._current_prompt_template = prompt_template or self.config["promptTemplate"] try: yield self finally: # Code to release resource, e.g.: self._is_chat_session_activated = False - self.current_chat_session = [] + self.current_chat_session = empty_chat_session() + self._current_prompt_template = "{0}" def _format_chat_prompt_template( - self, messages: List[Dict], default_prompt_header=True, default_prompt_footer=True + self, + messages: List[MessageType], + default_prompt_header: str = "", + default_prompt_footer: str = "", ) -> str: """ - Helper method for building a prompt using template from list of messages. + Helper method for building a prompt from list of messages using the self._current_prompt_template as a template for each message. Args: messages: List of dictionaries. Each dictionary should have a "role" key @@ -296,19 +377,44 @@ def _format_chat_prompt_template( Returns: Formatted prompt. 
""" - full_prompt = "" + + if isinstance(default_prompt_header, bool): + import warnings + + warnings.warn( + "Using True/False for the 'default_prompt_header' is deprecated. Use a string instead.", + DeprecationWarning, + ) + default_prompt_header = "" + + if isinstance(default_prompt_footer, bool): + import warnings + + warnings.warn( + "Using True/False for the 'default_prompt_footer' is deprecated. Use a string instead.", + DeprecationWarning, + ) + default_prompt_footer = "" + + full_prompt = default_prompt_header + "\n\n" if default_prompt_header != "" else "" for message in messages: if message["role"] == "user": - user_message = "### Human: \n" + message["content"] + "\n### Assistant:\n" + user_message = self._current_prompt_template.format(message["content"]) full_prompt += user_message if message["role"] == "assistant": - assistant_message = message["content"] + '\n' + assistant_message = message["content"] + "\n" full_prompt += assistant_message + full_prompt += "\n\n" + default_prompt_footer if default_prompt_footer != "" else "" + return full_prompt +def empty_chat_session(system_prompt: str = "") -> List[MessageType]: + return [{"role": "system", "content": system_prompt}] + + def append_bin_suffix_if_missing(model_name): if not model_name.endswith(".bin"): model_name += ".bin" diff --git a/gpt4all-bindings/python/gpt4all/pyllmodel.py b/gpt4all-bindings/python/gpt4all/pyllmodel.py index 91395f538c08..14f35626394e 100644 --- a/gpt4all-bindings/python/gpt4all/pyllmodel.py +++ b/gpt4all-bindings/python/gpt4all/pyllmodel.py @@ -6,26 +6,19 @@ import subprocess import sys import threading -from typing import Iterable +import logging +from typing import Iterable, Callable, List import pkg_resources - -class DualStreamProcessor: - def __init__(self, stream=None): - self.stream = stream - self.output = "" - - def write(self, text): - if self.stream is not None: - self.stream.write(text) - self.stream.flush() - self.output += text +logger: logging.Logger = logging.getLogger(__name__) # TODO: provide a config file to make this more robust LLMODEL_PATH = os.path.join("llmodel_DO_NOT_MODIFY", "build").replace("\\", "\\\\") -MODEL_LIB_PATH = str(pkg_resources.resource_filename("gpt4all", LLMODEL_PATH)).replace("\\", "\\\\") +MODEL_LIB_PATH = str(pkg_resources.resource_filename("gpt4all", LLMODEL_PATH)).replace( + "\\", "\\\\" +) def load_llmodel_library(): @@ -43,9 +36,9 @@ def get_c_shared_lib_extension(): c_lib_ext = get_c_shared_lib_extension() - llmodel_file = "libllmodel" + '.' + c_lib_ext + llmodel_file = "libllmodel" + "." 
+ c_lib_ext - llmodel_dir = str(pkg_resources.resource_filename('gpt4all', os.path.join(LLMODEL_PATH, llmodel_file))).replace( + llmodel_dir = str(pkg_resources.resource_filename("gpt4all", os.path.join(LLMODEL_PATH, llmodel_file))).replace( "\\", "\\\\" ) @@ -134,7 +127,15 @@ class LLModelPromptContext(ctypes.Structure): llmodel.llmodel_threadCount.argtypes = [ctypes.c_void_p] llmodel.llmodel_threadCount.restype = ctypes.c_int32 -llmodel.llmodel_set_implementation_search_path(MODEL_LIB_PATH.encode('utf-8')) +llmodel.llmodel_set_implementation_search_path(MODEL_LIB_PATH.encode("utf-8")) + + +ResponseCallbackType = Callable[[int, str], bool] +RawResponseCallbackType = Callable[[int, bytes], bool] + + +def empty_response_callback(token_id: int, response: str) -> bool: + return True class LLModel: @@ -250,9 +251,10 @@ def _set_context( def generate_embedding( self, text: str - ) -> list[float]: + ) -> List[float]: if not text: raise ValueError("Text must not be None or empty") + embedding_size = ctypes.c_size_t() c_text = ctypes.c_char_p(text.encode('utf-8')) embedding_ptr = llmodel.llmodel_embedding(self.model, c_text, ctypes.byref(embedding_size)) @@ -263,6 +265,7 @@ def generate_embedding( def prompt_model( self, prompt: str, + callback: ResponseCallbackType, n_predict: int = 4096, top_k: int = 40, top_p: float = 0.9, @@ -272,8 +275,7 @@ def prompt_model( repeat_last_n: int = 10, context_erase: float = 0.75, reset_context: bool = False, - streaming=False, - ) -> str: + ): """ Generate response from model from a prompt. @@ -281,25 +283,23 @@ def prompt_model( ---------- prompt: str Question, task, or conversation for model to respond to - streaming: bool - Stream response to stdout + callback(token_id:int, response:str): bool + The model sends response tokens to callback Returns ------- - Model response str + None """ - prompt_bytes = prompt.encode('utf-8') - prompt_ptr = ctypes.c_char_p(prompt_bytes) - - old_stdout = sys.stdout - - stream_processor = DualStreamProcessor() - - if streaming: - stream_processor.stream = sys.stdout + logger.info( + "LLModel.prompt_model -- prompt:\n" + + "%s\n" + + "===/LLModel.prompt_model -- prompt/===", + prompt, + ) - sys.stdout = stream_processor + prompt_bytes = prompt.encode("utf-8") + prompt_ptr = ctypes.c_char_p(prompt_bytes) self._set_context( n_predict=n_predict, @@ -317,56 +317,37 @@ def prompt_model( self.model, prompt_ptr, PromptCallback(self._prompt_callback), - ResponseCallback(self._response_callback), + ResponseCallback(self._callback_decoder(callback)), RecalculateCallback(self._recalculate_callback), self.context, ) - # Revert to old stdout - sys.stdout = old_stdout - # Force new line - return stream_processor.output - def prompt_model_streaming( self, prompt: str, - n_predict: int = 4096, - top_k: int = 40, - top_p: float = 0.9, - temp: float = 0.1, - n_batch: int = 8, - repeat_penalty: float = 1.2, - repeat_last_n: int = 10, - context_erase: float = 0.75, - reset_context: bool = False, - ) -> Iterable: + callback: ResponseCallbackType = empty_response_callback, + **kwargs + ) -> Iterable[str]: # Symbol to terminate from generator TERMINATING_SYMBOL = object() output_queue = queue.Queue() - prompt_bytes = prompt.encode('utf-8') - prompt_ptr = ctypes.c_char_p(prompt_bytes) + # Put response tokens into an output queue + def _generator_callback_wrapper(callback: ResponseCallbackType) -> ResponseCallbackType: + def _generator_callback(token_id: int, response: str): + nonlocal callback - self._set_context( - n_predict=n_predict, - top_k=top_k, 
- top_p=top_p, - temp=temp, - n_batch=n_batch, - repeat_penalty=repeat_penalty, - repeat_last_n=repeat_last_n, - context_erase=context_erase, - reset_context=reset_context, - ) + if callback(token_id, response): + output_queue.put(response) + return True - # Put response tokens into an output queue - def _generator_response_callback(token_id, response): - output_queue.put(response.decode('utf-8', 'replace')) - return True + return False + + return _generator_callback - def run_llmodel_prompt(model, prompt, prompt_callback, response_callback, recalculate_callback, context): - llmodel.llmodel_prompt(model, prompt, prompt_callback, response_callback, recalculate_callback, context) + def run_llmodel_prompt(prompt: str, callback: ResponseCallbackType, **kwargs): + self.prompt_model(prompt, callback, **kwargs) output_queue.put(TERMINATING_SYMBOL) # Kick off llmodel_prompt in separate thread so we can return generator @@ -374,13 +355,10 @@ def run_llmodel_prompt(model, prompt, prompt_callback, response_callback, recalc thread = threading.Thread( target=run_llmodel_prompt, args=( - self.model, - prompt_ptr, - PromptCallback(self._prompt_callback), - ResponseCallback(_generator_response_callback), - RecalculateCallback(self._recalculate_callback), - self.context, + prompt, + _generator_callback_wrapper(callback) ), + kwargs=kwargs, ) thread.start() @@ -391,18 +369,19 @@ def run_llmodel_prompt(model, prompt, prompt_callback, response_callback, recalc break yield response - # Empty prompt callback - @staticmethod - def _prompt_callback(token_id): - return True + def _callback_decoder(self, callback: ResponseCallbackType) -> RawResponseCallbackType: + def _raw_callback(token_id: int, response: bytes) -> bool: + nonlocal callback + return callback(token_id, response.decode("utf-8", "replace")) - # Empty response callback method that just prints response to be collected + return _raw_callback + + # Empty prompt callback @staticmethod - def _response_callback(token_id, response): - sys.stdout.write(response.decode('utf-8', 'replace')) + def _prompt_callback(token_id: int) -> bool: return True # Empty recalculate callback @staticmethod - def _recalculate_callback(is_recalculating): + def _recalculate_callback(is_recalculating: bool) -> bool: return is_recalculating From 58f0fcab57a0060451fefccbc330c57858e64386 Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Thu, 20 Jul 2023 21:23:29 -0400 Subject: [PATCH 159/198] Added health endpoint Signed-off-by: Andriy Mulyar --- gpt4all-api/gpt4all_api/app/api_v1/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gpt4all-api/gpt4all_api/app/api_v1/api.py b/gpt4all-api/gpt4all_api/app/api_v1/api.py index ab6f0dde53a5..e68af796c773 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/api.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/api.py @@ -1,4 +1,4 @@ -from api_v1.routes import chat, completions, engines +from api_v1.routes import chat, completions, engines, health from fastapi import APIRouter router = APIRouter() @@ -6,3 +6,4 @@ router.include_router(chat.router) router.include_router(completions.router) router.include_router(engines.router) +router.include_router(health.router) From 8aba2c9009fb6bc723f623c614e265b41722e4e3 Mon Sep 17 00:00:00 2001 From: Zach Nussbaum Date: Fri, 21 Jul 2023 14:13:29 -0500 Subject: [PATCH 160/198] GPU Inference Server (#1112) * feat: local inference server * fix: source to use bash + vars * chore: isort and black * fix: make file + inference mode * chore: logging * refactor: remove old links * fix: add new env 
vars

* feat: hf inference server

* refactor: remove old links

* test: batch and single response

* chore: black + isort

* separate gpu and cpu dockerfiles

* moved gpu to separate dockerfile

* Fixed test endpoints

* Edits to API. server won't start due to failed instantiation error

* Method signature

* fix: gpu_infer

* tests: fix tests

---------

Co-authored-by: Andriy Mulyar
---
 gpt4all-api/.isort.cfg                        |   7 +
 gpt4all-api/README.md                         |  12 ++
 gpt4all-api/docker-compose.gpu.yaml           |  24 +++
 gpt4all-api/docker-compose.yaml               |   3 +-
 gpt4all-api/gpt4all_api/app/api_v1/events.py  |   7 +-
 .../gpt4all_api/app/api_v1/routes/chat.py     |  20 +-
 .../app/api_v1/routes/completions.py          | 187 +++++++++++++-----
 .../gpt4all_api/app/api_v1/routes/engines.py  |  16 +-
 .../gpt4all_api/app/api_v1/routes/health.py   |   1 +
 .../gpt4all_api/app/api_v1/settings.py        |   8 +
 gpt4all-api/gpt4all_api/app/main.py           |  37 ++--
 .../gpt4all_api/app/tests/test_endpoints.py   |  32 ++-
 gpt4all-api/gpt4all_api/requirements.txt      |   8 +-
 gpt4all-api/makefile                          |  17 +-
 14 files changed, 269 insertions(+), 110 deletions(-)
 create mode 100644 gpt4all-api/.isort.cfg
 create mode 100644 gpt4all-api/docker-compose.gpu.yaml

diff --git a/gpt4all-api/.isort.cfg b/gpt4all-api/.isort.cfg
new file mode 100644
index 000000000000..485c85a7c368
--- /dev/null
+++ b/gpt4all-api/.isort.cfg
@@ -0,0 +1,7 @@
+[settings]
+known_third_party=geopy,nltk,np,numpy,pandas,pysbd,fire,torch
+
+line_length=120
+include_trailing_comma=True
+multi_line_output=3
+use_parentheses=True
\ No newline at end of file
diff --git a/gpt4all-api/README.md b/gpt4all-api/README.md
index ac8af2ad4fd8..a6b8c3c1820b 100644
--- a/gpt4all-api/README.md
+++ b/gpt4all-api/README.md
@@ -17,6 +17,18 @@ Then, start the backend with:
 docker compose up --build
 ```
 
+This will run both the API and the locally hosted GPU inference server. If you want to run the API without the GPU inference server, you can run:
+
+```bash
+docker compose up --build gpt4all_api
+```
+
+To run the API with the GPU inference server, you will need to include environment variables (like the `MODEL_ID`). Edit the `.env` file and run
+```bash
+docker compose --env-file .env up --build
+```
+
+
 #### Spinning up your app
 
 Run `docker compose up` to spin up the backend. Monitor the logs for errors in case you forgot to set an environment variable above.
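For reference, a minimal `.env` for the GPU path might look like the sketch below. `MODEL_ID` and `NUM_SHARD` are the variables consumed by `docker-compose.gpu.yaml`; the model id shown is only an illustrative assumption, not a tested default.

```bash
# Hypothetical .env consumed by: docker compose --env-file .env up --build
MODEL_ID=nomic-ai/gpt4all-j   # assumption: any model id the HF text-generation-inference server can load
NUM_SHARD=1                   # number of GPU shards to split the model across
```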
diff --git a/gpt4all-api/docker-compose.gpu.yaml b/gpt4all-api/docker-compose.gpu.yaml new file mode 100644 index 000000000000..256986ca8c5a --- /dev/null +++ b/gpt4all-api/docker-compose.gpu.yaml @@ -0,0 +1,24 @@ +version: "3.8" + +services: + gpt4all_gpu: + image: ghcr.io/huggingface/text-generation-inference + container_name: gpt4all_gpu + restart: always #restart on error (usually code compilation from save during bad state) + environment: + - HUGGING_FACE_HUB_TOKEN=token + - USE_FLASH_ATTENTION=false + - MODEL_ID='' + - NUM_SHARD=1 + command: --model-id $MODEL_ID --num-shard $NUM_SHARD + volumes: + - ./:/data + ports: + - "8080:80" + shm_size: 1g + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] \ No newline at end of file diff --git a/gpt4all-api/docker-compose.yaml b/gpt4all-api/docker-compose.yaml index fbe9cd6327c9..0543452a1ae4 100644 --- a/gpt4all-api/docker-compose.yaml +++ b/gpt4all-api/docker-compose.yaml @@ -1,4 +1,4 @@ -version: "3.5" +version: "3.8" services: gpt4all_api: @@ -13,6 +13,7 @@ services: - LOGLEVEL=debug - PORT=4891 - model=ggml-mpt-7b-chat.bin + - inference_mode=cpu volumes: - './gpt4all_api/app:/app' command: ["/start-reload.sh"] \ No newline at end of file diff --git a/gpt4all-api/gpt4all_api/app/api_v1/events.py b/gpt4all-api/gpt4all_api/app/api_v1/events.py index 127b00df05a6..ba6f73fc78f7 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/events.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/events.py @@ -1,8 +1,10 @@ import logging + +from api_v1.settings import settings from fastapi import HTTPException from fastapi.responses import JSONResponse from starlette.requests import Request -from api_v1.settings import settings + log = logging.getLogger(__name__) @@ -19,8 +21,9 @@ async def on_startup(app): startup_msg = startup_msg_fmt.format(settings=settings) log.info(startup_msg) + def startup_event_handler(app): async def start_app() -> None: await on_startup(app) - return start_app \ No newline at end of file + return start_app diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/chat.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/chat.py index 109293315c86..4e7ad3d8f134 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/chat.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/chat.py @@ -1,9 +1,10 @@ -from fastapi import APIRouter, Depends, Response, Security, status -from pydantic import BaseModel, Field -from typing import List, Dict import logging import time +from typing import Dict, List + from api_v1.settings import settings +from fastapi import APIRouter, Depends, Response, Security, status +from pydantic import BaseModel, Field logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -11,11 +12,11 @@ ### This should follow https://github.com/openai/openai-openapi/blob/master/openapi.yaml - class ChatCompletionMessage(BaseModel): role: str content: str + class ChatCompletionRequest(BaseModel): model: str = Field(..., description='The model to generate a completion from.') messages: List[ChatCompletionMessage] = Field(..., description='The model to generate a completion from.') @@ -26,11 +27,13 @@ class ChatCompletionChoice(BaseModel): index: int finish_reason: str + class ChatCompletionUsage(BaseModel): prompt_tokens: int completion_tokens: int total_tokens: int + class ChatCompletionResponse(BaseModel): id: str object: str = 'text_completion' @@ -42,6 +45,7 @@ class ChatCompletionResponse(BaseModel): router = APIRouter(prefix="/chat", tags=["Completions Endpoints"]) + 
@router.post("/completions", response_model=ChatCompletionResponse) async def chat_completion(request: ChatCompletionRequest): ''' @@ -53,11 +57,5 @@ async def chat_completion(request: ChatCompletionRequest): created=time.time(), model=request.model, choices=[{}], - usage={ - 'prompt_tokens': 0, - 'completion_tokens': 0, - 'total_tokens': 0 - } + usage={'prompt_tokens': 0, 'completion_tokens': 0, 'total_tokens': 0}, ) - - diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py index ba0dba6a99db..660b2000218e 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py @@ -1,14 +1,16 @@ import json - -from fastapi import APIRouter, Depends, Response, Security, status -from fastapi.responses import StreamingResponse -from pydantic import BaseModel, Field from typing import List, Dict, Iterable, AsyncIterable import logging +import time +from typing import Dict, List, Union from uuid import uuid4 +import aiohttp +import asyncio from api_v1.settings import settings +from fastapi import APIRouter, Depends, Response, Security, status, HTTPException +from fastapi.responses import StreamingResponse from gpt4all import GPT4All -import time +from pydantic import BaseModel, Field logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -16,14 +18,17 @@ ### This should follow https://github.com/openai/openai-openapi/blob/master/openapi.yaml + class CompletionRequest(BaseModel): - model: str = Field(..., description='The model to generate a completion from.') - prompt: str = Field(..., description='The prompt to begin completing from.') - max_tokens: int = Field(7, description='Max tokens to generate') - temperature: float = Field(0, description='Model temperature') - top_p: float = Field(1.0, description='top_p') - n: int = Field(1, description='') + model: str = Field(settings.model, description='The model to generate a completion from.') + prompt: Union[List[str], str] = Field(..., description='The prompt to begin completing from.') + max_tokens: int = Field(None, description='Max tokens to generate') + temperature: float = Field(settings.temp, description='Model temperature') + top_p: float = Field(settings.top_k, description='top_p') + top_k: int = Field(settings.top_k, description='top_k') + n: int = Field(1, description='How many completions to generate for each prompt') stream: bool = Field(False, description='Stream responses') + repeat_penalty: float = Field(settings.repeat_penalty, description='Repeat penalty') class CompletionChoice(BaseModel): @@ -58,7 +63,6 @@ class CompletionStreamResponse(BaseModel): router = APIRouter(prefix="/completions", tags=["Completion Endpoints"]) - def stream_completion(output: Iterable, base_response: CompletionStreamResponse): """ Streams a GPT4All output to the client. 
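Since `stream_completion()` yields standard server-sent events, the streaming route can be smoke-tested over plain HTTP. A rough sketch, assuming the API is listening on port 4891 (as in `docker-compose.yaml`) and serving the default `ggml-mpt-7b-chat.bin` model:

```bash
# Hypothetical smoke test of the streaming completions endpoint.
# -N disables output buffering so each "data: {...}" SSE chunk prints as it arrives;
# -L follows FastAPI's trailing-slash redirect for the "/" route under the prefix.
curl -sNL -X POST http://localhost:4891/v1/completions \
  -H 'Content-Type: application/json' \
  -d '{"model": "ggml-mpt-7b-chat.bin", "prompt": "Who is Michael Jordan?", "stream": true}'
```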
@@ -80,6 +84,27 @@ def stream_completion(output: Iterable, base_response: CompletionStreamResponse)
         ))]
         yield f"data: {json.dumps(dict(chunk))}\n\n"
 
+async def gpu_infer(payload, header):
+    async with aiohttp.ClientSession() as session:
+        try:
+            async with session.post(
+                settings.hf_inference_server_host, headers=header, data=json.dumps(payload)
+            ) as response:
+                resp = await response.json()
+                return resp
+
+        except aiohttp.ClientError as e:
+            # Handle client-side errors (e.g., connection error, invalid URL)
+            logger.error(f"Client error: {e}")
+        except json.JSONDecodeError as e:
+            # Handle JSON decoding errors
+            logger.error(f"JSON decoding error: {e}")
+        except Exception as e:
+            # Handle other unexpected exceptions, including server-side errors
+            # (aiohttp exposes no generic ServerError class to catch separately)
+            logger.error(f"Unexpected error: {e}")
 
 @router.post("/", response_model=CompletionResponse)
 async def completions(request: CompletionRequest):
@@ -87,42 +112,104 @@ async def completions(request: CompletionRequest):
     '''
     Completes a GPT4All model response.
     '''
 
-    model = GPT4All(model_name=settings.model, model_path=settings.gpt4all_path)
-
-    output = model.generate(prompt=request.prompt,
-                            n_predict=request.max_tokens,
-                            streaming=request.stream,
-                            top_k=20,
-                            top_p=request.top_p,
-                            temp=request.temperature,
-                            n_batch=1024,
-                            repeat_penalty=1.2,
-                            repeat_last_n=10)
-
-    # If streaming, we need to return a StreamingResponse
-    if request.stream:
-        base_chunk = CompletionStreamResponse(
-            id=str(uuid4()),
-            created=time.time(),
-            model=request.model,
-            choices=[]
-        )
-        return StreamingResponse((response for response in stream_completion(output, base_chunk)),
-                                 media_type="text/event-stream")
+    if request.model != settings.model:
+        raise HTTPException(status_code=400, detail=f"The GPT4All inference server is booted to only infer: `{settings.model}`")
+
+    if settings.inference_mode == "gpu":
+        params = request.dict(exclude={'model', 'prompt', 'max_tokens', 'n'})
+        params["max_new_tokens"] = request.max_tokens
+        params["num_return_sequences"] = request.n
+
+        header = {"Content-Type": "application/json"}
+        payload = {"parameters": params}
+        if isinstance(request.prompt, list):
+            tasks = []
+            for prompt in request.prompt:
+                payload["inputs"] = prompt
+                task = gpu_infer(payload, header)
+                tasks.append(task)
+
+            results = await asyncio.gather(*tasks)
+
+            choices = []
+            for response in results:
+                scores = response["scores"] if "scores" in response else -1.0
+                choices.append(
+                    dict(
+                        CompletionChoice(
+                            text=response["generated_text"], index=0, logprobs=scores, finish_reason='stop'
+                        )
+                    )
+                )
+
+            return CompletionResponse(
+                id=str(uuid4()),
+                created=time.time(),
+                model=request.model,
+                choices=choices,
+                usage={'prompt_tokens': 0, 'completion_tokens': 0, 'total_tokens': 0},
+            )
+
+        else:
+            # If streaming, we need to return a StreamingResponse
+            payload["inputs"] = request.prompt
+
+            resp = await gpu_infer(payload, header)
+
+            output = resp["generated_text"]
+            # this returns all logprobs
+            scores = resp["scores"] if "scores" in resp else -1.0
+
+            return CompletionResponse(
+                id=str(uuid4()),
+                created=time.time(),
+                model=request.model,
+                choices=[dict(CompletionChoice(text=output, index=0, logprobs=scores, finish_reason='stop'))],
+                usage={'prompt_tokens': 0, 'completion_tokens': 0, 'total_tokens': 0},
+            )
+
     else:
-        return CompletionResponse(
-            id=str(uuid4()),
-            created=time.time(),
-            model=request.model,
-            choices=[dict(CompletionChoice(
-                text=output,
- index=0, - logprobs=-1, - finish_reason='stop' - ))], - usage={ - 'prompt_tokens': 0, #TODO how to compute this? - 'completion_tokens': 0, - 'total_tokens': 0 - } - ) + + if isinstance(request.prompt, list): + if len(request.prompt) > 1: + raise HTTPException(status_code=400, detail="Can only infer one inference per request in CPU mode.") + else: + request.prompt = request.prompt[0] + + model = GPT4All(model_name=settings.model, model_path=settings.gpt4all_path) + + output = model.generate(prompt=request.prompt, + max_tokens=request.max_tokens, + streaming=request.stream, + top_k=request.top_k, + top_p=request.top_p, + temp=request.temperature, + ) + + # If streaming, we need to return a StreamingResponse + if request.stream: + base_chunk = CompletionStreamResponse( + id=str(uuid4()), + created=time.time(), + model=request.model, + choices=[] + ) + return StreamingResponse((response for response in stream_completion(output, base_chunk)), + media_type="text/event-stream") + else: + return CompletionResponse( + id=str(uuid4()), + created=time.time(), + model=request.model, + choices=[dict(CompletionChoice( + text=output, + index=0, + logprobs=-1, + finish_reason='stop' + ))], + usage={ + 'prompt_tokens': 0, # TODO how to compute this? + 'completion_tokens': 0, + 'total_tokens': 0 + } + ) diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/engines.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/engines.py index 94c325ff40c6..539bc1ffbf5a 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/engines.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/engines.py @@ -1,22 +1,27 @@ -from fastapi import APIRouter, Depends, Response, Security, status -from pydantic import BaseModel, Field -from typing import List, Dict import logging +from typing import Dict, List + from api_v1.settings import settings +from fastapi import APIRouter, Depends, Response, Security, status +from pydantic import BaseModel, Field logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) ### This should follow https://github.com/openai/openai-openapi/blob/master/openapi.yaml + class ListEnginesResponse(BaseModel): data: List[Dict] = Field(..., description="All available models.") + class EngineResponse(BaseModel): data: List[Dict] = Field(..., description="All available models.") + router = APIRouter(prefix="/engines", tags=["Search Endpoints"]) + @router.get("/", response_model=ListEnginesResponse) async def list_engines(): ''' @@ -29,10 +34,7 @@ async def list_engines(): @router.get("/{engine_id}", response_model=EngineResponse) async def retrieve_engine(engine_id: str): - ''' - - ''' + ''' ''' raise NotImplementedError() return EngineResponse() - diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/health.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/health.py index 431880e881c1..37f30728fd00 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/health.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/health.py @@ -1,6 +1,7 @@ import logging from fastapi import APIRouter from fastapi.responses import JSONResponse + log = logging.getLogger(__name__) router = APIRouter(prefix="/health", tags=["Health"]) diff --git a/gpt4all-api/gpt4all_api/app/api_v1/settings.py b/gpt4all-api/gpt4all_api/app/api_v1/settings.py index ee1efb1ce90f..d0e6c91be944 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/settings.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/settings.py @@ -5,6 +5,14 @@ class Settings(BaseSettings): app_environment = 'dev' model: str = 'ggml-mpt-7b-chat.bin' gpt4all_path: str = '/models' + 
inference_mode: str = "cpu"
+    hf_inference_server_host: str = "http://gpt4all_gpu:80/generate"
+
+    temp: float = 0.18
+    top_p: float = 1.0
+    top_k: int = 50
+    repeat_penalty: float = 1.18
+

 settings = Settings()
diff --git a/gpt4all-api/gpt4all_api/app/main.py b/gpt4all-api/gpt4all_api/app/main.py
index 131f89610667..acb1a119a8b6 100644
--- a/gpt4all-api/gpt4all_api/app/main.py
+++ b/gpt4all-api/gpt4all_api/app/main.py
@@ -1,19 +1,19 @@
+import logging
 import os
+
 import docs
-import logging
+from api_v1 import events
+from api_v1.api import router as v1_router
+from api_v1.settings import settings
 from fastapi import FastAPI, HTTPException, Request
-from starlette.middleware.cors import CORSMiddleware
 from fastapi.logger import logger as fastapi_logger
-from api_v1.settings import settings
-from api_v1.api import router as v1_router
-from api_v1 import events
-import os
+from starlette.middleware.cors import CORSMiddleware

 logger = logging.getLogger(__name__)

 app = FastAPI(title='GPT4All API', description=docs.desc)

-#CORS Configuration (in-case you want to deploy)
+# CORS Configuration (in case you want to deploy)
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
@@ -29,14 +29,23 @@
 app.add_event_handler('startup', events.startup_event_handler(app))
 app.add_exception_handler(HTTPException, events.on_http_error)

+
 @app.on_event("startup")
 async def startup():
     global model
-    logger.info(f"Downloading/fetching model: {os.path.join(settings.gpt4all_path, settings.model)}")
-    from gpt4all import GPT4All
-    model = GPT4All(model_name=settings.model, model_path=settings.gpt4all_path)
+    if settings.inference_mode == "cpu":
+        logger.info(f"Downloading/fetching model: {os.path.join(settings.gpt4all_path, settings.model)}")
+        from gpt4all import GPT4All
+
+        model = GPT4All(model_name=settings.model, model_path=settings.gpt4all_path)
+
+        logger.info(f"GPT4All API is ready to infer from {settings.model} on CPU.")
+
+    else:
+        # is it possible to do this once the server is up?
+        ## TODO: block until the HF inference server is up.
+        logger.info(f"GPT4All API is ready to infer from {settings.model} on GPU.")

-    logger.info("GPT4All API is ready.")

 @app.on_event("shutdown")
 async def shutdown():
@@ -57,5 +66,7 @@ async def shutdown():
         uvicorn_logger.handlers = gunicorn_error_logger.handlers
 else:
     # https://github.com/tiangolo/fastapi/issues/2019
-    LOG_FORMAT2 = "[%(asctime)s %(process)d:%(threadName)s] %(name)s - %(levelname)s - %(message)s | %(filename)s:%(lineno)d"
-    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT2)
\ No newline at end of file
+    LOG_FORMAT2 = (
+        "[%(asctime)s %(process)d:%(threadName)s] %(name)s - %(levelname)s - %(message)s | %(filename)s:%(lineno)d"
+    )
+    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT2)
diff --git a/gpt4all-api/gpt4all_api/app/tests/test_endpoints.py b/gpt4all-api/gpt4all_api/app/tests/test_endpoints.py
index fad9bd24c7ee..f9315cb35288 100644
--- a/gpt4all-api/gpt4all_api/app/tests/test_endpoints.py
+++ b/gpt4all-api/gpt4all_api/app/tests/test_endpoints.py
@@ -2,30 +2,22 @@
 Use the OpenAI python API to test gpt4all models.
 """
 import openai
+
 openai.api_base = "http://localhost:4891/v1"
 openai.api_key = "not needed for a local LLM"


 def test_completion():
-    model = "gpt4all-j-v1.3-groovy"
+    model = "ggml-mpt-7b-chat.bin"
     prompt = "Who is Michael Jordan?"
response = openai.Completion.create( - model=model, - prompt=prompt, - max_tokens=50, - temperature=0.28, - top_p=0.95, - n=1, - echo=True, - stream=False + model=model, prompt=prompt, max_tokens=50, temperature=0.28, top_p=0.95, n=1, echo=True, stream=False ) assert len(response['choices'][0]['text']) > len(prompt) - print(response) - def test_streaming_completion(): - model = "gpt4all-j-v1.3-groovy" + model = "ggml-mpt-7b-chat.bin" prompt = "Who is Michael Jordan?" tokens = [] for resp in openai.Completion.create( @@ -42,10 +34,12 @@ def test_streaming_completion(): assert (len(tokens) > 0) assert (len("".join(tokens)) > len(prompt)) -# def test_chat_completions(): -# model = "gpt4all-j-v1.3-groovy" -# prompt = "Who is Michael Jordan?" -# response = openai.ChatCompletion.create( -# model=model, -# messages=[] -# ) + +def test_batched_completion(): + model = "ggml-mpt-7b-chat.bin" + prompt = "Who is Michael Jordan?" + response = openai.Completion.create( + model=model, prompt=[prompt] * 3, max_tokens=50, temperature=0.28, top_p=0.95, n=1, echo=True, stream=False + ) + assert len(response['choices'][0]['text']) > len(prompt) + assert len(response['choices']) == 3 diff --git a/gpt4all-api/gpt4all_api/requirements.txt b/gpt4all-api/gpt4all_api/requirements.txt index af33bdd8348f..f7c7ed533df6 100644 --- a/gpt4all-api/gpt4all_api/requirements.txt +++ b/gpt4all-api/gpt4all_api/requirements.txt @@ -1,10 +1,12 @@ aiohttp>=3.6.2 aiofiles -pydantic>=1.4.0 +pydantic>=1.4.0,<2.0.0 requests>=2.24.0 ujson>=2.0.2 fastapi>=0.95.0 Jinja2>=3.0 -gpt4all==1.0.1 +gpt4all>=1.0.0 pytest -openai \ No newline at end of file +openai +black +isort \ No newline at end of file diff --git a/gpt4all-api/makefile b/gpt4all-api/makefile index 606c3e0a9a83..66420e455f73 100644 --- a/gpt4all-api/makefile +++ b/gpt4all-api/makefile @@ -1,22 +1,26 @@ ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) APP_NAME:=gpt4all_api PYTHON:=python3.8 +SHELL := /bin/bash all: dependencies fresh: clean dependencies testenv: clean_testenv test_build - docker compose up --build + docker compose -f docker-compose.yaml up --build + +testenv_gpu: clean_testenv test_build + docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml up --build testenv_d: clean_testenv test_build docker compose up --build -d test: - docker compose exec gpt4all_api pytest -svv --disable-warnings -p no:cacheprovider /app/tests + docker compose exec $(APP_NAME) pytest -svv --disable-warnings -p no:cacheprovider /app/tests test_build: - DOCKER_BUILDKIT=1 docker build -t gpt4all_api --progress plain -f gpt4all_api/Dockerfile.buildkit . + DOCKER_BUILDKIT=1 docker build -t $(APP_NAME) --progress plain -f $(APP_NAME)/Dockerfile.buildkit . clean_testenv: docker compose down -v @@ -27,7 +31,7 @@ venv: if [ ! 
-d $(ROOT_DIR)/env ]; then $(PYTHON) -m venv $(ROOT_DIR)/env; fi dependencies: venv - source $(ROOT_DIR)/env/bin/activate; yes w | python -m pip install -r $(ROOT_DIR)/gpt4all_api/requirements.txt + source $(ROOT_DIR)/env/bin/activate; $(PYTHON) -m pip install -r $(ROOT_DIR)/$(APP_NAME)/requirements.txt clean: clean_testenv # Remove existing environment @@ -35,3 +39,8 @@ clean: clean_testenv rm -rf $(ROOT_DIR)/$(APP_NAME)/*.pyc; +black: + source $(ROOT_DIR)/env/bin/activate; black -l 120 -S --target-version py38 $(APP_NAME) + +isort: + source $(ROOT_DIR)/env/bin/activate; isort --ignore-whitespace --atomic -w 120 $(APP_NAME) \ No newline at end of file From 3d101103148c6ac6ec1a5fb84520aae515e7cd24 Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Mon, 24 Jul 2023 11:34:50 -0400 Subject: [PATCH 161/198] Moved model check into cpu only paths --- gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py index 660b2000218e..5df9ddb72846 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py @@ -111,10 +111,6 @@ async def completions(request: CompletionRequest): ''' Completes a GPT4All model response. ''' - - if request.model != settings.model: - raise HTTPException(status_code=400, detail=f"The GPT4All inference server is booted to only infer: `{settings.model}`") - if settings.inference_mode == "gpu": params = request.dict(exclude={'model', 'prompt', 'max_tokens', 'n'}) params["max_new_tokens"] = request.max_tokens @@ -170,6 +166,10 @@ async def completions(request: CompletionRequest): else: + if request.model != settings.model: + raise HTTPException(status_code=400, + detail=f"The GPT4All inference server is booted to only infer: `{settings.model}`") + if isinstance(request.prompt, list): if len(request.prompt) > 1: raise HTTPException(status_code=400, detail="Can only infer one inference per request in CPU mode.") From 2befff83d68bb031e3c6a79fb4a37a8e17614d76 Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Mon, 24 Jul 2023 12:01:37 -0400 Subject: [PATCH 162/198] top_p error in gpt4all-api --- gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py index 5df9ddb72846..31ed82ed9e97 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py @@ -24,7 +24,7 @@ class CompletionRequest(BaseModel): prompt: Union[List[str], str] = Field(..., description='The prompt to begin completing from.') max_tokens: int = Field(None, description='Max tokens to generate') temperature: float = Field(settings.temp, description='Model temperature') - top_p: float = Field(settings.top_k, description='top_p') + top_p: float = Field(settings.top_p, description='top_p') top_k: int = Field(settings.top_k, description='top_k') n: int = Field(1, description='How many completions to generate for each prompt') stream: bool = Field(False, description='Stream responses') From 6431d4677690565f633e7533662acc91ce9102d2 Mon Sep 17 00:00:00 2001 From: cosmic-snow <134004613+cosmic-snow@users.noreply.github.com> Date: Mon, 24 Jul 2023 18:57:06 +0200 Subject: [PATCH 163/198] Fix models not getting downloaded in 
Python bindings (#1262) - custom callbacks & session improvements PR (v1.0.6) had one too many checks - remove the problematic config['url'] check - add a crude test - fixes #1261 --- gpt4all-bindings/python/gpt4all/gpt4all.py | 4 ---- .../python/gpt4all/tests/test_gpt4all.py | 13 +++++++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/gpt4all-bindings/python/gpt4all/gpt4all.py b/gpt4all-bindings/python/gpt4all/gpt4all.py index ed6427312418..62af9503fe82 100644 --- a/gpt4all-bindings/python/gpt4all/gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/gpt4all.py @@ -168,10 +168,6 @@ def retrieve_model( # If model file does not exist, download elif allow_download: - # Make sure valid model filename before attempting download - - if "url" not in config: - raise ValueError(f"Model filename not in model list: {model_filename}") url = config.pop("url", None) config["path"] = GPT4All.download_model( diff --git a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py index fa798c0c3b5e..89e81086dbc8 100644 --- a/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py +++ b/gpt4all-bindings/python/gpt4all/tests/test_gpt4all.py @@ -1,5 +1,6 @@ import sys from io import StringIO +from pathlib import Path from gpt4all import GPT4All, Embed4All import time @@ -114,3 +115,15 @@ def test_empty_embedding(): embedder = Embed4All() with pytest.raises(ValueError): output = embedder.embed(text) + +def test_download_model(tmp_path: Path): + import gpt4all.gpt4all + old_default_dir = gpt4all.gpt4all.DEFAULT_MODEL_DIRECTORY + gpt4all.gpt4all.DEFAULT_MODEL_DIRECTORY = tmp_path # temporary pytest directory to ensure a download happens + try: + model = GPT4All(model_name='ggml-all-MiniLM-L6-v2-f16.bin') + model_path = tmp_path / model.config['filename'] + assert model_path.absolute() == Path(model.config['path']).absolute() + assert model_path.stat().st_size == int(model.config['filesize']) + finally: + gpt4all.gpt4all.DEFAULT_MODEL_DIRECTORY = old_default_dir From 41f640577c615fa02d6eebd664e514645867841c Mon Sep 17 00:00:00 2001 From: Andriy Mulyar Date: Mon, 24 Jul 2023 14:25:04 -0400 Subject: [PATCH 164/198] Update setup.py (#1263) Signed-off-by: Andriy Mulyar --- gpt4all-bindings/python/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt4all-bindings/python/setup.py b/gpt4all-bindings/python/setup.py index 8fbab0f3079a..17325b0e0d8c 100644 --- a/gpt4all-bindings/python/setup.py +++ b/gpt4all-bindings/python/setup.py @@ -61,7 +61,7 @@ def copy_prebuilt_C_lib(src_dir, dest_dir, dest_build_dir): setup( name=package_name, - version="1.0.6", + version="1.0.7", description="Python bindings for GPT4All", author="Richard Guo", author_email="richard@nomic.ai", From b3f84c56e71c43d0af8447a85db7cdca1e155021 Mon Sep 17 00:00:00 2001 From: Zach Nussbaum Date: Mon, 24 Jul 2023 14:28:12 -0500 Subject: [PATCH 165/198] fix: don't pass around the same dict object (#1264) --- gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py index 31ed82ed9e97..700650a5e811 100644 --- a/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py +++ b/gpt4all-api/gpt4all_api/app/api_v1/routes/completions.py @@ -117,14 +117,13 @@ async def completions(request: CompletionRequest): params["num_return_sequences"] = request.n header = {"Content-Type": 
"application/json"} - payload = {"parameters": params} if isinstance(request.prompt, list): tasks = [] for prompt in request.prompt: + payload = {"parameters": params} payload["inputs"] = prompt task = gpu_infer(payload, header) tasks.append(task) - results = await asyncio.gather(*tasks) choices = [] @@ -147,6 +146,7 @@ async def completions(request: CompletionRequest): ) else: + payload = {"parameters": params} # If streaming, we need to return a StreamingResponse payload["inputs"] = request.prompt From 545c23b4bd240d165c447860c9dc2affcbea8d98 Mon Sep 17 00:00:00 2001 From: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> Date: Tue, 25 Jul 2023 10:46:40 -0500 Subject: [PATCH 166/198] typescript: fix final bugs and polishing, circle ci documentation (#960) * fix: esm and cjs compatibility Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * Update prebuild.js Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * fix gpt4all.js Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * Fix compile for windows and linux again. PLEASE DON'T REVERT THISgit gui! * version bump * polish up spec and build scripts * lock file refresh * fix: proper resource closing and error handling * check make sure libPath not null * add msvc build script and update readme requirements * python workflows in circleci * dummy python change * no need for main * second hold for pypi deploy * let me deploy pls * bring back when condition * Typo, ignore list (#967) Fix typo in javadoc, Add word to ignore list for codespellrc --------- Co-authored-by: felix * llmodel: change tokenToString to not use string_view (#968) fixes a definite use-after-free and likely avoids some other potential ones - std::string will convert to a std::string_view automatically but as soon as the std::string in question goes out of scope it is already freed and the string_view is pointing at freed memory - this is *mostly* fine if its returning a reference to the tokenizer's internal vocab table but it's, imo, too easy to return a reference to a dynamically constructed string with this as replit is doing (and unfortunately needs to do to convert the internal whitespace replacement symbol back to a space) * Initial Library Loader for .NET Bindings / Update bindings to support newest changes (#763) * Initial Library Loader * Load library as part of Model factory * Dynamically search and find the dlls * Update tests to use locally built runtimes * Fix dylib loading, add macos runtime support for sample/tests * Bypass automatic loading by default. 
* Only set CMAKE_OSX_ARCHITECTURES if not already set, allow cross-compile * Switch Loading again * Update build scripts for mac/linux * Update bindings to support newest breaking changes * Fix build * Use llmodel for Windows * Actually, it does need to be libllmodel * Name * Remove TFMs, bypass loading by default * Fix script * Delete mac script --------- Co-authored-by: Tim Miller * bump llama.cpp mainline to latest (#964) * fix prompt context so it's preserved in class * update setup.py * metal replit (#931) metal+replit makes replit work with Metal and removes its use of `mem_per_token` in favor of fixed size scratch buffers (closer to llama.cpp) * update documentation scripts and generation to include readme.md * update readme and documentation for source * begin tests, import jest, fix listModels export * fix typo * chore: update spec * fix: finally, reduced potential of empty string * chore: add stub for createTokenSream * refactor: protecting resources properly * add basic jest tests * update * update readme * refactor: namespace the res variable * circleci integration to automatically build docs * add starter docs * typo * more circle ci typo * forgot to add nodejs circle ci orb * fix circle ci * feat: @iimez verify download and fix prebuild script * fix: oops, option name wrong * fix: gpt4all utils not emitting docs * chore: fix up scripts * fix: update docs and typings for md5 sum * fix: macos compilation * some refactoring * Update index.cc Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * update readme and enable exceptions on mac * circle ci progress * basic embedding with sbert (not tested & cpp side only) * fix circle ci * fix circle ci * update circle ci script * bruh * fix again * fix * fixed required workflows * fix ci * fix pwd * fix pwd * update ci * revert * fix * prevent rebuild * revmove noop * Update continue_config.yml Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * Update binding.gyp Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * fix fs not found * remove cpp 20 standard * fix warnings, safer way to calculate arrsize * readd build backend * basic embeddings and yarn test" * fix circle ci Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> Update continue_config.yml Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> fix macos paths update readme and roadmap split up spec update readme check for url in modelsjson update docs and inline stuff update yarn configuration and readme update readme readd npm publish script add exceptions bruh one space broke the yaml codespell oops forgot to add runtimes folder bump version try code snippet https://support.circleci.com/hc/en-us/articles/8325075309339-How-to-install-NPM-on-Windows-images add fallback for unknown architectures attached to wrong workspace hopefuly fix moving everything under backend to persist should work now * update circle ci script * prevent rebuild * revmove noop * Update continue_config.yml Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * Update binding.gyp Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> * fix fs not found * remove cpp 20 standard * fix warnings, safer way to calculate arrsize * readd build backend * basic embeddings and yarn test" * fix circle ci Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> Update 
continue_config.yml Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> fix macos paths update readme and roadmap split up spec update readme check for url in modelsjson update docs and inline stuff update yarn configuration and readme update readme readd npm publish script add exceptions bruh one space broke the yaml codespell oops forgot to add runtimes folder bump version try code snippet https://support.circleci.com/hc/en-us/articles/8325075309339-How-to-install-NPM-on-Windows-images add fallback for unknown architectures attached to wrong workspace hopefuly fix moving everything under backend to persist should work now * Update README.md Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> --------- Signed-off-by: Jacob Nguyen <76754747+jacoobes@users.noreply.github.com> Co-authored-by: Adam Treat Co-authored-by: Richard Guo Co-authored-by: Felix Zaslavskiy Co-authored-by: felix Co-authored-by: Aaron Miller Co-authored-by: Tim Miller Co-authored-by: Tim Miller --- .circleci/config.yml | 1 + .circleci/continue_config.yml | 221 +- .../python/docs/gpt4all_typescript.md | 670 ++ gpt4all-bindings/python/mkdocs.yml | 1 + gpt4all-bindings/typescript/.gitignore | 7 + .../typescript/.yarn/releases/yarn-3.6.1.cjs | 874 ++ gpt4all-bindings/typescript/.yarnrc.yml | 1 + gpt4all-bindings/typescript/README.md | 156 +- gpt4all-bindings/typescript/binding.ci.gyp | 62 + gpt4all-bindings/typescript/binding.gyp | 10 +- gpt4all-bindings/typescript/docs/api.md | 623 -- gpt4all-bindings/typescript/index.cc | 35 +- gpt4all-bindings/typescript/index.h | 3 +- gpt4all-bindings/typescript/package.json | 31 +- gpt4all-bindings/typescript/prompt.cc | 26 +- gpt4all-bindings/typescript/prompt.h | 8 +- gpt4all-bindings/typescript/scripts/build.js | 6 +- .../typescript/scripts/build_msvc.bat | 33 + gpt4all-bindings/typescript/scripts/docs.js | 8 + .../typescript/scripts/prebuild.js | 28 +- gpt4all-bindings/typescript/spec/chat.mjs | 65 + gpt4all-bindings/typescript/spec/embed.mjs | 8 + gpt4all-bindings/typescript/spec/index.mjs | 46 - gpt4all-bindings/typescript/src/gpt4all.d.ts | 114 +- gpt4all-bindings/typescript/src/gpt4all.js | 57 +- gpt4all-bindings/typescript/src/util.d.ts | 69 - gpt4all-bindings/typescript/src/util.js | 110 +- .../typescript/test/gpt4all.test.js | 79 + gpt4all-bindings/typescript/test/index.mjs | 8 - gpt4all-bindings/typescript/yarn.lock | 7936 ++++++++++++----- 30 files changed, 8091 insertions(+), 3205 deletions(-) create mode 100644 gpt4all-bindings/python/docs/gpt4all_typescript.md create mode 100644 gpt4all-bindings/typescript/.yarn/releases/yarn-3.6.1.cjs create mode 100644 gpt4all-bindings/typescript/.yarnrc.yml create mode 100644 gpt4all-bindings/typescript/binding.ci.gyp delete mode 100644 gpt4all-bindings/typescript/docs/api.md create mode 100644 gpt4all-bindings/typescript/scripts/build_msvc.bat create mode 100644 gpt4all-bindings/typescript/scripts/docs.js create mode 100644 gpt4all-bindings/typescript/spec/chat.mjs create mode 100644 gpt4all-bindings/typescript/spec/embed.mjs delete mode 100644 gpt4all-bindings/typescript/spec/index.mjs delete mode 100644 gpt4all-bindings/typescript/src/util.d.ts create mode 100644 gpt4all-bindings/typescript/test/gpt4all.test.js delete mode 100644 gpt4all-bindings/typescript/test/index.mjs diff --git a/.circleci/config.yml b/.circleci/config.yml index dc76e62fe268..fb6525c58a2a 100644 --- a/.circleci/config.yml +++ 
b/.circleci/config.yml @@ -12,6 +12,7 @@ workflows: config-path: .circleci/continue_config.yml mapping: | gpt4all-bindings/python/.* run-python-workflow true + gpt4all-bindings/typescript/.* run-ts-workflow true gpt4all-bindings/csharp/.* run-csharp-workflow true gpt4all-backend/.* run-chat-workflow true gpt4all-chat/.* run-chat-workflow true diff --git a/.circleci/continue_config.yml b/.circleci/continue_config.yml index f17636fdaff6..c67b36c46543 100644 --- a/.circleci/continue_config.yml +++ b/.circleci/continue_config.yml @@ -2,6 +2,7 @@ version: 2.1 orbs: win: circleci/windows@5.0 python: circleci/python@1.2 + node: circleci/node@5.1 parameters: run-default-workflow: @@ -13,6 +14,9 @@ parameters: run-chat-workflow: type: boolean default: false + run-ts-workflow: + type: boolean + default: false run-csharp-workflow: type: boolean default: false @@ -156,12 +160,26 @@ jobs: -S ../gpt4all-chat \ -B . ~/Qt/Tools/CMake/CMake.app/Contents/bin/cmake --build . --target all - + build-ts-docs: + docker: + - image: cimg/base:stable + steps: + - checkout + - node/install: + install-yarn: true + node-version: "18.16" + - run: node --version + - node/install-packages: + pkg-manager: yarn + - run: + name: build docs ts yo + command: yarn docs:build + build-py-docs: docker: - image: circleci/python:3.8 steps: - - checkout + - checkout - run: name: Install dependencies command: | @@ -612,6 +630,160 @@ jobs: - store_artifacts: path: gpt4all-bindings/csharp/Gpt4All/bin/Release + build-nodejs-linux: + docker: + - image: cimg/base:stable + steps: + - checkout + - attach_workspace: + at: /tmp/gpt4all-backend + - node/install: + install-yarn: true + node-version: "18.16" + - run: node --version + - node/install-packages: + app-dir: gpt4all-bindings/typescript + pkg-manager: yarn + - run: + command: | + cd gpt4all-bindings/typescript + yarn prebuildify -t 18.16.0 --napi + - run: + command: | + mkdir -p gpt4all-backend/prebuilds/linux-x64 + mkdir -p gpt4all-backend/runtimes/linux-x64 + cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so gpt4all-backend/runtimes/linux-x64 + cp gpt4all-bindings/typescript/prebuilds/linux-x64/*.node gpt4all-backend/prebuilds/linux-x64 + - persist_to_workspace: + root: gpt4all-backend + paths: + - prebuilds/linux-x64/*.node + - runtimes/linux-x64/*-*.so + build-nodejs-macos: + macos: + xcode: "14.0.0" + steps: + - checkout + - attach_workspace: + at: /tmp/gpt4all-backend + - node/install: + install-yarn: true + node-version: "18.16" + - run: node --version + - node/install-packages: + app-dir: gpt4all-bindings/typescript + pkg-manager: yarn + - run: + command: | + cd gpt4all-bindings/typescript + yarn prebuildify -t 18.16.0 --napi + - run: + name: "Persisting all necessary things to workspace" + command: | + mkdir -p gpt4all-backend/prebuilds/darwin-x64 + mkdir -p gpt4all-backend/runtimes/darwin-x64 + cp /tmp/gpt4all-backend/runtimes/osx-x64/*-*.* gpt4all-backend/runtimes/darwin-x64 + cp gpt4all-bindings/typescript/prebuilds/darwin-x64/*.node gpt4all-backend/prebuilds/darwin-x64 + - persist_to_workspace: + root: gpt4all-backend + paths: + - prebuilds/darwin-x64/*.node + - runtimes/darwin-x64/*-*.* + + build-nodejs-windows: + executor: + name: win/default + size: large + shell: powershell.exe -ExecutionPolicy Bypass + steps: + - checkout + - attach_workspace: + at: /tmp/gpt4all-backend + - run: choco install wget -y + - run: + command: wget https://nodejs.org/dist/v18.16.0/node-v18.16.0-x86.msi -P C:\Users\circleci\Downloads\ + shell: cmd.exe + - run: MsiExec.exe /i 
C:\Users\circleci\Downloads\node-v18.16.0-x86.msi /qn + - run: + command: | + Start-Process powershell -verb runAs -Args "-start GeneralProfile" + nvm install 18.16.0 + nvm use 18.16.0 + - run: node --version + - run: + command: | + npm install -g yarn + cd gpt4all-bindings/typescript + yarn install + - run: + command: | + cd gpt4all-bindings/typescript + yarn prebuildify -t 18.16.0 --napi + - run: + command: | + mkdir -p gpt4all-backend/prebuilds/win32-x64 + mkdir -p gpt4all-backend/runtimes/win32-x64 + cp /tmp/gpt4all-backend/runtimes/win-x64_msvc/*-*.dll gpt4all-backend/runtimes/win32-x64 + cp gpt4all-bindings/typescript/prebuilds/win32-x64/*.node gpt4all-backend/prebuilds/win32-x64 + + - persist_to_workspace: + root: gpt4all-backend + paths: + - prebuilds/win32-x64/*.node + - runtimes/win32-x64/*-*.dll + + prepare-npm-pkg: + docker: + - image: cimg/base:stable + steps: + - attach_workspace: + at: /tmp/gpt4all-backend + - checkout + - node/install: + install-yarn: true + node-version: "18.16" + - run: node --version + - run: + command: | + cd gpt4all-bindings/typescript + # excluding llmodel. nodejs bindings dont need llmodel.dll + mkdir -p runtimes/win32-x64/native + mkdir -p prebuilds/win32-x64/ + cp /tmp/gpt4all-backend/runtimes/win-x64_msvc/*-*.dll runtimes/win32-x64/native/ + cp /tmp/gpt4all-backend/prebuilds/win32-x64/*.node prebuilds/win32-x64/ + + mkdir -p runtimes/linux-x64/native + mkdir -p prebuilds/linux-x64/ + cp /tmp/gpt4all-backend/runtimes/linux-x64/*-*.so runtimes/linux-x64/native/ + cp /tmp/gpt4all-backend/prebuilds/linux-x64/*.node prebuilds/linux-x64/ + + mkdir -p runtimes/darwin-x64/native + mkdir -p prebuilds/darwin-x64/ + cp /tmp/gpt4all-backend/runtimes/darwin-x64/*-*.* runtimes/darwin-x64/native/ + cp /tmp/gpt4all-backend/prebuilds/darwin-x64/*.node prebuilds/darwin-x64/ + + # Fallback build if user is not on above prebuilds + mv -f binding.ci.gyp binding.gyp + + mkdir gpt4all-backend + cd ../../gpt4all-backend + mv llmodel.h llmodel.cpp llmodel_c.cpp llmodel_c.h sysinfo.h dlhandle.h ../gpt4all-bindings/typescript/gpt4all-backend/ + + # Test install + - node/install-packages: + app-dir: gpt4all-bindings/typescript + pkg-manager: yarn + override-ci-command: yarn install + - run: + command: | + cd gpt4all-bindings/typescript + yarn run test + - run: + command: | + cd gpt4all-bindings/typescript + npm set //registry.npmjs.org/:_authToken=$NPM_TOKEN + npm publish --access public --tag alpha + workflows: version: 2 default: @@ -635,6 +807,11 @@ workflows: deploy-docs: when: << pipeline.parameters.run-python-workflow >> jobs: + - build-ts-docs: + filters: + branches: + only: + - main - build-py-docs: filters: branches: @@ -679,11 +856,14 @@ workflows: or: - << pipeline.parameters.run-python-workflow >> - << pipeline.parameters.run-csharp-workflow >> + - << pipeline.parameters.run-ts-workflow >> jobs: - hold: type: approval - nuget-hold: type: approval + - npm-hold: + type: approval - build-bindings-backend-linux: filters: branches: @@ -708,6 +888,41 @@ workflows: only: requires: - hold + + # NodeJs Jobs + - prepare-npm-pkg: + filters: + branches: + only: + requires: + - npm-hold + - build-nodejs-linux + - build-nodejs-windows + - build-nodejs-macos + - build-nodejs-linux: + filters: + branches: + only: + requires: + - npm-hold + - build-bindings-backend-linux + - build-nodejs-windows: + filters: + branches: + only: + requires: + - npm-hold + - build-bindings-backend-windows-msvc + - build-nodejs-macos: + filters: + branches: + only: + requires: + - npm-hold + - 
build-bindings-backend-macos
+
+
+  # CSharp Jobs
   - build-csharp-linux:
       filters:
         branches:
           only:
       requires:
@@ -719,7 +934,7 @@
       branches:
         only:
       requires:
-        - build-bindings-backend-windows
+        - build-bindings-backend-windows
   - build-csharp-macos:
       filters:
         branches:
diff --git a/gpt4all-bindings/python/docs/gpt4all_typescript.md b/gpt4all-bindings/python/docs/gpt4all_typescript.md
new file mode 100644
index 000000000000..7db0edd53bb5
--- /dev/null
+++ b/gpt4all-bindings/python/docs/gpt4all_typescript.md
@@ -0,0 +1,670 @@
+# GPT4All Node.js API
+
+```sh
+yarn add gpt4all@alpha
+
+npm install gpt4all@alpha
+
+pnpm install gpt4all@alpha
+```
+
+The original [GPT4All typescript bindings](https://github.com/nomic-ai/gpt4all-ts) are now out of date.
+
+* New bindings created by [jacoobes](https://github.com/jacoobes) and the [nomic ai community](https://home.nomic.ai) :D, for all to use.
+* [Documentation](#Documentation)
+
+### Code (alpha)
+
+```js
+import { createCompletion, loadModel } from '../src/gpt4all.js'
+
+const ll = await loadModel('ggml-vicuna-7b-1.1-q4_2.bin', { verbose: true });
+
+const response = await createCompletion(ll, [
+    { role : 'system', content: 'You are meant to be annoying and unhelpful.' },
+    { role : 'user', content: 'What is 1 + 1?' }
+]);
+
+```
+
+### API
+
+* The nodejs api has made strides to mirror the python api. It is not 100% mirrored, but many pieces of the api resemble its python counterpart.
+* Everything should work out of the box.
+* [docs](./docs/api.md)
+
+### Build Instructions
+
+* As of 05/21/2023, tested on Windows (MSVC). (somehow got it to work on MSVC 🤯)
+    * binding.gyp is the compile config
+* Tested on Ubuntu. Everything seems to work fine.
+* MingW works as well to build the gpt4all-backend. **HOWEVER**, this package works only with MSVC-built DLLs.
+
+### Requirements
+
+* git
+* [node.js >= 18.0.0](https://nodejs.org/en)
+* [yarn](https://yarnpkg.com/)
+* [node-gyp](https://github.com/nodejs/node-gyp)
+    * all of its requirements.
+* (unix) gcc version 12
+    * These bindings use the C++ 20 standard.
+* (win) msvc version 143
+    * Can be obtained with visual studio 2022 build tools
+
+### Build
+
+```sh
+git clone https://github.com/nomic-ai/gpt4all.git
+cd gpt4all-bindings/typescript
+```
+
+* The below shell commands assume the current working directory is `typescript`.
+
+* To build and rebuild:
+
+```sh
+yarn
+```
+
+* The llama.cpp git submodule for gpt4all may be absent. If this is the case, make sure to run the following in the llama.cpp parent directory:
+
+```sh
+git submodule update --init --depth 1 --recursive
+```
+
+**AS OF NEW BACKEND**, to build the backend run:
+
+```sh
+yarn build:backend
+```
+
+This will build platform-dependent dynamic libraries, which will be located in runtimes/(platform)/native. The only current way to use them is to put them in the current working directory of your application. That is, **WHEREVER YOU RUN YOUR NODE APPLICATION**.
+
+* llama-xxxx.dll is required.
+* Depending on which model you are using, you'll need to select the proper model loader.
+    * For example, if you are running a Mosaic MPT model, you will need to select the mpt-(buildvariant).(dynamiclibrary).
+
+### Test
+
+```sh
+yarn test
+```
+
+### Source Overview
+
+#### src/
+
+* Extra functions to aid devex
+* Typings for the native node addon
+* The javascript interface
+
+#### test/
+
+* simple unit tests for some exported functions.
+* More advanced AI testing is not handled yet.
+
+#### spec/
+
+* The average look and feel of the API
+* Should work assuming a model and the libraries are installed locally in the working directory
+
+#### index.cc
+
+* The bridge between Node.js and C. This is where the bindings live.
+
+#### prompt.cc
+
+* Handles prompting and inference of models in a threadsafe, asynchronous way.
+
+#### docs/
+
+* Autogenerated documentation, built with the script `yarn docs:build`
+
+### Roadmap
+
+This package is in active development, and breaking changes may happen until the API stabilizes. Here's the todo list:
+
+* \[x] prompt models via a threadsafe function in order to have proper non-blocking behavior in Node.js
+* \[ ] createTokenStream, an async iterator that streams each token emitted from the model. Planning on following this [example](https://github.com/nodejs/node-addon-examples/tree/main/threadsafe-async-iterator)
+* \[ ] proper unit testing (integrate with CircleCI)
+* \[ ] publish to npm under alpha tag `gpt4all@alpha`
+* \[ ] have more people test on other platforms (mac tester needed)
+* \[x] switch to the new pluggable backend
+
+### Documentation
+
+
+
+##### Table of Contents
+
+* [ModelType](#modeltype)
+* [ModelFile](#modelfile)
+    * [gptj](#gptj)
+    * [llama](#llama)
+    * [mpt](#mpt)
+    * [replit](#replit)
+* [type](#type)
+* [LLModel](#llmodel)
+    * [constructor](#constructor)
+        * [Parameters](#parameters)
+    * [type](#type-1)
+    * [name](#name)
+    * [stateSize](#statesize)
+    * [threadCount](#threadcount)
+    * [setThreadCount](#setthreadcount)
+        * [Parameters](#parameters-1)
+    * [raw\_prompt](#raw_prompt)
+        * [Parameters](#parameters-2)
+    * [embed](#embed)
+        * [Parameters](#parameters-3)
+    * [isModelLoaded](#ismodelloaded)
+    * [setLibraryPath](#setlibrarypath)
+        * [Parameters](#parameters-4)
+    * [getLibraryPath](#getlibrarypath)
+* [loadModel](#loadmodel)
+    * [Parameters](#parameters-5)
+* [createCompletion](#createcompletion)
+    * [Parameters](#parameters-6)
+    * [Examples](#examples)
+* [createEmbedding](#createembedding)
+    * [Parameters](#parameters-7)
+* [CompletionOptions](#completionoptions)
+    * [verbose](#verbose)
+    * [hasDefaultHeader](#hasdefaultheader)
+    * [hasDefaultFooter](#hasdefaultfooter)
+* [PromptMessage](#promptmessage)
+    * [role](#role)
+    * [content](#content)
+* [prompt\_tokens](#prompt_tokens)
+* [completion\_tokens](#completion_tokens)
+* [total\_tokens](#total_tokens)
+* [CompletionReturn](#completionreturn)
+    * [model](#model)
+    * [usage](#usage)
+    * [choices](#choices)
+* [CompletionChoice](#completionchoice)
+    * [message](#message)
+* [LLModelPromptContext](#llmodelpromptcontext)
+    * [logits\_size](#logits_size)
+    * [tokens\_size](#tokens_size)
+    * [n\_past](#n_past)
+    * [n\_ctx](#n_ctx)
+    * [n\_predict](#n_predict)
+    * [top\_k](#top_k)
+    * [top\_p](#top_p)
+    * [temp](#temp)
+    * [n\_batch](#n_batch)
+    * [repeat\_penalty](#repeat_penalty)
+    * [repeat\_last\_n](#repeat_last_n)
+    * [context\_erase](#context_erase)
+* [createTokenStream](#createtokenstream)
+    * [Parameters](#parameters-8)
+* [DEFAULT\_DIRECTORY](#default_directory)
+* [DEFAULT\_LIBRARIES\_DIRECTORY](#default_libraries_directory)
+* [downloadModel](#downloadmodel)
+    * [Parameters](#parameters-9)
+    * [Examples](#examples-1)
+* [DownloadModelOptions](#downloadmodeloptions)
+    * [modelPath](#modelpath)
+    * [debug](#debug)
+    * [url](#url)
+    * [md5sum](#md5sum)
+* [DownloadController](#downloadcontroller)
+    * [cancel](#cancel)
+    * [promise](#promise)
+
+#### ModelType
+
+Type of the model.
+
+Type: (`"gptj"` | `"llama"` | `"mpt"` | `"replit"`)
+
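+The type is surfaced on a loaded model. A small sketch (the model name comes from the [mpt](#mpt) list below; `loadModel` is documented under [loadModel](#loadmodel), and the printed values assume `type` and `name` behave as described in [LLModel](#llmodel)):
+
+```js
+import { loadModel } from '../src/gpt4all.js'
+
+const model = await loadModel('ggml-mpt-7b-chat.bin', { verbose: false })
+console.log(model.type) // expected: 'mpt'
+console.log(model.name) // the model's name
+```
+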
| `"replit"`) + +#### ModelFile + +Full list of models available + +##### gptj + +List of GPT-J Models + +Type: (`"ggml-gpt4all-j-v1.3-groovy.bin"` | `"ggml-gpt4all-j-v1.2-jazzy.bin"` | `"ggml-gpt4all-j-v1.1-breezy.bin"` | `"ggml-gpt4all-j.bin"`) + +##### llama + +List Llama Models + +Type: (`"ggml-gpt4all-l13b-snoozy.bin"` | `"ggml-vicuna-7b-1.1-q4_2.bin"` | `"ggml-vicuna-13b-1.1-q4_2.bin"` | `"ggml-wizardLM-7B.q4_2.bin"` | `"ggml-stable-vicuna-13B.q4_2.bin"` | `"ggml-nous-gpt4-vicuna-13b.bin"` | `"ggml-v3-13b-hermes-q5_1.bin"`) + +##### mpt + +List of MPT Models + +Type: (`"ggml-mpt-7b-base.bin"` | `"ggml-mpt-7b-chat.bin"` | `"ggml-mpt-7b-instruct.bin"`) + +##### replit + +List of Replit Models + +Type: `"ggml-replit-code-v1-3b.bin"` + +#### type + +Model architecture. This argument currently does not have any functionality and is just used as descriptive identifier for user. + +Type: [ModelType](#modeltype) + +#### LLModel + +LLModel class representing a language model. +This is a base class that provides common functionality for different types of language models. + +##### constructor + +Initialize a new LLModel. + +###### Parameters + +* `path` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** Absolute path to the model file. + + + +* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model file does not exist. + +##### type + +either 'gpt', mpt', or 'llama' or undefined + +Returns **([ModelType](#modeltype) | [undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))** + +##### name + +The name of the model. + +Returns **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** + +##### stateSize + +Get the size of the internal state of the model. +NOTE: This state data is specific to the type of model you have created. + +Returns **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** the size in bytes of the internal state of the model + +##### threadCount + +Get the number of threads used for model inference. +The default is the number of physical cores your computer has. + +Returns **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The number of threads used for model inference. + +##### setThreadCount + +Set the number of threads used for model inference. + +###### Parameters + +* `newNumber` **[number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)** The new number of threads. + +Returns **void** + +##### raw\_prompt + +Prompt the model with a given input and optional parameters. +This is the raw output from model. +Use the prompt function exported for a value + +###### Parameters + +* `q` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The prompt input. +* `params` **Partial<[LLModelPromptContext](#llmodelpromptcontext)>** Optional parameters for the prompt context. +* `callback` **function (res: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)): void** + +Returns **void** The result of the model prompt. + +##### embed + +Embed text with the model. 
+##### embed
+
+Embed text with the model. Keep in mind that
+not all models can embed text (as of 2023-07-16, only BERT models can).
+Use the exported `createEmbedding` function for a friendlier interface.
+
+###### Parameters
+
+* `text` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The text to embed.
+
+Returns **[Float32Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)** The embedding of the text.
+
+##### isModelLoaded
+
+Whether the model is loaded or not.
+
+Returns **[boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)**
+
+##### setLibraryPath
+
+Where to search for the pluggable backend libraries.
+
+###### Parameters
+
+* `s` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
+
+Returns **void**
+
+##### getLibraryPath
+
+Where to get the pluggable backend libraries.
+
+Returns **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)**
+
+#### loadModel
+
+Loads a machine learning model with the specified name. This is the de facto way to create a model.
+By default this will download the model from the official GPT4All website if it is not present at the given path.
+
+##### Parameters
+
+* `modelName` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The name of the model to load.
+* `options` **(LoadModelOptions | [undefined](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/undefined))?** (Optional) Additional options for loading the model.
+
+Returns **[Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)<[LLModel](#llmodel)>** A promise that resolves to an instance of the loaded LLModel.
+
+#### createCompletion
+
+The Node.js equivalent of the Python binding's chat\_completion.
+
+##### Parameters
+
+* `llmodel` **[LLModel](#llmodel)** The language model object.
+* `messages` **[Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[PromptMessage](#promptmessage)>** The array of messages for the conversation.
+* `options` **[CompletionOptions](#completionoptions)** The options for creating the completion.
+
+##### Examples
+
+```javascript
+const llmodel = new LLModel(model)
+const messages = [
+    { role: 'system', content: 'You are a weather forecaster.' },
+    { role: 'user', content: 'should i go out today?' } ]
+const completion = await createCompletion(llmodel, messages, {
+    verbose: true,
+    temp: 0.9,
+})
+console.log(completion.choices[0].message.content)
+// No, it's going to be cold and rainy.
+```
+
+Returns **[CompletionReturn](#completionreturn)** The completion result.
+
+#### createEmbedding
+
+The Node.js equivalent of the Python binding's Embed4All().embed().
+
+##### Parameters
+
+* `llmodel` **[LLModel](#llmodel)** The language model object.
+* `text` **[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** The text to embed.
+
+Returns **[Float32Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Float32Array)** The embedding result.
+
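+A minimal usage sketch. The model name here is an assumption; substitute whichever BERT-style embedding model you have, since only those can embed:
+
+```js
+import { createEmbedding, loadModel } from '../src/gpt4all.js'
+
+// Assumed embedding-capable model name; adjust to the one you use.
+const embedder = await loadModel('ggml-all-MiniLM-L6-v2-f16.bin', { verbose: true })
+
+const embedding = createEmbedding(embedder, 'The quick brown fox jumps over the lazy dog')
+console.log(embedding.length) // dimensionality of the returned Float32Array
+```
+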
+#### CompletionOptions
+
+**Extends Partial\<LLModelPromptContext>**
+
+The options for creating the completion.
+
+##### verbose
+
+Indicates if verbose logging is enabled.
+
+Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
+
+##### hasDefaultHeader
+
+Indicates if the default header is included in the prompt.
+
+Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
+
+##### hasDefaultFooter
+
+Indicates if the default footer is included in the prompt.
+
+Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
+
+#### PromptMessage
+
+A message in the conversation, identical to OpenAI's chat message.
+
+##### role
+
+The role of the message.
+
+Type: (`"system"` | `"assistant"` | `"user"`)
+
+##### content
+
+The message content.
+
+Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
+
+#### prompt\_tokens
+
+The number of tokens used in the prompt.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+#### completion\_tokens
+
+The number of tokens used in the completion.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+#### total\_tokens
+
+The total number of tokens used.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+#### CompletionReturn
+
+The result of the completion, similar to OpenAI's format.
+
+##### model
+
+The model name.
+
+Type: [ModelFile](#modelfile)
+
+##### usage
+
+Token usage report.
+
+Type: {prompt\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), completion\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number), total\_tokens: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)}
+
+##### choices
+
+The generated completions.
+
+Type: [Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[CompletionChoice](#completionchoice)>
+
+#### CompletionChoice
+
+A completion choice, similar to OpenAI's format.
+
+##### message
+
+The response message.
+
+Type: [PromptMessage](#promptmessage)
+
+#### LLModelPromptContext
+
+Model inference arguments for generating completions.
+
+##### logits\_size
+
+The size of the raw logits vector.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### tokens\_size
+
+The size of the raw tokens vector.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### n\_past
+
+The number of tokens in the past conversation.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### n\_ctx
+
+The number of tokens possible in the context window.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### n\_predict
+
+The number of tokens to predict.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### top\_k
+
+The top-k logits to sample from.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### top\_p
+
+The nucleus sampling probability threshold.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### temp
+
+The temperature to adjust the model's output distribution.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### n\_batch
+
+The number of predictions to generate in parallel.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### repeat\_penalty
+
+The penalty factor for repeated tokens.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### repeat\_last\_n
+
+The number of last tokens to penalize.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
+##### context\_erase
+
+The percentage of context to erase if the context window is exceeded.
+
+Type: [number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)
+
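+Because `CompletionOptions` extends `Partial<LLModelPromptContext>`, these inference arguments can be passed directly in the options of `createCompletion`. A sketch with illustrative values, reusing `llmodel` and `messages` from the `createCompletion` example above:
+
+```js
+const completion = await createCompletion(llmodel, messages, {
+    verbose: false,
+    n_predict: 128,       // generate at most 128 new tokens
+    top_k: 40,
+    top_p: 0.9,
+    temp: 0.7,
+    repeat_penalty: 1.18, // discourage verbatim repetition...
+    repeat_last_n: 64,    // ...over the last 64 tokens
+})
+```
+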
+#### createTokenStream
+
+TODO: Help wanted to implement this.
+
+##### Parameters
+
+* `llmodel` **[LLModel](#llmodel)**
+* `messages` **[Array](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Array)<[PromptMessage](#promptmessage)>**
+* `options` **[CompletionOptions](#completionoptions)**
+
+Returns **function (ll: [LLModel](#llmodel)): AsyncGenerator<[string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)>**
+
+#### DEFAULT\_DIRECTORY
+
+From the Python API:
+models will be stored in `(homedir)/.cache/gpt4all/`
+
+Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
+
+#### DEFAULT\_LIBRARIES\_DIRECTORY
+
+From the Python API:
+The default path for dynamic libraries to be stored.
+You may separate paths by a semicolon to search in multiple areas.
+This searches DEFAULT\_DIRECTORY/libraries, cwd/libraries, and finally cwd.
+
+Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
+
+#### downloadModel
+
+Initiates the download of a model file of a specific model type.
+By default this downloads without waiting; use the returned controller to alter this behavior.
+
+##### Parameters
+
+* `modelName` **[ModelFile](#modelfile)** The model file to be downloaded.
+* `options` **DownloadOptions** Options to pass into the downloader. Default is { location: (cwd), debug: false }.
+
+##### Examples
+
+```javascript
+const controller = downloadModel('ggml-gpt4all-j-v1.3-groovy.bin')
+controller.promise().then(() => console.log('Downloaded!'))
+```
+
+* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model already exists in the specified location.
+* Throws **[Error](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Error)** If the model cannot be found at the specified url.
+
+Returns **[DownloadController](#downloadcontroller)** An object that allows controlling the download process.
+
+#### DownloadModelOptions
+
+Options for the model download process.
+
+##### modelPath
+
+Location to download the model to.
+Default is process.cwd(), the current working directory.
+
+Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)
+
+##### debug
+
+Debug mode; reports how long the download took, in seconds.
+
+Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean)
+
+##### url
+
+Remote download url.
Defaults to `https://gpt4all.io/models` + +Type: [string](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String) + +##### md5sum + +Whether to verify the hash of the download to ensure a proper download occurred. + +Type: [boolean](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Boolean) + +#### DownloadController + +Model download controller. + +##### cancel + +Cancel the request to download from gpt4all website if this is called. + +Type: function (): void + +##### promise + +Convert the downloader into a promise, allowing people to await and manage its lifetime + +Type: function (): [Promise](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise)\ diff --git a/gpt4all-bindings/python/mkdocs.yml b/gpt4all-bindings/python/mkdocs.yml index 60e3961eef1a..238e62c0baf1 100644 --- a/gpt4all-bindings/python/mkdocs.yml +++ b/gpt4all-bindings/python/mkdocs.yml @@ -13,6 +13,7 @@ nav: - 'GPT4All in Python': - 'Generation': 'gpt4all_python.md' - 'Embedding': 'gpt4all_python_embedding.md' + - 'GPT4ALL in NodeJs': 'gpt4all_typescript.md' - 'GPT4All Chat Client': 'gpt4all_chat.md' - 'gpt4all_cli.md' # - 'Tutorials': diff --git a/gpt4all-bindings/typescript/.gitignore b/gpt4all-bindings/typescript/.gitignore index 2e79d15a3f80..0297e51b244a 100644 --- a/gpt4all-bindings/typescript/.gitignore +++ b/gpt4all-bindings/typescript/.gitignore @@ -1,3 +1,10 @@ node_modules/ build/ prebuilds/ +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions +runtimes/ diff --git a/gpt4all-bindings/typescript/.yarn/releases/yarn-3.6.1.cjs b/gpt4all-bindings/typescript/.yarn/releases/yarn-3.6.1.cjs new file mode 100644 index 000000000000..5227385a9ec1 --- /dev/null +++ b/gpt4all-bindings/typescript/.yarn/releases/yarn-3.6.1.cjs @@ -0,0 +1,874 @@ +#!/usr/bin/env node +/* eslint-disable */ +//prettier-ignore +(()=>{var xge=Object.create;var lS=Object.defineProperty;var Pge=Object.getOwnPropertyDescriptor;var Dge=Object.getOwnPropertyNames;var kge=Object.getPrototypeOf,Rge=Object.prototype.hasOwnProperty;var J=(r=>typeof require<"u"?require:typeof Proxy<"u"?new Proxy(r,{get:(e,t)=>(typeof require<"u"?require:e)[t]}):r)(function(r){if(typeof require<"u")return require.apply(this,arguments);throw new Error('Dynamic require of "'+r+'" is not supported')});var Fge=(r,e)=>()=>(r&&(e=r(r=0)),e);var w=(r,e)=>()=>(e||r((e={exports:{}}).exports,e),e.exports),ut=(r,e)=>{for(var t in e)lS(r,t,{get:e[t],enumerable:!0})},Nge=(r,e,t,i)=>{if(e&&typeof e=="object"||typeof e=="function")for(let n of Dge(e))!Rge.call(r,n)&&n!==t&&lS(r,n,{get:()=>e[n],enumerable:!(i=Pge(e,n))||i.enumerable});return r};var Pe=(r,e,t)=>(t=r!=null?xge(kge(r)):{},Nge(e||!r||!r.__esModule?lS(t,"default",{value:r,enumerable:!0}):t,r));var vK=w((JXe,SK)=>{SK.exports=QK;QK.sync=tfe;var BK=J("fs");function efe(r,e){var t=e.pathExt!==void 0?e.pathExt:process.env.PATHEXT;if(!t||(t=t.split(";"),t.indexOf("")!==-1))return!0;for(var i=0;i{kK.exports=PK;PK.sync=rfe;var xK=J("fs");function PK(r,e,t){xK.stat(r,function(i,n){t(i,i?!1:DK(n,e))})}function rfe(r,e){return DK(xK.statSync(r),e)}function DK(r,e){return r.isFile()&&ife(r,e)}function ife(r,e){var t=r.mode,i=r.uid,n=r.gid,s=e.uid!==void 0?e.uid:process.getuid&&process.getuid(),o=e.gid!==void 0?e.gid:process.getgid&&process.getgid(),a=parseInt("100",8),l=parseInt("010",8),c=parseInt("001",8),u=a|l,g=t&c||t&l&&n===o||t&a&&i===s||t&u&&s===0;return g}});var NK=w((VXe,FK)=>{var 
zXe=J("fs"),lI;process.platform==="win32"||global.TESTING_WINDOWS?lI=vK():lI=RK();FK.exports=SS;SS.sync=nfe;function SS(r,e,t){if(typeof e=="function"&&(t=e,e={}),!t){if(typeof Promise!="function")throw new TypeError("callback not provided");return new Promise(function(i,n){SS(r,e||{},function(s,o){s?n(s):i(o)})})}lI(r,e||{},function(i,n){i&&(i.code==="EACCES"||e&&e.ignoreErrors)&&(i=null,n=!1),t(i,n)})}function nfe(r,e){try{return lI.sync(r,e||{})}catch(t){if(e&&e.ignoreErrors||t.code==="EACCES")return!1;throw t}}});var HK=w((XXe,UK)=>{var Dg=process.platform==="win32"||process.env.OSTYPE==="cygwin"||process.env.OSTYPE==="msys",TK=J("path"),sfe=Dg?";":":",LK=NK(),MK=r=>Object.assign(new Error(`not found: ${r}`),{code:"ENOENT"}),OK=(r,e)=>{let t=e.colon||sfe,i=r.match(/\//)||Dg&&r.match(/\\/)?[""]:[...Dg?[process.cwd()]:[],...(e.path||process.env.PATH||"").split(t)],n=Dg?e.pathExt||process.env.PATHEXT||".EXE;.CMD;.BAT;.COM":"",s=Dg?n.split(t):[""];return Dg&&r.indexOf(".")!==-1&&s[0]!==""&&s.unshift(""),{pathEnv:i,pathExt:s,pathExtExe:n}},KK=(r,e,t)=>{typeof e=="function"&&(t=e,e={}),e||(e={});let{pathEnv:i,pathExt:n,pathExtExe:s}=OK(r,e),o=[],a=c=>new Promise((u,g)=>{if(c===i.length)return e.all&&o.length?u(o):g(MK(r));let f=i[c],h=/^".*"$/.test(f)?f.slice(1,-1):f,p=TK.join(h,r),C=!h&&/^\.[\\\/]/.test(r)?r.slice(0,2)+p:p;u(l(C,c,0))}),l=(c,u,g)=>new Promise((f,h)=>{if(g===n.length)return f(a(u+1));let p=n[g];LK(c+p,{pathExt:s},(C,y)=>{if(!C&&y)if(e.all)o.push(c+p);else return f(c+p);return f(l(c,u,g+1))})});return t?a(0).then(c=>t(null,c),t):a(0)},ofe=(r,e)=>{e=e||{};let{pathEnv:t,pathExt:i,pathExtExe:n}=OK(r,e),s=[];for(let o=0;o{"use strict";var GK=(r={})=>{let e=r.env||process.env;return(r.platform||process.platform)!=="win32"?"PATH":Object.keys(e).reverse().find(i=>i.toUpperCase()==="PATH")||"Path"};vS.exports=GK;vS.exports.default=GK});var WK=w((_Xe,JK)=>{"use strict";var jK=J("path"),afe=HK(),Afe=YK();function qK(r,e){let t=r.options.env||process.env,i=process.cwd(),n=r.options.cwd!=null,s=n&&process.chdir!==void 0&&!process.chdir.disabled;if(s)try{process.chdir(r.options.cwd)}catch{}let o;try{o=afe.sync(r.command,{path:t[Afe({env:t})],pathExt:e?jK.delimiter:void 0})}catch{}finally{s&&process.chdir(i)}return o&&(o=jK.resolve(n?r.options.cwd:"",o)),o}function lfe(r){return qK(r)||qK(r,!0)}JK.exports=lfe});var zK=w(($Xe,PS)=>{"use strict";var xS=/([()\][%!^"`<>&|;, *?])/g;function cfe(r){return r=r.replace(xS,"^$1"),r}function ufe(r,e){return r=`${r}`,r=r.replace(/(\\*)"/g,'$1$1\\"'),r=r.replace(/(\\*)$/,"$1$1"),r=`"${r}"`,r=r.replace(xS,"^$1"),e&&(r=r.replace(xS,"^$1")),r}PS.exports.command=cfe;PS.exports.argument=ufe});var XK=w((eZe,VK)=>{"use strict";VK.exports=/^#!(.*)/});var _K=w((tZe,ZK)=>{"use strict";var gfe=XK();ZK.exports=(r="")=>{let e=r.match(gfe);if(!e)return null;let[t,i]=e[0].replace(/#! 
?/,"").split(" "),n=t.split("/").pop();return n==="env"?i:i?`${n} ${i}`:n}});var eU=w((rZe,$K)=>{"use strict";var DS=J("fs"),ffe=_K();function hfe(r){let t=Buffer.alloc(150),i;try{i=DS.openSync(r,"r"),DS.readSync(i,t,0,150,0),DS.closeSync(i)}catch{}return ffe(t.toString())}$K.exports=hfe});var nU=w((iZe,iU)=>{"use strict";var pfe=J("path"),tU=WK(),rU=zK(),dfe=eU(),Cfe=process.platform==="win32",mfe=/\.(?:com|exe)$/i,Efe=/node_modules[\\/].bin[\\/][^\\/]+\.cmd$/i;function Ife(r){r.file=tU(r);let e=r.file&&dfe(r.file);return e?(r.args.unshift(r.file),r.command=e,tU(r)):r.file}function yfe(r){if(!Cfe)return r;let e=Ife(r),t=!mfe.test(e);if(r.options.forceShell||t){let i=Efe.test(e);r.command=pfe.normalize(r.command),r.command=rU.command(r.command),r.args=r.args.map(s=>rU.argument(s,i));let n=[r.command].concat(r.args).join(" ");r.args=["/d","/s","/c",`"${n}"`],r.command=process.env.comspec||"cmd.exe",r.options.windowsVerbatimArguments=!0}return r}function wfe(r,e,t){e&&!Array.isArray(e)&&(t=e,e=null),e=e?e.slice(0):[],t=Object.assign({},t);let i={command:r,args:e,options:t,file:void 0,original:{command:r,args:e}};return t.shell?i:yfe(i)}iU.exports=wfe});var aU=w((nZe,oU)=>{"use strict";var kS=process.platform==="win32";function RS(r,e){return Object.assign(new Error(`${e} ${r.command} ENOENT`),{code:"ENOENT",errno:"ENOENT",syscall:`${e} ${r.command}`,path:r.command,spawnargs:r.args})}function Bfe(r,e){if(!kS)return;let t=r.emit;r.emit=function(i,n){if(i==="exit"){let s=sU(n,e,"spawn");if(s)return t.call(r,"error",s)}return t.apply(r,arguments)}}function sU(r,e){return kS&&r===1&&!e.file?RS(e.original,"spawn"):null}function bfe(r,e){return kS&&r===1&&!e.file?RS(e.original,"spawnSync"):null}oU.exports={hookChildProcess:Bfe,verifyENOENT:sU,verifyENOENTSync:bfe,notFoundError:RS}});var TS=w((sZe,kg)=>{"use strict";var AU=J("child_process"),FS=nU(),NS=aU();function lU(r,e,t){let i=FS(r,e,t),n=AU.spawn(i.command,i.args,i.options);return NS.hookChildProcess(n,i),n}function Qfe(r,e,t){let i=FS(r,e,t),n=AU.spawnSync(i.command,i.args,i.options);return n.error=n.error||NS.verifyENOENTSync(n.status,i),n}kg.exports=lU;kg.exports.spawn=lU;kg.exports.sync=Qfe;kg.exports._parse=FS;kg.exports._enoent=NS});var uU=w((oZe,cU)=>{"use strict";function Sfe(r,e){function t(){this.constructor=r}t.prototype=e.prototype,r.prototype=new t}function Zl(r,e,t,i){this.message=r,this.expected=e,this.found=t,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,Zl)}Sfe(Zl,Error);Zl.buildMessage=function(r,e){var t={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;g>",ie=me(">>",!1),de=">&",_e=me(">&",!1),Pt=">",It=me(">",!1),Mr="<<<",ii=me("<<<",!1),gi="<&",hr=me("<&",!1),fi="<",ni=me("<",!1),Ks=function(m){return{type:"argument",segments:[].concat(...m)}},pr=function(m){return m},Ii="$'",rs=me("$'",!1),fa="'",CA=me("'",!1),cg=function(m){return[{type:"text",text:m}]},is='""',mA=me('""',!1),ha=function(){return{type:"text",text:""}},wp='"',EA=me('"',!1),IA=function(m){return 
m},wr=function(m){return{type:"arithmetic",arithmetic:m,quoted:!0}},Tl=function(m){return{type:"shell",shell:m,quoted:!0}},ug=function(m){return{type:"variable",...m,quoted:!0}},Io=function(m){return{type:"text",text:m}},gg=function(m){return{type:"arithmetic",arithmetic:m,quoted:!1}},Bp=function(m){return{type:"shell",shell:m,quoted:!1}},bp=function(m){return{type:"variable",...m,quoted:!1}},vr=function(m){return{type:"glob",pattern:m}},se=/^[^']/,yo=Je(["'"],!0,!1),Fn=function(m){return m.join("")},fg=/^[^$"]/,bt=Je(["$",'"'],!0,!1),Ll=`\\ +`,Nn=me(`\\ +`,!1),ns=function(){return""},ss="\\",gt=me("\\",!1),wo=/^[\\$"`]/,At=Je(["\\","$",'"',"`"],!1,!1),ln=function(m){return m},S="\\a",Lt=me("\\a",!1),hg=function(){return"a"},Ml="\\b",Qp=me("\\b",!1),Sp=function(){return"\b"},vp=/^[Ee]/,xp=Je(["E","e"],!1,!1),Pp=function(){return"\x1B"},G="\\f",yt=me("\\f",!1),yA=function(){return"\f"},zi="\\n",Ol=me("\\n",!1),Xe=function(){return` +`},pa="\\r",pg=me("\\r",!1),ME=function(){return"\r"},Dp="\\t",OE=me("\\t",!1),ar=function(){return" "},Tn="\\v",Kl=me("\\v",!1),kp=function(){return"\v"},Us=/^[\\'"?]/,da=Je(["\\","'",'"',"?"],!1,!1),cn=function(m){return String.fromCharCode(parseInt(m,16))},Le="\\x",dg=me("\\x",!1),Ul="\\u",Hs=me("\\u",!1),Hl="\\U",wA=me("\\U",!1),Cg=function(m){return String.fromCodePoint(parseInt(m,16))},mg=/^[0-7]/,Ca=Je([["0","7"]],!1,!1),ma=/^[0-9a-fA-f]/,rt=Je([["0","9"],["a","f"],["A","f"]],!1,!1),Bo=nt(),BA="-",Gl=me("-",!1),Gs="+",Yl=me("+",!1),KE=".",Rp=me(".",!1),Eg=function(m,Q,N){return{type:"number",value:(m==="-"?-1:1)*parseFloat(Q.join("")+"."+N.join(""))}},Fp=function(m,Q){return{type:"number",value:(m==="-"?-1:1)*parseInt(Q.join(""))}},UE=function(m){return{type:"variable",...m}},jl=function(m){return{type:"variable",name:m}},HE=function(m){return m},Ig="*",bA=me("*",!1),Rr="/",GE=me("/",!1),Ys=function(m,Q,N){return{type:Q==="*"?"multiplication":"division",right:N}},js=function(m,Q){return Q.reduce((N,U)=>({left:N,...U}),m)},yg=function(m,Q,N){return{type:Q==="+"?"addition":"subtraction",right:N}},QA="$((",R=me("$((",!1),q="))",Ce=me("))",!1),Ke=function(m){return m},Re="$(",ze=me("$(",!1),dt=function(m){return m},Ft="${",Ln=me("${",!1),JQ=":-",P1=me(":-",!1),D1=function(m,Q){return{name:m,defaultValue:Q}},WQ=":-}",k1=me(":-}",!1),R1=function(m){return{name:m,defaultValue:[]}},zQ=":+",F1=me(":+",!1),N1=function(m,Q){return{name:m,alternativeValue:Q}},VQ=":+}",T1=me(":+}",!1),L1=function(m){return{name:m,alternativeValue:[]}},XQ=function(m){return{name:m}},M1="$",O1=me("$",!1),K1=function(m){return e.isGlobPattern(m)},U1=function(m){return m},ZQ=/^[a-zA-Z0-9_]/,_Q=Je([["a","z"],["A","Z"],["0","9"],"_"],!1,!1),$Q=function(){return L()},eS=/^[$@*?#a-zA-Z0-9_\-]/,tS=Je(["$","@","*","?","#",["a","z"],["A","Z"],["0","9"],"_","-"],!1,!1),H1=/^[(){}<>$|&; \t"']/,wg=Je(["(",")","{","}","<",">","$","|","&",";"," "," ",'"',"'"],!1,!1),rS=/^[<>&; \t"']/,iS=Je(["<",">","&",";"," "," ",'"',"'"],!1,!1),YE=/^[ \t]/,jE=Je([" "," "],!1,!1),b=0,Oe=0,SA=[{line:1,column:1}],d=0,E=[],I=0,k;if("startRule"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule "`+e.startRule+'".');n=i[e.startRule]}function L(){return r.substring(Oe,b)}function Z(){return Et(Oe,b)}function te(m,Q){throw Q=Q!==void 0?Q:Et(Oe,b),Ri([lt(m)],r.substring(Oe,b),Q)}function we(m,Q){throw Q=Q!==void 0?Q:Et(Oe,b),Mn(m,Q)}function me(m,Q){return{type:"literal",text:m,ignoreCase:Q}}function Je(m,Q,N){return{type:"class",parts:m,inverted:Q,ignoreCase:N}}function 
nt(){return{type:"any"}}function wt(){return{type:"end"}}function lt(m){return{type:"other",description:m}}function it(m){var Q=SA[m],N;if(Q)return Q;for(N=m-1;!SA[N];)N--;for(Q=SA[N],Q={line:Q.line,column:Q.column};Nd&&(d=b,E=[]),E.push(m))}function Mn(m,Q){return new Zl(m,null,null,Q)}function Ri(m,Q,N){return new Zl(Zl.buildMessage(m,Q),m,Q,N)}function vA(){var m,Q;return m=b,Q=Or(),Q===t&&(Q=null),Q!==t&&(Oe=m,Q=s(Q)),m=Q,m}function Or(){var m,Q,N,U,ce;if(m=b,Q=Kr(),Q!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();N!==t?(U=Ea(),U!==t?(ce=os(),ce===t&&(ce=null),ce!==t?(Oe=m,Q=o(Q,U,ce),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t)}else b=m,m=t;if(m===t)if(m=b,Q=Kr(),Q!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();N!==t?(U=Ea(),U===t&&(U=null),U!==t?(Oe=m,Q=a(Q,U),m=Q):(b=m,m=t)):(b=m,m=t)}else b=m,m=t;return m}function os(){var m,Q,N,U,ce;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t)if(N=Or(),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Oe=m,Q=l(N),m=Q):(b=m,m=t)}else b=m,m=t;else b=m,m=t;return m}function Ea(){var m;return r.charCodeAt(b)===59?(m=c,b++):(m=t,I===0&&be(u)),m===t&&(r.charCodeAt(b)===38?(m=g,b++):(m=t,I===0&&be(f))),m}function Kr(){var m,Q,N;return m=b,Q=G1(),Q!==t?(N=uge(),N===t&&(N=null),N!==t?(Oe=m,Q=h(Q,N),m=Q):(b=m,m=t)):(b=m,m=t),m}function uge(){var m,Q,N,U,ce,Se,ht;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t)if(N=gge(),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=Kr(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();Se!==t?(Oe=m,Q=p(N,ce),m=Q):(b=m,m=t)}else b=m,m=t;else b=m,m=t}else b=m,m=t;else b=m,m=t;return m}function gge(){var m;return r.substr(b,2)===C?(m=C,b+=2):(m=t,I===0&&be(y)),m===t&&(r.substr(b,2)===B?(m=B,b+=2):(m=t,I===0&&be(v))),m}function G1(){var m,Q,N;return m=b,Q=pge(),Q!==t?(N=fge(),N===t&&(N=null),N!==t?(Oe=m,Q=D(Q,N),m=Q):(b=m,m=t)):(b=m,m=t),m}function fge(){var m,Q,N,U,ce,Se,ht;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t)if(N=hge(),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=G1(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();Se!==t?(Oe=m,Q=T(N,ce),m=Q):(b=m,m=t)}else b=m,m=t;else b=m,m=t}else b=m,m=t;else b=m,m=t;return m}function hge(){var m;return r.substr(b,2)===H?(m=H,b+=2):(m=t,I===0&&be(j)),m===t&&(r.charCodeAt(b)===124?(m=$,b++):(m=t,I===0&&be(V))),m}function qE(){var m,Q,N,U,ce,Se;if(m=b,Q=eK(),Q!==t)if(r.charCodeAt(b)===61?(N=W,b++):(N=t,I===0&&be(_)),N!==t)if(U=q1(),U!==t){for(ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();ce!==t?(Oe=m,Q=A(Q,U),m=Q):(b=m,m=t)}else b=m,m=t;else b=m,m=t;else b=m,m=t;if(m===t)if(m=b,Q=eK(),Q!==t)if(r.charCodeAt(b)===61?(N=W,b++):(N=t,I===0&&be(_)),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Oe=m,Q=Ae(Q),m=Q):(b=m,m=t)}else b=m,m=t;else b=m,m=t;return m}function pge(){var m,Q,N,U,ce,Se,ht,Bt,Jr,hi,as;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t)if(r.charCodeAt(b)===40?(N=ge,b++):(N=t,I===0&&be(re)),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=Or(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();if(Se!==t)if(r.charCodeAt(b)===41?(ht=M,b++):(ht=t,I===0&&be(F)),ht!==t){for(Bt=[],Jr=He();Jr!==t;)Bt.push(Jr),Jr=He();if(Bt!==t){for(Jr=[],hi=Np();hi!==t;)Jr.push(hi),hi=Np();if(Jr!==t){for(hi=[],as=He();as!==t;)hi.push(as),as=He();hi!==t?(Oe=m,Q=ue(ce,Jr),m=Q):(b=m,m=t)}else b=m,m=t}else b=m,m=t}else b=m,m=t;else b=m,m=t}else b=m,m=t;else b=m,m=t}else b=m,m=t;else 
b=m,m=t;if(m===t){for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t)if(r.charCodeAt(b)===123?(N=pe,b++):(N=t,I===0&&be(ke)),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=Or(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();if(Se!==t)if(r.charCodeAt(b)===125?(ht=Fe,b++):(ht=t,I===0&&be(Ne)),ht!==t){for(Bt=[],Jr=He();Jr!==t;)Bt.push(Jr),Jr=He();if(Bt!==t){for(Jr=[],hi=Np();hi!==t;)Jr.push(hi),hi=Np();if(Jr!==t){for(hi=[],as=He();as!==t;)hi.push(as),as=He();hi!==t?(Oe=m,Q=oe(ce,Jr),m=Q):(b=m,m=t)}else b=m,m=t}else b=m,m=t}else b=m,m=t;else b=m,m=t}else b=m,m=t;else b=m,m=t}else b=m,m=t;else b=m,m=t;if(m===t){for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t){for(N=[],U=qE();U!==t;)N.push(U),U=qE();if(N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t){if(ce=[],Se=j1(),Se!==t)for(;Se!==t;)ce.push(Se),Se=j1();else ce=t;if(ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();Se!==t?(Oe=m,Q=le(N,ce),m=Q):(b=m,m=t)}else b=m,m=t}else b=m,m=t}else b=m,m=t}else b=m,m=t;if(m===t){for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t){if(N=[],U=qE(),U!==t)for(;U!==t;)N.push(U),U=qE();else N=t;if(N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Oe=m,Q=Be(N),m=Q):(b=m,m=t)}else b=m,m=t}else b=m,m=t}}}return m}function Y1(){var m,Q,N,U,ce;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t){if(N=[],U=JE(),U!==t)for(;U!==t;)N.push(U),U=JE();else N=t;if(N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Oe=m,Q=fe(N),m=Q):(b=m,m=t)}else b=m,m=t}else b=m,m=t;return m}function j1(){var m,Q,N;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();if(Q!==t?(N=Np(),N!==t?(Oe=m,Q=ae(N),m=Q):(b=m,m=t)):(b=m,m=t),m===t){for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();Q!==t?(N=JE(),N!==t?(Oe=m,Q=ae(N),m=Q):(b=m,m=t)):(b=m,m=t)}return m}function Np(){var m,Q,N,U,ce;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();return Q!==t?(qe.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(ne)),N===t&&(N=null),N!==t?(U=dge(),U!==t?(ce=JE(),ce!==t?(Oe=m,Q=Y(N,U,ce),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m}function dge(){var m;return r.substr(b,2)===he?(m=he,b+=2):(m=t,I===0&&be(ie)),m===t&&(r.substr(b,2)===de?(m=de,b+=2):(m=t,I===0&&be(_e)),m===t&&(r.charCodeAt(b)===62?(m=Pt,b++):(m=t,I===0&&be(It)),m===t&&(r.substr(b,3)===Mr?(m=Mr,b+=3):(m=t,I===0&&be(ii)),m===t&&(r.substr(b,2)===gi?(m=gi,b+=2):(m=t,I===0&&be(hr)),m===t&&(r.charCodeAt(b)===60?(m=fi,b++):(m=t,I===0&&be(ni))))))),m}function JE(){var m,Q,N;for(m=b,Q=[],N=He();N!==t;)Q.push(N),N=He();return Q!==t?(N=q1(),N!==t?(Oe=m,Q=ae(N),m=Q):(b=m,m=t)):(b=m,m=t),m}function q1(){var m,Q,N;if(m=b,Q=[],N=J1(),N!==t)for(;N!==t;)Q.push(N),N=J1();else Q=t;return Q!==t&&(Oe=m,Q=Ks(Q)),m=Q,m}function J1(){var m,Q;return m=b,Q=Cge(),Q!==t&&(Oe=m,Q=pr(Q)),m=Q,m===t&&(m=b,Q=mge(),Q!==t&&(Oe=m,Q=pr(Q)),m=Q,m===t&&(m=b,Q=Ege(),Q!==t&&(Oe=m,Q=pr(Q)),m=Q,m===t&&(m=b,Q=Ige(),Q!==t&&(Oe=m,Q=pr(Q)),m=Q))),m}function Cge(){var m,Q,N,U;return m=b,r.substr(b,2)===Ii?(Q=Ii,b+=2):(Q=t,I===0&&be(rs)),Q!==t?(N=Bge(),N!==t?(r.charCodeAt(b)===39?(U=fa,b++):(U=t,I===0&&be(CA)),U!==t?(Oe=m,Q=cg(N),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m}function mge(){var m,Q,N,U;return m=b,r.charCodeAt(b)===39?(Q=fa,b++):(Q=t,I===0&&be(CA)),Q!==t?(N=yge(),N!==t?(r.charCodeAt(b)===39?(U=fa,b++):(U=t,I===0&&be(CA)),U!==t?(Oe=m,Q=cg(N),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m}function Ege(){var 
m,Q,N,U;if(m=b,r.substr(b,2)===is?(Q=is,b+=2):(Q=t,I===0&&be(mA)),Q!==t&&(Oe=m,Q=ha()),m=Q,m===t)if(m=b,r.charCodeAt(b)===34?(Q=wp,b++):(Q=t,I===0&&be(EA)),Q!==t){for(N=[],U=W1();U!==t;)N.push(U),U=W1();N!==t?(r.charCodeAt(b)===34?(U=wp,b++):(U=t,I===0&&be(EA)),U!==t?(Oe=m,Q=IA(N),m=Q):(b=m,m=t)):(b=m,m=t)}else b=m,m=t;return m}function Ige(){var m,Q,N;if(m=b,Q=[],N=z1(),N!==t)for(;N!==t;)Q.push(N),N=z1();else Q=t;return Q!==t&&(Oe=m,Q=IA(Q)),m=Q,m}function W1(){var m,Q;return m=b,Q=_1(),Q!==t&&(Oe=m,Q=wr(Q)),m=Q,m===t&&(m=b,Q=$1(),Q!==t&&(Oe=m,Q=Tl(Q)),m=Q,m===t&&(m=b,Q=aS(),Q!==t&&(Oe=m,Q=ug(Q)),m=Q,m===t&&(m=b,Q=wge(),Q!==t&&(Oe=m,Q=Io(Q)),m=Q))),m}function z1(){var m,Q;return m=b,Q=_1(),Q!==t&&(Oe=m,Q=gg(Q)),m=Q,m===t&&(m=b,Q=$1(),Q!==t&&(Oe=m,Q=Bp(Q)),m=Q,m===t&&(m=b,Q=aS(),Q!==t&&(Oe=m,Q=bp(Q)),m=Q,m===t&&(m=b,Q=Sge(),Q!==t&&(Oe=m,Q=vr(Q)),m=Q,m===t&&(m=b,Q=Qge(),Q!==t&&(Oe=m,Q=Io(Q)),m=Q)))),m}function yge(){var m,Q,N;for(m=b,Q=[],se.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(yo));N!==t;)Q.push(N),se.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(yo));return Q!==t&&(Oe=m,Q=Fn(Q)),m=Q,m}function wge(){var m,Q,N;if(m=b,Q=[],N=V1(),N===t&&(fg.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(bt))),N!==t)for(;N!==t;)Q.push(N),N=V1(),N===t&&(fg.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(bt)));else Q=t;return Q!==t&&(Oe=m,Q=Fn(Q)),m=Q,m}function V1(){var m,Q,N;return m=b,r.substr(b,2)===Ll?(Q=Ll,b+=2):(Q=t,I===0&&be(Nn)),Q!==t&&(Oe=m,Q=ns()),m=Q,m===t&&(m=b,r.charCodeAt(b)===92?(Q=ss,b++):(Q=t,I===0&&be(gt)),Q!==t?(wo.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(At)),N!==t?(Oe=m,Q=ln(N),m=Q):(b=m,m=t)):(b=m,m=t)),m}function Bge(){var m,Q,N;for(m=b,Q=[],N=X1(),N===t&&(se.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(yo)));N!==t;)Q.push(N),N=X1(),N===t&&(se.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(yo)));return Q!==t&&(Oe=m,Q=Fn(Q)),m=Q,m}function X1(){var m,Q,N;return m=b,r.substr(b,2)===S?(Q=S,b+=2):(Q=t,I===0&&be(Lt)),Q!==t&&(Oe=m,Q=hg()),m=Q,m===t&&(m=b,r.substr(b,2)===Ml?(Q=Ml,b+=2):(Q=t,I===0&&be(Qp)),Q!==t&&(Oe=m,Q=Sp()),m=Q,m===t&&(m=b,r.charCodeAt(b)===92?(Q=ss,b++):(Q=t,I===0&&be(gt)),Q!==t?(vp.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(xp)),N!==t?(Oe=m,Q=Pp(),m=Q):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===G?(Q=G,b+=2):(Q=t,I===0&&be(yt)),Q!==t&&(Oe=m,Q=yA()),m=Q,m===t&&(m=b,r.substr(b,2)===zi?(Q=zi,b+=2):(Q=t,I===0&&be(Ol)),Q!==t&&(Oe=m,Q=Xe()),m=Q,m===t&&(m=b,r.substr(b,2)===pa?(Q=pa,b+=2):(Q=t,I===0&&be(pg)),Q!==t&&(Oe=m,Q=ME()),m=Q,m===t&&(m=b,r.substr(b,2)===Dp?(Q=Dp,b+=2):(Q=t,I===0&&be(OE)),Q!==t&&(Oe=m,Q=ar()),m=Q,m===t&&(m=b,r.substr(b,2)===Tn?(Q=Tn,b+=2):(Q=t,I===0&&be(Kl)),Q!==t&&(Oe=m,Q=kp()),m=Q,m===t&&(m=b,r.charCodeAt(b)===92?(Q=ss,b++):(Q=t,I===0&&be(gt)),Q!==t?(Us.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(da)),N!==t?(Oe=m,Q=ln(N),m=Q):(b=m,m=t)):(b=m,m=t),m===t&&(m=bge()))))))))),m}function bge(){var m,Q,N,U,ce,Se,ht,Bt,Jr,hi,as,AS;return 
m=b,r.charCodeAt(b)===92?(Q=ss,b++):(Q=t,I===0&&be(gt)),Q!==t?(N=nS(),N!==t?(Oe=m,Q=cn(N),m=Q):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Le?(Q=Le,b+=2):(Q=t,I===0&&be(dg)),Q!==t?(N=b,U=b,ce=nS(),ce!==t?(Se=On(),Se!==t?(ce=[ce,Se],U=ce):(b=U,U=t)):(b=U,U=t),U===t&&(U=nS()),U!==t?N=r.substring(N,b):N=U,N!==t?(Oe=m,Q=cn(N),m=Q):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Ul?(Q=Ul,b+=2):(Q=t,I===0&&be(Hs)),Q!==t?(N=b,U=b,ce=On(),ce!==t?(Se=On(),Se!==t?(ht=On(),ht!==t?(Bt=On(),Bt!==t?(ce=[ce,Se,ht,Bt],U=ce):(b=U,U=t)):(b=U,U=t)):(b=U,U=t)):(b=U,U=t),U!==t?N=r.substring(N,b):N=U,N!==t?(Oe=m,Q=cn(N),m=Q):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Hl?(Q=Hl,b+=2):(Q=t,I===0&&be(wA)),Q!==t?(N=b,U=b,ce=On(),ce!==t?(Se=On(),Se!==t?(ht=On(),ht!==t?(Bt=On(),Bt!==t?(Jr=On(),Jr!==t?(hi=On(),hi!==t?(as=On(),as!==t?(AS=On(),AS!==t?(ce=[ce,Se,ht,Bt,Jr,hi,as,AS],U=ce):(b=U,U=t)):(b=U,U=t)):(b=U,U=t)):(b=U,U=t)):(b=U,U=t)):(b=U,U=t)):(b=U,U=t)):(b=U,U=t),U!==t?N=r.substring(N,b):N=U,N!==t?(Oe=m,Q=Cg(N),m=Q):(b=m,m=t)):(b=m,m=t)))),m}function nS(){var m;return mg.test(r.charAt(b))?(m=r.charAt(b),b++):(m=t,I===0&&be(Ca)),m}function On(){var m;return ma.test(r.charAt(b))?(m=r.charAt(b),b++):(m=t,I===0&&be(rt)),m}function Qge(){var m,Q,N,U,ce;if(m=b,Q=[],N=b,r.charCodeAt(b)===92?(U=ss,b++):(U=t,I===0&&be(gt)),U!==t?(r.length>b?(ce=r.charAt(b),b++):(ce=t,I===0&&be(Bo)),ce!==t?(Oe=N,U=ln(ce),N=U):(b=N,N=t)):(b=N,N=t),N===t&&(N=b,U=b,I++,ce=tK(),I--,ce===t?U=void 0:(b=U,U=t),U!==t?(r.length>b?(ce=r.charAt(b),b++):(ce=t,I===0&&be(Bo)),ce!==t?(Oe=N,U=ln(ce),N=U):(b=N,N=t)):(b=N,N=t)),N!==t)for(;N!==t;)Q.push(N),N=b,r.charCodeAt(b)===92?(U=ss,b++):(U=t,I===0&&be(gt)),U!==t?(r.length>b?(ce=r.charAt(b),b++):(ce=t,I===0&&be(Bo)),ce!==t?(Oe=N,U=ln(ce),N=U):(b=N,N=t)):(b=N,N=t),N===t&&(N=b,U=b,I++,ce=tK(),I--,ce===t?U=void 0:(b=U,U=t),U!==t?(r.length>b?(ce=r.charAt(b),b++):(ce=t,I===0&&be(Bo)),ce!==t?(Oe=N,U=ln(ce),N=U):(b=N,N=t)):(b=N,N=t));else Q=t;return Q!==t&&(Oe=m,Q=Fn(Q)),m=Q,m}function sS(){var m,Q,N,U,ce,Se;if(m=b,r.charCodeAt(b)===45?(Q=BA,b++):(Q=t,I===0&&be(Gl)),Q===t&&(r.charCodeAt(b)===43?(Q=Gs,b++):(Q=t,I===0&&be(Yl))),Q===t&&(Q=null),Q!==t){if(N=[],qe.test(r.charAt(b))?(U=r.charAt(b),b++):(U=t,I===0&&be(ne)),U!==t)for(;U!==t;)N.push(U),qe.test(r.charAt(b))?(U=r.charAt(b),b++):(U=t,I===0&&be(ne));else N=t;if(N!==t)if(r.charCodeAt(b)===46?(U=KE,b++):(U=t,I===0&&be(Rp)),U!==t){if(ce=[],qe.test(r.charAt(b))?(Se=r.charAt(b),b++):(Se=t,I===0&&be(ne)),Se!==t)for(;Se!==t;)ce.push(Se),qe.test(r.charAt(b))?(Se=r.charAt(b),b++):(Se=t,I===0&&be(ne));else ce=t;ce!==t?(Oe=m,Q=Eg(Q,N,ce),m=Q):(b=m,m=t)}else b=m,m=t;else b=m,m=t}else b=m,m=t;if(m===t){if(m=b,r.charCodeAt(b)===45?(Q=BA,b++):(Q=t,I===0&&be(Gl)),Q===t&&(r.charCodeAt(b)===43?(Q=Gs,b++):(Q=t,I===0&&be(Yl))),Q===t&&(Q=null),Q!==t){if(N=[],qe.test(r.charAt(b))?(U=r.charAt(b),b++):(U=t,I===0&&be(ne)),U!==t)for(;U!==t;)N.push(U),qe.test(r.charAt(b))?(U=r.charAt(b),b++):(U=t,I===0&&be(ne));else N=t;N!==t?(Oe=m,Q=Fp(Q,N),m=Q):(b=m,m=t)}else b=m,m=t;if(m===t&&(m=b,Q=aS(),Q!==t&&(Oe=m,Q=UE(Q)),m=Q,m===t&&(m=b,Q=ql(),Q!==t&&(Oe=m,Q=jl(Q)),m=Q,m===t)))if(m=b,r.charCodeAt(b)===40?(Q=ge,b++):(Q=t,I===0&&be(re)),Q!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();if(N!==t)if(U=Z1(),U!==t){for(ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();ce!==t?(r.charCodeAt(b)===41?(Se=M,b++):(Se=t,I===0&&be(F)),Se!==t?(Oe=m,Q=HE(U),m=Q):(b=m,m=t)):(b=m,m=t)}else b=m,m=t;else b=m,m=t}else b=m,m=t}return m}function oS(){var 
m,Q,N,U,ce,Se,ht,Bt;if(m=b,Q=sS(),Q!==t){for(N=[],U=b,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(b)===42?(Se=Ig,b++):(Se=t,I===0&&be(bA)),Se===t&&(r.charCodeAt(b)===47?(Se=Rr,b++):(Se=t,I===0&&be(GE))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=sS(),Bt!==t?(Oe=U,ce=Ys(Q,Se,Bt),U=ce):(b=U,U=t)):(b=U,U=t)}else b=U,U=t;else b=U,U=t;for(;U!==t;){for(N.push(U),U=b,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(b)===42?(Se=Ig,b++):(Se=t,I===0&&be(bA)),Se===t&&(r.charCodeAt(b)===47?(Se=Rr,b++):(Se=t,I===0&&be(GE))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=sS(),Bt!==t?(Oe=U,ce=Ys(Q,Se,Bt),U=ce):(b=U,U=t)):(b=U,U=t)}else b=U,U=t;else b=U,U=t}N!==t?(Oe=m,Q=js(Q,N),m=Q):(b=m,m=t)}else b=m,m=t;return m}function Z1(){var m,Q,N,U,ce,Se,ht,Bt;if(m=b,Q=oS(),Q!==t){for(N=[],U=b,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(b)===43?(Se=Gs,b++):(Se=t,I===0&&be(Yl)),Se===t&&(r.charCodeAt(b)===45?(Se=BA,b++):(Se=t,I===0&&be(Gl))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=oS(),Bt!==t?(Oe=U,ce=yg(Q,Se,Bt),U=ce):(b=U,U=t)):(b=U,U=t)}else b=U,U=t;else b=U,U=t;for(;U!==t;){for(N.push(U),U=b,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(b)===43?(Se=Gs,b++):(Se=t,I===0&&be(Yl)),Se===t&&(r.charCodeAt(b)===45?(Se=BA,b++):(Se=t,I===0&&be(Gl))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=oS(),Bt!==t?(Oe=U,ce=yg(Q,Se,Bt),U=ce):(b=U,U=t)):(b=U,U=t)}else b=U,U=t;else b=U,U=t}N!==t?(Oe=m,Q=js(Q,N),m=Q):(b=m,m=t)}else b=m,m=t;return m}function _1(){var m,Q,N,U,ce,Se;if(m=b,r.substr(b,3)===QA?(Q=QA,b+=3):(Q=t,I===0&&be(R)),Q!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();if(N!==t)if(U=Z1(),U!==t){for(ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();ce!==t?(r.substr(b,2)===q?(Se=q,b+=2):(Se=t,I===0&&be(Ce)),Se!==t?(Oe=m,Q=Ke(U),m=Q):(b=m,m=t)):(b=m,m=t)}else b=m,m=t;else b=m,m=t}else b=m,m=t;return m}function $1(){var m,Q,N,U;return m=b,r.substr(b,2)===Re?(Q=Re,b+=2):(Q=t,I===0&&be(ze)),Q!==t?(N=Or(),N!==t?(r.charCodeAt(b)===41?(U=M,b++):(U=t,I===0&&be(F)),U!==t?(Oe=m,Q=dt(N),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m}function aS(){var m,Q,N,U,ce,Se;return m=b,r.substr(b,2)===Ft?(Q=Ft,b+=2):(Q=t,I===0&&be(Ln)),Q!==t?(N=ql(),N!==t?(r.substr(b,2)===JQ?(U=JQ,b+=2):(U=t,I===0&&be(P1)),U!==t?(ce=Y1(),ce!==t?(r.charCodeAt(b)===125?(Se=Fe,b++):(Se=t,I===0&&be(Ne)),Se!==t?(Oe=m,Q=D1(N,ce),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Ft?(Q=Ft,b+=2):(Q=t,I===0&&be(Ln)),Q!==t?(N=ql(),N!==t?(r.substr(b,3)===WQ?(U=WQ,b+=3):(U=t,I===0&&be(k1)),U!==t?(Oe=m,Q=R1(N),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Ft?(Q=Ft,b+=2):(Q=t,I===0&&be(Ln)),Q!==t?(N=ql(),N!==t?(r.substr(b,2)===zQ?(U=zQ,b+=2):(U=t,I===0&&be(F1)),U!==t?(ce=Y1(),ce!==t?(r.charCodeAt(b)===125?(Se=Fe,b++):(Se=t,I===0&&be(Ne)),Se!==t?(Oe=m,Q=N1(N,ce),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Ft?(Q=Ft,b+=2):(Q=t,I===0&&be(Ln)),Q!==t?(N=ql(),N!==t?(r.substr(b,3)===VQ?(U=VQ,b+=3):(U=t,I===0&&be(T1)),U!==t?(Oe=m,Q=L1(N),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.substr(b,2)===Ft?(Q=Ft,b+=2):(Q=t,I===0&&be(Ln)),Q!==t?(N=ql(),N!==t?(r.charCodeAt(b)===125?(U=Fe,b++):(U=t,I===0&&be(Ne)),U!==t?(Oe=m,Q=XQ(N),m=Q):(b=m,m=t)):(b=m,m=t)):(b=m,m=t),m===t&&(m=b,r.charCodeAt(b)===36?(Q=M1,b++):(Q=t,I===0&&be(O1)),Q!==t?(N=ql(),N!==t?(Oe=m,Q=XQ(N),m=Q):(b=m,m=t)):(b=m,m=t)))))),m}function Sge(){var m,Q,N;return 
m=b,Q=vge(),Q!==t?(Oe=b,N=K1(Q),N?N=void 0:N=t,N!==t?(Oe=m,Q=U1(Q),m=Q):(b=m,m=t)):(b=m,m=t),m}function vge(){var m,Q,N,U,ce;if(m=b,Q=[],N=b,U=b,I++,ce=rK(),I--,ce===t?U=void 0:(b=U,U=t),U!==t?(r.length>b?(ce=r.charAt(b),b++):(ce=t,I===0&&be(Bo)),ce!==t?(Oe=N,U=ln(ce),N=U):(b=N,N=t)):(b=N,N=t),N!==t)for(;N!==t;)Q.push(N),N=b,U=b,I++,ce=rK(),I--,ce===t?U=void 0:(b=U,U=t),U!==t?(r.length>b?(ce=r.charAt(b),b++):(ce=t,I===0&&be(Bo)),ce!==t?(Oe=N,U=ln(ce),N=U):(b=N,N=t)):(b=N,N=t);else Q=t;return Q!==t&&(Oe=m,Q=Fn(Q)),m=Q,m}function eK(){var m,Q,N;if(m=b,Q=[],ZQ.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(_Q)),N!==t)for(;N!==t;)Q.push(N),ZQ.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(_Q));else Q=t;return Q!==t&&(Oe=m,Q=$Q()),m=Q,m}function ql(){var m,Q,N;if(m=b,Q=[],eS.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(tS)),N!==t)for(;N!==t;)Q.push(N),eS.test(r.charAt(b))?(N=r.charAt(b),b++):(N=t,I===0&&be(tS));else Q=t;return Q!==t&&(Oe=m,Q=$Q()),m=Q,m}function tK(){var m;return H1.test(r.charAt(b))?(m=r.charAt(b),b++):(m=t,I===0&&be(wg)),m}function rK(){var m;return rS.test(r.charAt(b))?(m=r.charAt(b),b++):(m=t,I===0&&be(iS)),m}function He(){var m,Q;if(m=[],YE.test(r.charAt(b))?(Q=r.charAt(b),b++):(Q=t,I===0&&be(jE)),Q!==t)for(;Q!==t;)m.push(Q),YE.test(r.charAt(b))?(Q=r.charAt(b),b++):(Q=t,I===0&&be(jE));else m=t;return m}if(k=n(),k!==t&&b===r.length)return k;throw k!==t&&b{"use strict";function xfe(r,e){function t(){this.constructor=r}t.prototype=e.prototype,r.prototype=new t}function $l(r,e,t,i){this.message=r,this.expected=e,this.found=t,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,$l)}xfe($l,Error);$l.buildMessage=function(r,e){var t={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;gH&&(H=v,j=[]),j.push(ne))}function Ne(ne,Y){return new $l(ne,null,null,Y)}function oe(ne,Y,he){return new $l($l.buildMessage(ne,Y),ne,Y,he)}function le(){var ne,Y,he,ie;return ne=v,Y=Be(),Y!==t?(r.charCodeAt(v)===47?(he=s,v++):(he=t,$===0&&Fe(o)),he!==t?(ie=Be(),ie!==t?(D=ne,Y=a(Y,ie),ne=Y):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t),ne===t&&(ne=v,Y=Be(),Y!==t&&(D=ne,Y=l(Y)),ne=Y),ne}function Be(){var ne,Y,he,ie;return ne=v,Y=fe(),Y!==t?(r.charCodeAt(v)===64?(he=c,v++):(he=t,$===0&&Fe(u)),he!==t?(ie=qe(),ie!==t?(D=ne,Y=g(Y,ie),ne=Y):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t),ne===t&&(ne=v,Y=fe(),Y!==t&&(D=ne,Y=f(Y)),ne=Y),ne}function fe(){var ne,Y,he,ie,de;return ne=v,r.charCodeAt(v)===64?(Y=c,v++):(Y=t,$===0&&Fe(u)),Y!==t?(he=ae(),he!==t?(r.charCodeAt(v)===47?(ie=s,v++):(ie=t,$===0&&Fe(o)),ie!==t?(de=ae(),de!==t?(D=ne,Y=h(),ne=Y):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t),ne===t&&(ne=v,Y=ae(),Y!==t&&(D=ne,Y=h()),ne=Y),ne}function ae(){var ne,Y,he;if(ne=v,Y=[],p.test(r.charAt(v))?(he=r.charAt(v),v++):(he=t,$===0&&Fe(C)),he!==t)for(;he!==t;)Y.push(he),p.test(r.charAt(v))?(he=r.charAt(v),v++):(he=t,$===0&&Fe(C));else Y=t;return Y!==t&&(D=ne,Y=h()),ne=Y,ne}function qe(){var ne,Y,he;if(ne=v,Y=[],y.test(r.charAt(v))?(he=r.charAt(v),v++):(he=t,$===0&&Fe(B)),he!==t)for(;he!==t;)Y.push(he),y.test(r.charAt(v))?(he=r.charAt(v),v++):(he=t,$===0&&Fe(B));else Y=t;return Y!==t&&(D=ne,Y=h()),ne=Y,ne}if(V=n(),V!==t&&v===r.length)return V;throw V!==t&&v{"use strict";function dU(r){return typeof r>"u"||r===null}function Dfe(r){return typeof r=="object"&&r!==null}function kfe(r){return Array.isArray(r)?r:dU(r)?[]:[r]}function Rfe(r,e){var t,i,n,s;if(e)for(s=Object.keys(e),t=0,i=s.length;t{"use 
strict";function Vp(r,e){Error.call(this),this.name="YAMLException",this.reason=r,this.mark=e,this.message=(this.reason||"(unknown reason)")+(this.mark?" "+this.mark.toString():""),Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=new Error().stack||""}Vp.prototype=Object.create(Error.prototype);Vp.prototype.constructor=Vp;Vp.prototype.toString=function(e){var t=this.name+": ";return t+=this.reason||"(unknown reason)",!e&&this.mark&&(t+=" "+this.mark.toString()),t};CU.exports=Vp});var IU=w((bZe,EU)=>{"use strict";var mU=tc();function HS(r,e,t,i,n){this.name=r,this.buffer=e,this.position=t,this.line=i,this.column=n}HS.prototype.getSnippet=function(e,t){var i,n,s,o,a;if(!this.buffer)return null;for(e=e||4,t=t||75,i="",n=this.position;n>0&&`\0\r +\x85\u2028\u2029`.indexOf(this.buffer.charAt(n-1))===-1;)if(n-=1,this.position-n>t/2-1){i=" ... ",n+=5;break}for(s="",o=this.position;ot/2-1){s=" ... ",o-=5;break}return a=this.buffer.slice(n,o),mU.repeat(" ",e)+i+a+s+` +`+mU.repeat(" ",e+this.position-n+i.length)+"^"};HS.prototype.toString=function(e){var t,i="";return this.name&&(i+='in "'+this.name+'" '),i+="at line "+(this.line+1)+", column "+(this.column+1),e||(t=this.getSnippet(),t&&(i+=`: +`+t)),i};EU.exports=HS});var si=w((QZe,wU)=>{"use strict";var yU=Ng(),Tfe=["kind","resolve","construct","instanceOf","predicate","represent","defaultStyle","styleAliases"],Lfe=["scalar","sequence","mapping"];function Mfe(r){var e={};return r!==null&&Object.keys(r).forEach(function(t){r[t].forEach(function(i){e[String(i)]=t})}),e}function Ofe(r,e){if(e=e||{},Object.keys(e).forEach(function(t){if(Tfe.indexOf(t)===-1)throw new yU('Unknown option "'+t+'" is met in definition of "'+r+'" YAML type.')}),this.tag=r,this.kind=e.kind||null,this.resolve=e.resolve||function(){return!0},this.construct=e.construct||function(t){return t},this.instanceOf=e.instanceOf||null,this.predicate=e.predicate||null,this.represent=e.represent||null,this.defaultStyle=e.defaultStyle||null,this.styleAliases=Mfe(e.styleAliases||null),Lfe.indexOf(this.kind)===-1)throw new yU('Unknown kind "'+this.kind+'" is specified for "'+r+'" YAML type.')}wU.exports=Ofe});var rc=w((SZe,bU)=>{"use strict";var BU=tc(),dI=Ng(),Kfe=si();function GS(r,e,t){var i=[];return r.include.forEach(function(n){t=GS(n,e,t)}),r[e].forEach(function(n){t.forEach(function(s,o){s.tag===n.tag&&s.kind===n.kind&&i.push(o)}),t.push(n)}),t.filter(function(n,s){return i.indexOf(s)===-1})}function Ufe(){var r={scalar:{},sequence:{},mapping:{},fallback:{}},e,t;function i(n){r[n.kind][n.tag]=r.fallback[n.tag]=n}for(e=0,t=arguments.length;e{"use strict";var Hfe=si();QU.exports=new Hfe("tag:yaml.org,2002:str",{kind:"scalar",construct:function(r){return r!==null?r:""}})});var xU=w((xZe,vU)=>{"use strict";var Gfe=si();vU.exports=new Gfe("tag:yaml.org,2002:seq",{kind:"sequence",construct:function(r){return r!==null?r:[]}})});var DU=w((PZe,PU)=>{"use strict";var Yfe=si();PU.exports=new Yfe("tag:yaml.org,2002:map",{kind:"mapping",construct:function(r){return r!==null?r:{}}})});var CI=w((DZe,kU)=>{"use strict";var jfe=rc();kU.exports=new jfe({explicit:[SU(),xU(),DU()]})});var FU=w((kZe,RU)=>{"use strict";var qfe=si();function Jfe(r){if(r===null)return!0;var e=r.length;return e===1&&r==="~"||e===4&&(r==="null"||r==="Null"||r==="NULL")}function Wfe(){return null}function zfe(r){return r===null}RU.exports=new 
qfe("tag:yaml.org,2002:null",{kind:"scalar",resolve:Jfe,construct:Wfe,predicate:zfe,represent:{canonical:function(){return"~"},lowercase:function(){return"null"},uppercase:function(){return"NULL"},camelcase:function(){return"Null"}},defaultStyle:"lowercase"})});var TU=w((RZe,NU)=>{"use strict";var Vfe=si();function Xfe(r){if(r===null)return!1;var e=r.length;return e===4&&(r==="true"||r==="True"||r==="TRUE")||e===5&&(r==="false"||r==="False"||r==="FALSE")}function Zfe(r){return r==="true"||r==="True"||r==="TRUE"}function _fe(r){return Object.prototype.toString.call(r)==="[object Boolean]"}NU.exports=new Vfe("tag:yaml.org,2002:bool",{kind:"scalar",resolve:Xfe,construct:Zfe,predicate:_fe,represent:{lowercase:function(r){return r?"true":"false"},uppercase:function(r){return r?"TRUE":"FALSE"},camelcase:function(r){return r?"True":"False"}},defaultStyle:"lowercase"})});var MU=w((FZe,LU)=>{"use strict";var $fe=tc(),ehe=si();function the(r){return 48<=r&&r<=57||65<=r&&r<=70||97<=r&&r<=102}function rhe(r){return 48<=r&&r<=55}function ihe(r){return 48<=r&&r<=57}function nhe(r){if(r===null)return!1;var e=r.length,t=0,i=!1,n;if(!e)return!1;if(n=r[t],(n==="-"||n==="+")&&(n=r[++t]),n==="0"){if(t+1===e)return!0;if(n=r[++t],n==="b"){for(t++;t=0?"0b"+r.toString(2):"-0b"+r.toString(2).slice(1)},octal:function(r){return r>=0?"0"+r.toString(8):"-0"+r.toString(8).slice(1)},decimal:function(r){return r.toString(10)},hexadecimal:function(r){return r>=0?"0x"+r.toString(16).toUpperCase():"-0x"+r.toString(16).toUpperCase().slice(1)}},defaultStyle:"decimal",styleAliases:{binary:[2,"bin"],octal:[8,"oct"],decimal:[10,"dec"],hexadecimal:[16,"hex"]}})});var UU=w((NZe,KU)=>{"use strict";var OU=tc(),ahe=si(),Ahe=new RegExp("^(?:[-+]?(?:0|[1-9][0-9_]*)(?:\\.[0-9_]*)?(?:[eE][-+]?[0-9]+)?|\\.[0-9_]+(?:[eE][-+]?[0-9]+)?|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*|[-+]?\\.(?:inf|Inf|INF)|\\.(?:nan|NaN|NAN))$");function lhe(r){return!(r===null||!Ahe.test(r)||r[r.length-1]==="_")}function che(r){var e,t,i,n;return e=r.replace(/_/g,"").toLowerCase(),t=e[0]==="-"?-1:1,n=[],"+-".indexOf(e[0])>=0&&(e=e.slice(1)),e===".inf"?t===1?Number.POSITIVE_INFINITY:Number.NEGATIVE_INFINITY:e===".nan"?NaN:e.indexOf(":")>=0?(e.split(":").forEach(function(s){n.unshift(parseFloat(s,10))}),e=0,i=1,n.forEach(function(s){e+=s*i,i*=60}),t*e):t*parseFloat(e,10)}var uhe=/^[-+]?[0-9]+e/;function ghe(r,e){var t;if(isNaN(r))switch(e){case"lowercase":return".nan";case"uppercase":return".NAN";case"camelcase":return".NaN"}else if(Number.POSITIVE_INFINITY===r)switch(e){case"lowercase":return".inf";case"uppercase":return".INF";case"camelcase":return".Inf"}else if(Number.NEGATIVE_INFINITY===r)switch(e){case"lowercase":return"-.inf";case"uppercase":return"-.INF";case"camelcase":return"-.Inf"}else if(OU.isNegativeZero(r))return"-0.0";return t=r.toString(10),uhe.test(t)?t.replace("e",".e"):t}function fhe(r){return Object.prototype.toString.call(r)==="[object Number]"&&(r%1!==0||OU.isNegativeZero(r))}KU.exports=new ahe("tag:yaml.org,2002:float",{kind:"scalar",resolve:lhe,construct:che,predicate:fhe,represent:ghe,defaultStyle:"lowercase"})});var YS=w((TZe,HU)=>{"use strict";var hhe=rc();HU.exports=new hhe({include:[CI()],implicit:[FU(),TU(),MU(),UU()]})});var jS=w((LZe,GU)=>{"use strict";var phe=rc();GU.exports=new phe({include:[YS()]})});var JU=w((MZe,qU)=>{"use strict";var dhe=si(),YU=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])$"),jU=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:[Tt]|[ 
\\t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \\t]*(Z|([-+])([0-9][0-9]?)(?::([0-9][0-9]))?))?$");function Che(r){return r===null?!1:YU.exec(r)!==null||jU.exec(r)!==null}function mhe(r){var e,t,i,n,s,o,a,l=0,c=null,u,g,f;if(e=YU.exec(r),e===null&&(e=jU.exec(r)),e===null)throw new Error("Date resolve error");if(t=+e[1],i=+e[2]-1,n=+e[3],!e[4])return new Date(Date.UTC(t,i,n));if(s=+e[4],o=+e[5],a=+e[6],e[7]){for(l=e[7].slice(0,3);l.length<3;)l+="0";l=+l}return e[9]&&(u=+e[10],g=+(e[11]||0),c=(u*60+g)*6e4,e[9]==="-"&&(c=-c)),f=new Date(Date.UTC(t,i,n,s,o,a,l)),c&&f.setTime(f.getTime()-c),f}function Ehe(r){return r.toISOString()}qU.exports=new dhe("tag:yaml.org,2002:timestamp",{kind:"scalar",resolve:Che,construct:mhe,instanceOf:Date,represent:Ehe})});var zU=w((OZe,WU)=>{"use strict";var Ihe=si();function yhe(r){return r==="<<"||r===null}WU.exports=new Ihe("tag:yaml.org,2002:merge",{kind:"scalar",resolve:yhe})});var ZU=w((KZe,XU)=>{"use strict";var ic;try{VU=J,ic=VU("buffer").Buffer}catch{}var VU,whe=si(),qS=`ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/= +\r`;function Bhe(r){if(r===null)return!1;var e,t,i=0,n=r.length,s=qS;for(t=0;t64)){if(e<0)return!1;i+=6}return i%8===0}function bhe(r){var e,t,i=r.replace(/[\r\n=]/g,""),n=i.length,s=qS,o=0,a=[];for(e=0;e>16&255),a.push(o>>8&255),a.push(o&255)),o=o<<6|s.indexOf(i.charAt(e));return t=n%4*6,t===0?(a.push(o>>16&255),a.push(o>>8&255),a.push(o&255)):t===18?(a.push(o>>10&255),a.push(o>>2&255)):t===12&&a.push(o>>4&255),ic?ic.from?ic.from(a):new ic(a):a}function Qhe(r){var e="",t=0,i,n,s=r.length,o=qS;for(i=0;i>18&63],e+=o[t>>12&63],e+=o[t>>6&63],e+=o[t&63]),t=(t<<8)+r[i];return n=s%3,n===0?(e+=o[t>>18&63],e+=o[t>>12&63],e+=o[t>>6&63],e+=o[t&63]):n===2?(e+=o[t>>10&63],e+=o[t>>4&63],e+=o[t<<2&63],e+=o[64]):n===1&&(e+=o[t>>2&63],e+=o[t<<4&63],e+=o[64],e+=o[64]),e}function She(r){return ic&&ic.isBuffer(r)}XU.exports=new whe("tag:yaml.org,2002:binary",{kind:"scalar",resolve:Bhe,construct:bhe,predicate:She,represent:Qhe})});var $U=w((HZe,_U)=>{"use strict";var vhe=si(),xhe=Object.prototype.hasOwnProperty,Phe=Object.prototype.toString;function Dhe(r){if(r===null)return!0;var e=[],t,i,n,s,o,a=r;for(t=0,i=a.length;t{"use strict";var Rhe=si(),Fhe=Object.prototype.toString;function Nhe(r){if(r===null)return!0;var e,t,i,n,s,o=r;for(s=new Array(o.length),e=0,t=o.length;e{"use strict";var Lhe=si(),Mhe=Object.prototype.hasOwnProperty;function Ohe(r){if(r===null)return!0;var e,t=r;for(e in t)if(Mhe.call(t,e)&&t[e]!==null)return!1;return!0}function Khe(r){return r!==null?r:{}}r2.exports=new Lhe("tag:yaml.org,2002:set",{kind:"mapping",resolve:Ohe,construct:Khe})});var Lg=w((jZe,n2)=>{"use strict";var Uhe=rc();n2.exports=new Uhe({include:[jS()],implicit:[JU(),zU()],explicit:[ZU(),$U(),t2(),i2()]})});var o2=w((qZe,s2)=>{"use strict";var Hhe=si();function Ghe(){return!0}function Yhe(){}function jhe(){return""}function qhe(r){return typeof r>"u"}s2.exports=new Hhe("tag:yaml.org,2002:js/undefined",{kind:"scalar",resolve:Ghe,construct:Yhe,predicate:qhe,represent:jhe})});var A2=w((JZe,a2)=>{"use strict";var Jhe=si();function Whe(r){if(r===null||r.length===0)return!1;var e=r,t=/\/([gim]*)$/.exec(r),i="";return!(e[0]==="/"&&(t&&(i=t[1]),i.length>3||e[e.length-i.length-1]!=="/"))}function zhe(r){var e=r,t=/\/([gim]*)$/.exec(r),i="";return e[0]==="/"&&(t&&(i=t[1]),e=e.slice(1,e.length-i.length-1)),new RegExp(e,i)}function Vhe(r){var e="/"+r.source+"/";return r.global&&(e+="g"),r.multiline&&(e+="m"),r.ignoreCase&&(e+="i"),e}function 
Xhe(r){return Object.prototype.toString.call(r)==="[object RegExp]"}a2.exports=new Jhe("tag:yaml.org,2002:js/regexp",{kind:"scalar",resolve:Whe,construct:zhe,predicate:Xhe,represent:Vhe})});var u2=w((WZe,c2)=>{"use strict";var mI;try{l2=J,mI=l2("esprima")}catch{typeof window<"u"&&(mI=window.esprima)}var l2,Zhe=si();function _he(r){if(r===null)return!1;try{var e="("+r+")",t=mI.parse(e,{range:!0});return!(t.type!=="Program"||t.body.length!==1||t.body[0].type!=="ExpressionStatement"||t.body[0].expression.type!=="ArrowFunctionExpression"&&t.body[0].expression.type!=="FunctionExpression")}catch{return!1}}function $he(r){var e="("+r+")",t=mI.parse(e,{range:!0}),i=[],n;if(t.type!=="Program"||t.body.length!==1||t.body[0].type!=="ExpressionStatement"||t.body[0].expression.type!=="ArrowFunctionExpression"&&t.body[0].expression.type!=="FunctionExpression")throw new Error("Failed to resolve function");return t.body[0].expression.params.forEach(function(s){i.push(s.name)}),n=t.body[0].expression.body.range,t.body[0].expression.body.type==="BlockStatement"?new Function(i,e.slice(n[0]+1,n[1]-1)):new Function(i,"return "+e.slice(n[0],n[1]))}function epe(r){return r.toString()}function tpe(r){return Object.prototype.toString.call(r)==="[object Function]"}c2.exports=new Zhe("tag:yaml.org,2002:js/function",{kind:"scalar",resolve:_he,construct:$he,predicate:tpe,represent:epe})});var Xp=w((VZe,f2)=>{"use strict";var g2=rc();f2.exports=g2.DEFAULT=new g2({include:[Lg()],explicit:[o2(),A2(),u2()]})});var R2=w((XZe,Zp)=>{"use strict";var Ba=tc(),I2=Ng(),rpe=IU(),y2=Lg(),ipe=Xp(),RA=Object.prototype.hasOwnProperty,EI=1,w2=2,B2=3,II=4,JS=1,npe=2,h2=3,spe=/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x84\x86-\x9F\uFFFE\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/,ope=/[\x85\u2028\u2029]/,ape=/[,\[\]\{\}]/,b2=/^(?:!|!!|![a-z\-]+!)$/i,Q2=/^(?:!|[^,\[\]\{\}])(?:%[0-9a-f]{2}|[0-9a-z\-#;\/\?:@&=\+\$,_\.!~\*'\(\)\[\]])*$/i;function p2(r){return Object.prototype.toString.call(r)}function vo(r){return r===10||r===13}function sc(r){return r===9||r===32}function fn(r){return r===9||r===32||r===10||r===13}function Mg(r){return r===44||r===91||r===93||r===123||r===125}function Ape(r){var e;return 48<=r&&r<=57?r-48:(e=r|32,97<=e&&e<=102?e-97+10:-1)}function lpe(r){return r===120?2:r===117?4:r===85?8:0}function cpe(r){return 48<=r&&r<=57?r-48:-1}function d2(r){return r===48?"\0":r===97?"\x07":r===98?"\b":r===116||r===9?" ":r===110?` +`:r===118?"\v":r===102?"\f":r===114?"\r":r===101?"\x1B":r===32?" 
":r===34?'"':r===47?"/":r===92?"\\":r===78?"\x85":r===95?"\xA0":r===76?"\u2028":r===80?"\u2029":""}function upe(r){return r<=65535?String.fromCharCode(r):String.fromCharCode((r-65536>>10)+55296,(r-65536&1023)+56320)}var S2=new Array(256),v2=new Array(256);for(nc=0;nc<256;nc++)S2[nc]=d2(nc)?1:0,v2[nc]=d2(nc);var nc;function gpe(r,e){this.input=r,this.filename=e.filename||null,this.schema=e.schema||ipe,this.onWarning=e.onWarning||null,this.legacy=e.legacy||!1,this.json=e.json||!1,this.listener=e.listener||null,this.implicitTypes=this.schema.compiledImplicit,this.typeMap=this.schema.compiledTypeMap,this.length=r.length,this.position=0,this.line=0,this.lineStart=0,this.lineIndent=0,this.documents=[]}function x2(r,e){return new I2(e,new rpe(r.filename,r.input,r.position,r.line,r.position-r.lineStart))}function ft(r,e){throw x2(r,e)}function yI(r,e){r.onWarning&&r.onWarning.call(null,x2(r,e))}var C2={YAML:function(e,t,i){var n,s,o;e.version!==null&&ft(e,"duplication of %YAML directive"),i.length!==1&&ft(e,"YAML directive accepts exactly one argument"),n=/^([0-9]+)\.([0-9]+)$/.exec(i[0]),n===null&&ft(e,"ill-formed argument of the YAML directive"),s=parseInt(n[1],10),o=parseInt(n[2],10),s!==1&&ft(e,"unacceptable YAML version of the document"),e.version=i[0],e.checkLineBreaks=o<2,o!==1&&o!==2&&yI(e,"unsupported YAML version of the document")},TAG:function(e,t,i){var n,s;i.length!==2&&ft(e,"TAG directive accepts exactly two arguments"),n=i[0],s=i[1],b2.test(n)||ft(e,"ill-formed tag handle (first argument) of the TAG directive"),RA.call(e.tagMap,n)&&ft(e,'there is a previously declared suffix for "'+n+'" tag handle'),Q2.test(s)||ft(e,"ill-formed tag prefix (second argument) of the TAG directive"),e.tagMap[n]=s}};function kA(r,e,t,i){var n,s,o,a;if(e1&&(r.result+=Ba.repeat(` +`,e-1))}function fpe(r,e,t){var i,n,s,o,a,l,c,u,g=r.kind,f=r.result,h;if(h=r.input.charCodeAt(r.position),fn(h)||Mg(h)||h===35||h===38||h===42||h===33||h===124||h===62||h===39||h===34||h===37||h===64||h===96||(h===63||h===45)&&(n=r.input.charCodeAt(r.position+1),fn(n)||t&&Mg(n)))return!1;for(r.kind="scalar",r.result="",s=o=r.position,a=!1;h!==0;){if(h===58){if(n=r.input.charCodeAt(r.position+1),fn(n)||t&&Mg(n))break}else if(h===35){if(i=r.input.charCodeAt(r.position-1),fn(i))break}else{if(r.position===r.lineStart&&wI(r)||t&&Mg(h))break;if(vo(h))if(l=r.line,c=r.lineStart,u=r.lineIndent,zr(r,!1,-1),r.lineIndent>=e){a=!0,h=r.input.charCodeAt(r.position);continue}else{r.position=o,r.line=l,r.lineStart=c,r.lineIndent=u;break}}a&&(kA(r,s,o,!1),zS(r,r.line-l),s=o=r.position,a=!1),sc(h)||(o=r.position+1),h=r.input.charCodeAt(++r.position)}return kA(r,s,o,!1),r.result?!0:(r.kind=g,r.result=f,!1)}function hpe(r,e){var t,i,n;if(t=r.input.charCodeAt(r.position),t!==39)return!1;for(r.kind="scalar",r.result="",r.position++,i=n=r.position;(t=r.input.charCodeAt(r.position))!==0;)if(t===39)if(kA(r,i,r.position,!0),t=r.input.charCodeAt(++r.position),t===39)i=r.position,r.position++,n=r.position;else return!0;else vo(t)?(kA(r,i,n,!0),zS(r,zr(r,!1,e)),i=n=r.position):r.position===r.lineStart&&wI(r)?ft(r,"unexpected end of the document within a single quoted scalar"):(r.position++,n=r.position);ft(r,"unexpected end of the stream within a single quoted scalar")}function ppe(r,e){var t,i,n,s,o,a;if(a=r.input.charCodeAt(r.position),a!==34)return!1;for(r.kind="scalar",r.result="",r.position++,t=i=r.position;(a=r.input.charCodeAt(r.position))!==0;){if(a===34)return 
kA(r,t,r.position,!0),r.position++,!0;if(a===92){if(kA(r,t,r.position,!0),a=r.input.charCodeAt(++r.position),vo(a))zr(r,!1,e);else if(a<256&&S2[a])r.result+=v2[a],r.position++;else if((o=lpe(a))>0){for(n=o,s=0;n>0;n--)a=r.input.charCodeAt(++r.position),(o=Ape(a))>=0?s=(s<<4)+o:ft(r,"expected hexadecimal character");r.result+=upe(s),r.position++}else ft(r,"unknown escape sequence");t=i=r.position}else vo(a)?(kA(r,t,i,!0),zS(r,zr(r,!1,e)),t=i=r.position):r.position===r.lineStart&&wI(r)?ft(r,"unexpected end of the document within a double quoted scalar"):(r.position++,i=r.position)}ft(r,"unexpected end of the stream within a double quoted scalar")}function dpe(r,e){var t=!0,i,n=r.tag,s,o=r.anchor,a,l,c,u,g,f={},h,p,C,y;if(y=r.input.charCodeAt(r.position),y===91)l=93,g=!1,s=[];else if(y===123)l=125,g=!0,s={};else return!1;for(r.anchor!==null&&(r.anchorMap[r.anchor]=s),y=r.input.charCodeAt(++r.position);y!==0;){if(zr(r,!0,e),y=r.input.charCodeAt(r.position),y===l)return r.position++,r.tag=n,r.anchor=o,r.kind=g?"mapping":"sequence",r.result=s,!0;t||ft(r,"missed comma between flow collection entries"),p=h=C=null,c=u=!1,y===63&&(a=r.input.charCodeAt(r.position+1),fn(a)&&(c=u=!0,r.position++,zr(r,!0,e))),i=r.line,Kg(r,e,EI,!1,!0),p=r.tag,h=r.result,zr(r,!0,e),y=r.input.charCodeAt(r.position),(u||r.line===i)&&y===58&&(c=!0,y=r.input.charCodeAt(++r.position),zr(r,!0,e),Kg(r,e,EI,!1,!0),C=r.result),g?Og(r,s,f,p,h,C):c?s.push(Og(r,null,f,p,h,C)):s.push(h),zr(r,!0,e),y=r.input.charCodeAt(r.position),y===44?(t=!0,y=r.input.charCodeAt(++r.position)):t=!1}ft(r,"unexpected end of the stream within a flow collection")}function Cpe(r,e){var t,i,n=JS,s=!1,o=!1,a=e,l=0,c=!1,u,g;if(g=r.input.charCodeAt(r.position),g===124)i=!1;else if(g===62)i=!0;else return!1;for(r.kind="scalar",r.result="";g!==0;)if(g=r.input.charCodeAt(++r.position),g===43||g===45)JS===n?n=g===43?h2:npe:ft(r,"repeat of a chomping mode identifier");else if((u=cpe(g))>=0)u===0?ft(r,"bad explicit indentation width of a block scalar; it cannot be less than one"):o?ft(r,"repeat of an indentation width identifier"):(a=e+u-1,o=!0);else break;if(sc(g)){do g=r.input.charCodeAt(++r.position);while(sc(g));if(g===35)do g=r.input.charCodeAt(++r.position);while(!vo(g)&&g!==0)}for(;g!==0;){for(WS(r),r.lineIndent=0,g=r.input.charCodeAt(r.position);(!o||r.lineIndenta&&(a=r.lineIndent),vo(g)){l++;continue}if(r.lineIndente)&&l!==0)ft(r,"bad indentation of a sequence entry");else if(r.lineIndente)&&(Kg(r,e,II,!0,n)&&(p?f=r.result:h=r.result),p||(Og(r,c,u,g,f,h,s,o),g=f=h=null),zr(r,!0,-1),y=r.input.charCodeAt(r.position)),r.lineIndent>e&&y!==0)ft(r,"bad indentation of a mapping entry");else if(r.lineIndente?l=1:r.lineIndent===e?l=0:r.lineIndente?l=1:r.lineIndent===e?l=0:r.lineIndent tag; it should be "scalar", not "'+r.kind+'"'),g=0,f=r.implicitTypes.length;g tag; it should be "'+h.kind+'", not "'+r.kind+'"'),h.resolve(r.result)?(r.result=h.construct(r.result),r.anchor!==null&&(r.anchorMap[r.anchor]=r.result)):ft(r,"cannot resolve a node with !<"+r.tag+"> explicit tag")):ft(r,"unknown tag !<"+r.tag+">");return r.listener!==null&&r.listener("close",r),r.tag!==null||r.anchor!==null||u}function wpe(r){var 
e=r.position,t,i,n,s=!1,o;for(r.version=null,r.checkLineBreaks=r.legacy,r.tagMap={},r.anchorMap={};(o=r.input.charCodeAt(r.position))!==0&&(zr(r,!0,-1),o=r.input.charCodeAt(r.position),!(r.lineIndent>0||o!==37));){for(s=!0,o=r.input.charCodeAt(++r.position),t=r.position;o!==0&&!fn(o);)o=r.input.charCodeAt(++r.position);for(i=r.input.slice(t,r.position),n=[],i.length<1&&ft(r,"directive name must not be less than one character in length");o!==0;){for(;sc(o);)o=r.input.charCodeAt(++r.position);if(o===35){do o=r.input.charCodeAt(++r.position);while(o!==0&&!vo(o));break}if(vo(o))break;for(t=r.position;o!==0&&!fn(o);)o=r.input.charCodeAt(++r.position);n.push(r.input.slice(t,r.position))}o!==0&&WS(r),RA.call(C2,i)?C2[i](r,i,n):yI(r,'unknown document directive "'+i+'"')}if(zr(r,!0,-1),r.lineIndent===0&&r.input.charCodeAt(r.position)===45&&r.input.charCodeAt(r.position+1)===45&&r.input.charCodeAt(r.position+2)===45?(r.position+=3,zr(r,!0,-1)):s&&ft(r,"directives end mark is expected"),Kg(r,r.lineIndent-1,II,!1,!0),zr(r,!0,-1),r.checkLineBreaks&&ope.test(r.input.slice(e,r.position))&&yI(r,"non-ASCII line breaks are interpreted as content"),r.documents.push(r.result),r.position===r.lineStart&&wI(r)){r.input.charCodeAt(r.position)===46&&(r.position+=3,zr(r,!0,-1));return}if(r.position"u"&&(t=e,e=null);var i=P2(r,t);if(typeof e!="function")return i;for(var n=0,s=i.length;n"u"&&(t=e,e=null),D2(r,e,Ba.extend({schema:y2},t))}function bpe(r,e){return k2(r,Ba.extend({schema:y2},e))}Zp.exports.loadAll=D2;Zp.exports.load=k2;Zp.exports.safeLoadAll=Bpe;Zp.exports.safeLoad=bpe});var tH=w((ZZe,_S)=>{"use strict";var $p=tc(),ed=Ng(),Qpe=Xp(),Spe=Lg(),U2=Object.prototype.toString,H2=Object.prototype.hasOwnProperty,vpe=9,_p=10,xpe=13,Ppe=32,Dpe=33,kpe=34,G2=35,Rpe=37,Fpe=38,Npe=39,Tpe=42,Y2=44,Lpe=45,j2=58,Mpe=61,Ope=62,Kpe=63,Upe=64,q2=91,J2=93,Hpe=96,W2=123,Gpe=124,z2=125,Ni={};Ni[0]="\\0";Ni[7]="\\a";Ni[8]="\\b";Ni[9]="\\t";Ni[10]="\\n";Ni[11]="\\v";Ni[12]="\\f";Ni[13]="\\r";Ni[27]="\\e";Ni[34]='\\"';Ni[92]="\\\\";Ni[133]="\\N";Ni[160]="\\_";Ni[8232]="\\L";Ni[8233]="\\P";var Ype=["y","Y","yes","Yes","YES","on","On","ON","n","N","no","No","NO","off","Off","OFF"];function jpe(r,e){var t,i,n,s,o,a,l;if(e===null)return{};for(t={},i=Object.keys(e),n=0,s=i.length;n0?r.charCodeAt(s-1):null,f=f&&T2(o,a)}else{for(s=0;si&&r[g+1]!==" ",g=s);else if(!Ug(o))return BI;a=s>0?r.charCodeAt(s-1):null,f=f&&T2(o,a)}c=c||u&&s-g-1>i&&r[g+1]!==" "}return!l&&!c?f&&!n(r)?X2:Z2:t>9&&V2(r)?BI:c?$2:_2}function Xpe(r,e,t,i){r.dump=function(){if(e.length===0)return"''";if(!r.noCompatMode&&Ype.indexOf(e)!==-1)return"'"+e+"'";var n=r.indent*Math.max(1,t),s=r.lineWidth===-1?-1:Math.max(Math.min(r.lineWidth,40),r.lineWidth-n),o=i||r.flowLevel>-1&&t>=r.flowLevel;function a(l){return Jpe(r,l)}switch(Vpe(e,o,r.indent,s,a)){case X2:return e;case Z2:return"'"+e.replace(/'/g,"''")+"'";case _2:return"|"+L2(e,r.indent)+M2(N2(e,n));case $2:return">"+L2(e,r.indent)+M2(N2(Zpe(e,s),n));case BI:return'"'+_pe(e,s)+'"';default:throw new ed("impossible error: invalid scalar style")}}()}function L2(r,e){var t=V2(r)?String(e):"",i=r[r.length-1]===` +`,n=i&&(r[r.length-2]===` +`||r===` +`),s=n?"+":i?"":"-";return t+s+` +`}function M2(r){return r[r.length-1]===` +`?r.slice(0,-1):r}function Zpe(r,e){for(var t=/(\n+)([^\n]*)/g,i=function(){var c=r.indexOf(` +`);return c=c!==-1?c:r.length,t.lastIndex=c,O2(r.slice(0,c),e)}(),n=r[0]===` +`||r[0]===" ",s,o;o=t.exec(r);){var a=o[1],l=o[2];s=l[0]===" ",i+=a+(!n&&!s&&l!==""?` +`:"")+O2(l,e),n=s}return i}function 
O2(r,e){if(r===""||r[0]===" ")return r;for(var t=/ [^ ]/g,i,n=0,s,o=0,a=0,l="";i=t.exec(r);)a=i.index,a-n>e&&(s=o>n?o:a,l+=` +`+r.slice(n,s),n=s+1),o=a;return l+=` +`,r.length-n>e&&o>n?l+=r.slice(n,o)+` +`+r.slice(o+1):l+=r.slice(n),l.slice(1)}function _pe(r){for(var e="",t,i,n,s=0;s=55296&&t<=56319&&(i=r.charCodeAt(s+1),i>=56320&&i<=57343)){e+=F2((t-55296)*1024+i-56320+65536),s++;continue}n=Ni[t],e+=!n&&Ug(t)?r[s]:n||F2(t)}return e}function $pe(r,e,t){var i="",n=r.tag,s,o;for(s=0,o=t.length;s1024&&(u+="? "),u+=r.dump+(r.condenseFlow?'"':"")+":"+(r.condenseFlow?"":" "),oc(r,e,c,!1,!1)&&(u+=r.dump,i+=u));r.tag=n,r.dump="{"+i+"}"}function rde(r,e,t,i){var n="",s=r.tag,o=Object.keys(t),a,l,c,u,g,f;if(r.sortKeys===!0)o.sort();else if(typeof r.sortKeys=="function")o.sort(r.sortKeys);else if(r.sortKeys)throw new ed("sortKeys must be a boolean or a function");for(a=0,l=o.length;a1024,g&&(r.dump&&_p===r.dump.charCodeAt(0)?f+="?":f+="? "),f+=r.dump,g&&(f+=VS(r,e)),oc(r,e+1,u,!0,g)&&(r.dump&&_p===r.dump.charCodeAt(0)?f+=":":f+=": ",f+=r.dump,n+=f));r.tag=s,r.dump=n||"{}"}function K2(r,e,t){var i,n,s,o,a,l;for(n=t?r.explicitTypes:r.implicitTypes,s=0,o=n.length;s tag resolver accepts not "'+l+'" style');r.dump=i}return!0}return!1}function oc(r,e,t,i,n,s){r.tag=null,r.dump=t,K2(r,t,!1)||K2(r,t,!0);var o=U2.call(r.dump);i&&(i=r.flowLevel<0||r.flowLevel>e);var a=o==="[object Object]"||o==="[object Array]",l,c;if(a&&(l=r.duplicates.indexOf(t),c=l!==-1),(r.tag!==null&&r.tag!=="?"||c||r.indent!==2&&e>0)&&(n=!1),c&&r.usedDuplicates[l])r.dump="*ref_"+l;else{if(a&&c&&!r.usedDuplicates[l]&&(r.usedDuplicates[l]=!0),o==="[object Object]")i&&Object.keys(r.dump).length!==0?(rde(r,e,r.dump,n),c&&(r.dump="&ref_"+l+r.dump)):(tde(r,e,r.dump),c&&(r.dump="&ref_"+l+" "+r.dump));else if(o==="[object Array]"){var u=r.noArrayIndent&&e>0?e-1:e;i&&r.dump.length!==0?(ede(r,u,r.dump,n),c&&(r.dump="&ref_"+l+r.dump)):($pe(r,u,r.dump),c&&(r.dump="&ref_"+l+" "+r.dump))}else if(o==="[object String]")r.tag!=="?"&&Xpe(r,r.dump,e,s);else{if(r.skipInvalid)return!1;throw new ed("unacceptable kind of an object to dump "+o)}r.tag!==null&&r.tag!=="?"&&(r.dump="!<"+r.tag+"> "+r.dump)}return!0}function ide(r,e){var t=[],i=[],n,s;for(XS(r,t,i),n=0,s=i.length;n{"use strict";var bI=R2(),rH=tH();function QI(r){return function(){throw new Error("Function "+r+" is deprecated and cannot be used.")}}Fr.exports.Type=si();Fr.exports.Schema=rc();Fr.exports.FAILSAFE_SCHEMA=CI();Fr.exports.JSON_SCHEMA=YS();Fr.exports.CORE_SCHEMA=jS();Fr.exports.DEFAULT_SAFE_SCHEMA=Lg();Fr.exports.DEFAULT_FULL_SCHEMA=Xp();Fr.exports.load=bI.load;Fr.exports.loadAll=bI.loadAll;Fr.exports.safeLoad=bI.safeLoad;Fr.exports.safeLoadAll=bI.safeLoadAll;Fr.exports.dump=rH.dump;Fr.exports.safeDump=rH.safeDump;Fr.exports.YAMLException=Ng();Fr.exports.MINIMAL_SCHEMA=CI();Fr.exports.SAFE_SCHEMA=Lg();Fr.exports.DEFAULT_SCHEMA=Xp();Fr.exports.scan=QI("scan");Fr.exports.parse=QI("parse");Fr.exports.compose=QI("compose");Fr.exports.addConstructor=QI("addConstructor")});var sH=w(($Ze,nH)=>{"use strict";var sde=iH();nH.exports=sde});var aH=w((e_e,oH)=>{"use strict";function ode(r,e){function t(){this.constructor=r}t.prototype=e.prototype,r.prototype=new t}function ac(r,e,t,i){this.message=r,this.expected=e,this.found=t,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,ac)}ode(ac,Error);ac.buildMessage=function(r,e){var t={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var 
u="",g;for(g=0;g0){for(g=1,f=1;g({[Ke]:Ce})))},H=function(R){return R},j=function(R){return R},$=Us("correct indentation"),V=" ",W=ar(" ",!1),_=function(R){return R.length===QA*yg},A=function(R){return R.length===(QA+1)*yg},Ae=function(){return QA++,!0},ge=function(){return QA--,!0},re=function(){return pg()},M=Us("pseudostring"),F=/^[^\r\n\t ?:,\][{}#&*!|>'"%@`\-]/,ue=Tn(["\r",` +`," "," ","?",":",",","]","[","{","}","#","&","*","!","|",">","'",'"',"%","@","`","-"],!0,!1),pe=/^[^\r\n\t ,\][{}:#"']/,ke=Tn(["\r",` +`," "," ",",","]","[","{","}",":","#",'"',"'"],!0,!1),Fe=function(){return pg().replace(/^ *| *$/g,"")},Ne="--",oe=ar("--",!1),le=/^[a-zA-Z\/0-9]/,Be=Tn([["a","z"],["A","Z"],"/",["0","9"]],!1,!1),fe=/^[^\r\n\t :,]/,ae=Tn(["\r",` +`," "," ",":",","],!0,!1),qe="null",ne=ar("null",!1),Y=function(){return null},he="true",ie=ar("true",!1),de=function(){return!0},_e="false",Pt=ar("false",!1),It=function(){return!1},Mr=Us("string"),ii='"',gi=ar('"',!1),hr=function(){return""},fi=function(R){return R},ni=function(R){return R.join("")},Ks=/^[^"\\\0-\x1F\x7F]/,pr=Tn(['"',"\\",["\0",""],"\x7F"],!0,!1),Ii='\\"',rs=ar('\\"',!1),fa=function(){return'"'},CA="\\\\",cg=ar("\\\\",!1),is=function(){return"\\"},mA="\\/",ha=ar("\\/",!1),wp=function(){return"/"},EA="\\b",IA=ar("\\b",!1),wr=function(){return"\b"},Tl="\\f",ug=ar("\\f",!1),Io=function(){return"\f"},gg="\\n",Bp=ar("\\n",!1),bp=function(){return` +`},vr="\\r",se=ar("\\r",!1),yo=function(){return"\r"},Fn="\\t",fg=ar("\\t",!1),bt=function(){return" "},Ll="\\u",Nn=ar("\\u",!1),ns=function(R,q,Ce,Ke){return String.fromCharCode(parseInt(`0x${R}${q}${Ce}${Ke}`))},ss=/^[0-9a-fA-F]/,gt=Tn([["0","9"],["a","f"],["A","F"]],!1,!1),wo=Us("blank space"),At=/^[ \t]/,ln=Tn([" "," "],!1,!1),S=Us("white space"),Lt=/^[ \t\n\r]/,hg=Tn([" "," ",` +`,"\r"],!1,!1),Ml=`\r +`,Qp=ar(`\r +`,!1),Sp=` +`,vp=ar(` +`,!1),xp="\r",Pp=ar("\r",!1),G=0,yt=0,yA=[{line:1,column:1}],zi=0,Ol=[],Xe=0,pa;if("startRule"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule "`+e.startRule+'".');n=i[e.startRule]}function pg(){return r.substring(yt,G)}function ME(){return cn(yt,G)}function Dp(R,q){throw q=q!==void 0?q:cn(yt,G),Ul([Us(R)],r.substring(yt,G),q)}function OE(R,q){throw q=q!==void 0?q:cn(yt,G),dg(R,q)}function ar(R,q){return{type:"literal",text:R,ignoreCase:q}}function Tn(R,q,Ce){return{type:"class",parts:R,inverted:q,ignoreCase:Ce}}function Kl(){return{type:"any"}}function kp(){return{type:"end"}}function Us(R){return{type:"other",description:R}}function da(R){var q=yA[R],Ce;if(q)return q;for(Ce=R-1;!yA[Ce];)Ce--;for(q=yA[Ce],q={line:q.line,column:q.column};Cezi&&(zi=G,Ol=[]),Ol.push(R))}function dg(R,q){return new ac(R,null,null,q)}function Ul(R,q,Ce){return new ac(ac.buildMessage(R,q),R,q,Ce)}function Hs(){var R;return R=Cg(),R}function Hl(){var R,q,Ce;for(R=G,q=[],Ce=wA();Ce!==t;)q.push(Ce),Ce=wA();return q!==t&&(yt=R,q=s(q)),R=q,R}function wA(){var R,q,Ce,Ke,Re;return R=G,q=ma(),q!==t?(r.charCodeAt(G)===45?(Ce=o,G++):(Ce=t,Xe===0&&Le(a)),Ce!==t?(Ke=Rr(),Ke!==t?(Re=Ca(),Re!==t?(yt=R,q=l(Re),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R}function Cg(){var R,q,Ce;for(R=G,q=[],Ce=mg();Ce!==t;)q.push(Ce),Ce=mg();return q!==t&&(yt=R,q=c(q)),R=q,R}function mg(){var R,q,Ce,Ke,Re,ze,dt,Ft,Ln;if(R=G,q=Rr(),q===t&&(q=null),q!==t){if(Ce=G,r.charCodeAt(G)===35?(Ke=u,G++):(Ke=t,Xe===0&&Le(g)),Ke!==t){if(Re=[],ze=G,dt=G,Xe++,Ft=js(),Xe--,Ft===t?dt=void 
0:(G=dt,dt=t),dt!==t?(r.length>G?(Ft=r.charAt(G),G++):(Ft=t,Xe===0&&Le(f)),Ft!==t?(dt=[dt,Ft],ze=dt):(G=ze,ze=t)):(G=ze,ze=t),ze!==t)for(;ze!==t;)Re.push(ze),ze=G,dt=G,Xe++,Ft=js(),Xe--,Ft===t?dt=void 0:(G=dt,dt=t),dt!==t?(r.length>G?(Ft=r.charAt(G),G++):(Ft=t,Xe===0&&Le(f)),Ft!==t?(dt=[dt,Ft],ze=dt):(G=ze,ze=t)):(G=ze,ze=t);else Re=t;Re!==t?(Ke=[Ke,Re],Ce=Ke):(G=Ce,Ce=t)}else G=Ce,Ce=t;if(Ce===t&&(Ce=null),Ce!==t){if(Ke=[],Re=Ys(),Re!==t)for(;Re!==t;)Ke.push(Re),Re=Ys();else Ke=t;Ke!==t?(yt=R,q=h(),R=q):(G=R,R=t)}else G=R,R=t}else G=R,R=t;if(R===t&&(R=G,q=ma(),q!==t?(Ce=Gl(),Ce!==t?(Ke=Rr(),Ke===t&&(Ke=null),Ke!==t?(r.charCodeAt(G)===58?(Re=p,G++):(Re=t,Xe===0&&Le(C)),Re!==t?(ze=Rr(),ze===t&&(ze=null),ze!==t?(dt=Ca(),dt!==t?(yt=R,q=y(Ce,dt),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t&&(R=G,q=ma(),q!==t?(Ce=Gs(),Ce!==t?(Ke=Rr(),Ke===t&&(Ke=null),Ke!==t?(r.charCodeAt(G)===58?(Re=p,G++):(Re=t,Xe===0&&Le(C)),Re!==t?(ze=Rr(),ze===t&&(ze=null),ze!==t?(dt=Ca(),dt!==t?(yt=R,q=y(Ce,dt),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t))){if(R=G,q=ma(),q!==t)if(Ce=Gs(),Ce!==t)if(Ke=Rr(),Ke!==t)if(Re=KE(),Re!==t){if(ze=[],dt=Ys(),dt!==t)for(;dt!==t;)ze.push(dt),dt=Ys();else ze=t;ze!==t?(yt=R,q=y(Ce,Re),R=q):(G=R,R=t)}else G=R,R=t;else G=R,R=t;else G=R,R=t;else G=R,R=t;if(R===t)if(R=G,q=ma(),q!==t)if(Ce=Gs(),Ce!==t){if(Ke=[],Re=G,ze=Rr(),ze===t&&(ze=null),ze!==t?(r.charCodeAt(G)===44?(dt=B,G++):(dt=t,Xe===0&&Le(v)),dt!==t?(Ft=Rr(),Ft===t&&(Ft=null),Ft!==t?(Ln=Gs(),Ln!==t?(yt=Re,ze=D(Ce,Ln),Re=ze):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t),Re!==t)for(;Re!==t;)Ke.push(Re),Re=G,ze=Rr(),ze===t&&(ze=null),ze!==t?(r.charCodeAt(G)===44?(dt=B,G++):(dt=t,Xe===0&&Le(v)),dt!==t?(Ft=Rr(),Ft===t&&(Ft=null),Ft!==t?(Ln=Gs(),Ln!==t?(yt=Re,ze=D(Ce,Ln),Re=ze):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t);else Ke=t;Ke!==t?(Re=Rr(),Re===t&&(Re=null),Re!==t?(r.charCodeAt(G)===58?(ze=p,G++):(ze=t,Xe===0&&Le(C)),ze!==t?(dt=Rr(),dt===t&&(dt=null),dt!==t?(Ft=Ca(),Ft!==t?(yt=R,q=T(Ce,Ke,Ft),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)}else G=R,R=t;else G=R,R=t}return R}function Ca(){var R,q,Ce,Ke,Re,ze,dt;if(R=G,q=G,Xe++,Ce=G,Ke=js(),Ke!==t?(Re=rt(),Re!==t?(r.charCodeAt(G)===45?(ze=o,G++):(ze=t,Xe===0&&Le(a)),ze!==t?(dt=Rr(),dt!==t?(Ke=[Ke,Re,ze,dt],Ce=Ke):(G=Ce,Ce=t)):(G=Ce,Ce=t)):(G=Ce,Ce=t)):(G=Ce,Ce=t),Xe--,Ce!==t?(G=q,q=void 0):q=t,q!==t?(Ce=Ys(),Ce!==t?(Ke=Bo(),Ke!==t?(Re=Hl(),Re!==t?(ze=BA(),ze!==t?(yt=R,q=H(Re),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t&&(R=G,q=js(),q!==t?(Ce=Bo(),Ce!==t?(Ke=Cg(),Ke!==t?(Re=BA(),Re!==t?(yt=R,q=H(Ke),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t))if(R=G,q=Yl(),q!==t){if(Ce=[],Ke=Ys(),Ke!==t)for(;Ke!==t;)Ce.push(Ke),Ke=Ys();else Ce=t;Ce!==t?(yt=R,q=j(q),R=q):(G=R,R=t)}else G=R,R=t;return R}function ma(){var R,q,Ce;for(Xe++,R=G,q=[],r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Le(W));Ce!==t;)q.push(Ce),r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Le(W));return q!==t?(yt=G,Ce=_(q),Ce?Ce=void 0:Ce=t,Ce!==t?(q=[q,Ce],R=q):(G=R,R=t)):(G=R,R=t),Xe--,R===t&&(q=t,Xe===0&&Le($)),R}function rt(){var R,q,Ce;for(R=G,q=[],r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Le(W));Ce!==t;)q.push(Ce),r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Le(W));return q!==t?(yt=G,Ce=A(q),Ce?Ce=void 0:Ce=t,Ce!==t?(q=[q,Ce],R=q):(G=R,R=t)):(G=R,R=t),R}function Bo(){var R;return yt=G,R=Ae(),R?R=void 0:R=t,R}function BA(){var R;return yt=G,R=ge(),R?R=void 0:R=t,R}function Gl(){var R;return 
R=jl(),R===t&&(R=Rp()),R}function Gs(){var R,q,Ce;if(R=jl(),R===t){if(R=G,q=[],Ce=Eg(),Ce!==t)for(;Ce!==t;)q.push(Ce),Ce=Eg();else q=t;q!==t&&(yt=R,q=re()),R=q}return R}function Yl(){var R;return R=Fp(),R===t&&(R=UE(),R===t&&(R=jl(),R===t&&(R=Rp()))),R}function KE(){var R;return R=Fp(),R===t&&(R=jl(),R===t&&(R=Eg())),R}function Rp(){var R,q,Ce,Ke,Re,ze;if(Xe++,R=G,F.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Le(ue)),q!==t){for(Ce=[],Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(pe.test(r.charAt(G))?(ze=r.charAt(G),G++):(ze=t,Xe===0&&Le(ke)),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ke!==t;)Ce.push(Ke),Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(pe.test(r.charAt(G))?(ze=r.charAt(G),G++):(ze=t,Xe===0&&Le(ke)),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ce!==t?(yt=R,q=Fe(),R=q):(G=R,R=t)}else G=R,R=t;return Xe--,R===t&&(q=t,Xe===0&&Le(M)),R}function Eg(){var R,q,Ce,Ke,Re;if(R=G,r.substr(G,2)===Ne?(q=Ne,G+=2):(q=t,Xe===0&&Le(oe)),q===t&&(q=null),q!==t)if(le.test(r.charAt(G))?(Ce=r.charAt(G),G++):(Ce=t,Xe===0&&Le(Be)),Ce!==t){for(Ke=[],fe.test(r.charAt(G))?(Re=r.charAt(G),G++):(Re=t,Xe===0&&Le(ae));Re!==t;)Ke.push(Re),fe.test(r.charAt(G))?(Re=r.charAt(G),G++):(Re=t,Xe===0&&Le(ae));Ke!==t?(yt=R,q=Fe(),R=q):(G=R,R=t)}else G=R,R=t;else G=R,R=t;return R}function Fp(){var R,q;return R=G,r.substr(G,4)===qe?(q=qe,G+=4):(q=t,Xe===0&&Le(ne)),q!==t&&(yt=R,q=Y()),R=q,R}function UE(){var R,q;return R=G,r.substr(G,4)===he?(q=he,G+=4):(q=t,Xe===0&&Le(ie)),q!==t&&(yt=R,q=de()),R=q,R===t&&(R=G,r.substr(G,5)===_e?(q=_e,G+=5):(q=t,Xe===0&&Le(Pt)),q!==t&&(yt=R,q=It()),R=q),R}function jl(){var R,q,Ce,Ke;return Xe++,R=G,r.charCodeAt(G)===34?(q=ii,G++):(q=t,Xe===0&&Le(gi)),q!==t?(r.charCodeAt(G)===34?(Ce=ii,G++):(Ce=t,Xe===0&&Le(gi)),Ce!==t?(yt=R,q=hr(),R=q):(G=R,R=t)):(G=R,R=t),R===t&&(R=G,r.charCodeAt(G)===34?(q=ii,G++):(q=t,Xe===0&&Le(gi)),q!==t?(Ce=HE(),Ce!==t?(r.charCodeAt(G)===34?(Ke=ii,G++):(Ke=t,Xe===0&&Le(gi)),Ke!==t?(yt=R,q=fi(Ce),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)),Xe--,R===t&&(q=t,Xe===0&&Le(Mr)),R}function HE(){var R,q,Ce;if(R=G,q=[],Ce=Ig(),Ce!==t)for(;Ce!==t;)q.push(Ce),Ce=Ig();else q=t;return q!==t&&(yt=R,q=ni(q)),R=q,R}function Ig(){var R,q,Ce,Ke,Re,ze;return Ks.test(r.charAt(G))?(R=r.charAt(G),G++):(R=t,Xe===0&&Le(pr)),R===t&&(R=G,r.substr(G,2)===Ii?(q=Ii,G+=2):(q=t,Xe===0&&Le(rs)),q!==t&&(yt=R,q=fa()),R=q,R===t&&(R=G,r.substr(G,2)===CA?(q=CA,G+=2):(q=t,Xe===0&&Le(cg)),q!==t&&(yt=R,q=is()),R=q,R===t&&(R=G,r.substr(G,2)===mA?(q=mA,G+=2):(q=t,Xe===0&&Le(ha)),q!==t&&(yt=R,q=wp()),R=q,R===t&&(R=G,r.substr(G,2)===EA?(q=EA,G+=2):(q=t,Xe===0&&Le(IA)),q!==t&&(yt=R,q=wr()),R=q,R===t&&(R=G,r.substr(G,2)===Tl?(q=Tl,G+=2):(q=t,Xe===0&&Le(ug)),q!==t&&(yt=R,q=Io()),R=q,R===t&&(R=G,r.substr(G,2)===gg?(q=gg,G+=2):(q=t,Xe===0&&Le(Bp)),q!==t&&(yt=R,q=bp()),R=q,R===t&&(R=G,r.substr(G,2)===vr?(q=vr,G+=2):(q=t,Xe===0&&Le(se)),q!==t&&(yt=R,q=yo()),R=q,R===t&&(R=G,r.substr(G,2)===Fn?(q=Fn,G+=2):(q=t,Xe===0&&Le(fg)),q!==t&&(yt=R,q=bt()),R=q,R===t&&(R=G,r.substr(G,2)===Ll?(q=Ll,G+=2):(q=t,Xe===0&&Le(Nn)),q!==t?(Ce=bA(),Ce!==t?(Ke=bA(),Ke!==t?(Re=bA(),Re!==t?(ze=bA(),ze!==t?(yt=R,q=ns(Ce,Ke,Re,ze),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)))))))))),R}function bA(){var R;return ss.test(r.charAt(G))?(R=r.charAt(G),G++):(R=t,Xe===0&&Le(gt)),R}function Rr(){var R,q;if(Xe++,R=[],At.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Le(ln)),q!==t)for(;q!==t;)R.push(q),At.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Le(ln));else R=t;return Xe--,R===t&&(q=t,Xe===0&&Le(wo)),R}function GE(){var 
R,q;if(Xe++,R=[],Lt.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Le(hg)),q!==t)for(;q!==t;)R.push(q),Lt.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Le(hg));else R=t;return Xe--,R===t&&(q=t,Xe===0&&Le(S)),R}function Ys(){var R,q,Ce,Ke,Re,ze;if(R=G,q=js(),q!==t){for(Ce=[],Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(ze=js(),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ke!==t;)Ce.push(Ke),Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(ze=js(),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ce!==t?(q=[q,Ce],R=q):(G=R,R=t)}else G=R,R=t;return R}function js(){var R;return r.substr(G,2)===Ml?(R=Ml,G+=2):(R=t,Xe===0&&Le(Qp)),R===t&&(r.charCodeAt(G)===10?(R=Sp,G++):(R=t,Xe===0&&Le(vp)),R===t&&(r.charCodeAt(G)===13?(R=xp,G++):(R=t,Xe===0&&Le(Pp)))),R}let yg=2,QA=0;if(pa=n(),pa!==t&&G===r.length)return pa;throw pa!==t&&G{"use strict";var gde=r=>{let e=!1,t=!1,i=!1;for(let n=0;n{if(!(typeof r=="string"||Array.isArray(r)))throw new TypeError("Expected the input to be `string | string[]`");e=Object.assign({pascalCase:!1},e);let t=n=>e.pascalCase?n.charAt(0).toUpperCase()+n.slice(1):n;return Array.isArray(r)?r=r.map(n=>n.trim()).filter(n=>n.length).join("-"):r=r.trim(),r.length===0?"":r.length===1?e.pascalCase?r.toUpperCase():r.toLowerCase():(r!==r.toLowerCase()&&(r=gde(r)),r=r.replace(/^[_.\- ]+/,"").toLowerCase().replace(/[_.\- ]+(\w|$)/g,(n,s)=>s.toUpperCase()).replace(/\d+(\w|$)/g,n=>n.toUpperCase()),t(r))};ev.exports=gH;ev.exports.default=gH});var hH=w((o_e,fde)=>{fde.exports=[{name:"AppVeyor",constant:"APPVEYOR",env:"APPVEYOR",pr:"APPVEYOR_PULL_REQUEST_NUMBER"},{name:"Azure Pipelines",constant:"AZURE_PIPELINES",env:"SYSTEM_TEAMFOUNDATIONCOLLECTIONURI",pr:"SYSTEM_PULLREQUEST_PULLREQUESTID"},{name:"Appcircle",constant:"APPCIRCLE",env:"AC_APPCIRCLE"},{name:"Bamboo",constant:"BAMBOO",env:"bamboo_planKey"},{name:"Bitbucket Pipelines",constant:"BITBUCKET",env:"BITBUCKET_COMMIT",pr:"BITBUCKET_PR_ID"},{name:"Bitrise",constant:"BITRISE",env:"BITRISE_IO",pr:"BITRISE_PULL_REQUEST"},{name:"Buddy",constant:"BUDDY",env:"BUDDY_WORKSPACE_ID",pr:"BUDDY_EXECUTION_PULL_REQUEST_ID"},{name:"Buildkite",constant:"BUILDKITE",env:"BUILDKITE",pr:{env:"BUILDKITE_PULL_REQUEST",ne:"false"}},{name:"CircleCI",constant:"CIRCLE",env:"CIRCLECI",pr:"CIRCLE_PULL_REQUEST"},{name:"Cirrus CI",constant:"CIRRUS",env:"CIRRUS_CI",pr:"CIRRUS_PR"},{name:"AWS CodeBuild",constant:"CODEBUILD",env:"CODEBUILD_BUILD_ARN"},{name:"Codefresh",constant:"CODEFRESH",env:"CF_BUILD_ID",pr:{any:["CF_PULL_REQUEST_NUMBER","CF_PULL_REQUEST_ID"]}},{name:"Codeship",constant:"CODESHIP",env:{CI_NAME:"codeship"}},{name:"Drone",constant:"DRONE",env:"DRONE",pr:{DRONE_BUILD_EVENT:"pull_request"}},{name:"dsari",constant:"DSARI",env:"DSARI"},{name:"GitHub Actions",constant:"GITHUB_ACTIONS",env:"GITHUB_ACTIONS",pr:{GITHUB_EVENT_NAME:"pull_request"}},{name:"GitLab CI",constant:"GITLAB",env:"GITLAB_CI",pr:"CI_MERGE_REQUEST_ID"},{name:"GoCD",constant:"GOCD",env:"GO_PIPELINE_LABEL"},{name:"LayerCI",constant:"LAYERCI",env:"LAYERCI",pr:"LAYERCI_PULL_REQUEST"},{name:"Hudson",constant:"HUDSON",env:"HUDSON_URL"},{name:"Jenkins",constant:"JENKINS",env:["JENKINS_URL","BUILD_ID"],pr:{any:["ghprbPullId","CHANGE_ID"]}},{name:"Magnum CI",constant:"MAGNUM",env:"MAGNUM"},{name:"Netlify CI",constant:"NETLIFY",env:"NETLIFY",pr:{env:"PULL_REQUEST",ne:"false"}},{name:"Nevercode",constant:"NEVERCODE",env:"NEVERCODE",pr:{env:"NEVERCODE_PULL_REQUEST",ne:"false"}},{name:"Render",constant:"RENDER",env:"RENDER",pr:{IS_PULL_REQUEST:"true"}},{name:"Sail 
CI",constant:"SAIL",env:"SAILCI",pr:"SAIL_PULL_REQUEST_NUMBER"},{name:"Semaphore",constant:"SEMAPHORE",env:"SEMAPHORE",pr:"PULL_REQUEST_NUMBER"},{name:"Screwdriver",constant:"SCREWDRIVER",env:"SCREWDRIVER",pr:{env:"SD_PULL_REQUEST",ne:"false"}},{name:"Shippable",constant:"SHIPPABLE",env:"SHIPPABLE",pr:{IS_PULL_REQUEST:"true"}},{name:"Solano CI",constant:"SOLANO",env:"TDDIUM",pr:"TDDIUM_PR_ID"},{name:"Strider CD",constant:"STRIDER",env:"STRIDER"},{name:"TaskCluster",constant:"TASKCLUSTER",env:["TASK_ID","RUN_ID"]},{name:"TeamCity",constant:"TEAMCITY",env:"TEAMCITY_VERSION"},{name:"Travis CI",constant:"TRAVIS",env:"TRAVIS",pr:{env:"TRAVIS_PULL_REQUEST",ne:"false"}},{name:"Vercel",constant:"VERCEL",env:"NOW_BUILDER"},{name:"Visual Studio App Center",constant:"APPCENTER",env:"APPCENTER_BUILD_ID"}]});var Ac=w(Un=>{"use strict";var dH=hH(),xo=process.env;Object.defineProperty(Un,"_vendors",{value:dH.map(function(r){return r.constant})});Un.name=null;Un.isPR=null;dH.forEach(function(r){let t=(Array.isArray(r.env)?r.env:[r.env]).every(function(i){return pH(i)});if(Un[r.constant]=t,t)switch(Un.name=r.name,typeof r.pr){case"string":Un.isPR=!!xo[r.pr];break;case"object":"env"in r.pr?Un.isPR=r.pr.env in xo&&xo[r.pr.env]!==r.pr.ne:"any"in r.pr?Un.isPR=r.pr.any.some(function(i){return!!xo[i]}):Un.isPR=pH(r.pr);break;default:Un.isPR=null}});Un.isCI=!!(xo.CI||xo.CONTINUOUS_INTEGRATION||xo.BUILD_NUMBER||xo.RUN_ID||Un.name);function pH(r){return typeof r=="string"?!!xo[r]:Object.keys(r).every(function(e){return xo[e]===r[e]})}});var hn={};ut(hn,{KeyRelationship:()=>lc,applyCascade:()=>od,base64RegExp:()=>yH,colorStringAlphaRegExp:()=>IH,colorStringRegExp:()=>EH,computeKey:()=>FA,getPrintable:()=>Vr,hasExactLength:()=>SH,hasForbiddenKeys:()=>qde,hasKeyRelationship:()=>av,hasMaxLength:()=>xde,hasMinLength:()=>vde,hasMutuallyExclusiveKeys:()=>Jde,hasRequiredKeys:()=>jde,hasUniqueItems:()=>Pde,isArray:()=>Ede,isAtLeast:()=>Rde,isAtMost:()=>Fde,isBase64:()=>Gde,isBoolean:()=>dde,isDate:()=>mde,isDict:()=>yde,isEnum:()=>Zi,isHexColor:()=>Hde,isISO8601:()=>Ude,isInExclusiveRange:()=>Tde,isInInclusiveRange:()=>Nde,isInstanceOf:()=>Bde,isInteger:()=>Lde,isJSON:()=>Yde,isLiteral:()=>hde,isLowerCase:()=>Mde,isNegative:()=>Dde,isNullable:()=>Sde,isNumber:()=>Cde,isObject:()=>wde,isOneOf:()=>bde,isOptional:()=>Qde,isPositive:()=>kde,isString:()=>sd,isTuple:()=>Ide,isUUID4:()=>Kde,isUnknown:()=>QH,isUpperCase:()=>Ode,iso8601RegExp:()=>ov,makeCoercionFn:()=>cc,makeSetter:()=>bH,makeTrait:()=>BH,makeValidator:()=>Qt,matchesRegExp:()=>ad,plural:()=>kI,pushError:()=>pt,simpleKeyRegExp:()=>mH,uuid4RegExp:()=>wH});function Qt({test:r}){return BH(r)()}function Vr(r){return r===null?"null":r===void 0?"undefined":r===""?"an empty string":JSON.stringify(r)}function FA(r,e){var t,i,n;return typeof e=="number"?`${(t=r==null?void 0:r.p)!==null&&t!==void 0?t:"."}[${e}]`:mH.test(e)?`${(i=r==null?void 0:r.p)!==null&&i!==void 0?i:""}.${e}`:`${(n=r==null?void 0:r.p)!==null&&n!==void 0?n:"."}[${JSON.stringify(e)}]`}function cc(r,e){return t=>{let i=r[e];return r[e]=t,cc(r,e).bind(null,i)}}function bH(r,e){return t=>{r[e]=t}}function kI(r,e,t){return r===1?e:t}function pt({errors:r,p:e}={},t){return r==null||r.push(`${e!=null?e:"."}: ${t}`),!1}function hde(r){return Qt({test:(e,t)=>e!==r?pt(t,`Expected a literal (got ${Vr(r)})`):!0})}function Zi(r){let e=Array.isArray(r)?r:Object.values(r),t=new Set(e);return Qt({test:(i,n)=>t.has(i)?!0:pt(n,`Expected a valid enumeration value (got ${Vr(i)})`)})}var 
mH,EH,IH,yH,wH,ov,BH,QH,sd,pde,dde,Cde,mde,Ede,Ide,yde,wde,Bde,bde,od,Qde,Sde,vde,xde,SH,Pde,Dde,kde,Rde,Fde,Nde,Tde,Lde,ad,Mde,Ode,Kde,Ude,Hde,Gde,Yde,jde,qde,Jde,lc,Wde,av,ls=Fge(()=>{mH=/^[a-zA-Z_][a-zA-Z0-9_]*$/,EH=/^#[0-9a-f]{6}$/i,IH=/^#[0-9a-f]{6}([0-9a-f]{2})?$/i,yH=/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/,wH=/^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12}$/i,ov=/^(?:[1-9]\d{3}(-?)(?:(?:0[1-9]|1[0-2])\1(?:0[1-9]|1\d|2[0-8])|(?:0[13-9]|1[0-2])\1(?:29|30)|(?:0[13578]|1[02])(?:\1)31|00[1-9]|0[1-9]\d|[12]\d{2}|3(?:[0-5]\d|6[0-5]))|(?:[1-9]\d(?:0[48]|[2468][048]|[13579][26])|(?:[2468][048]|[13579][26])00)(?:(-?)02(?:\2)29|-?366))T(?:[01]\d|2[0-3])(:?)[0-5]\d(?:\3[0-5]\d)?(?:Z|[+-][01]\d(?:\3[0-5]\d)?)$/,BH=r=>()=>r;QH=()=>Qt({test:(r,e)=>!0});sd=()=>Qt({test:(r,e)=>typeof r!="string"?pt(e,`Expected a string (got ${Vr(r)})`):!0});pde=new Map([["true",!0],["True",!0],["1",!0],[1,!0],["false",!1],["False",!1],["0",!1],[0,!1]]),dde=()=>Qt({test:(r,e)=>{var t;if(typeof r!="boolean"){if(typeof(e==null?void 0:e.coercions)<"u"){if(typeof(e==null?void 0:e.coercion)>"u")return pt(e,"Unbound coercion result");let i=pde.get(r);if(typeof i<"u")return e.coercions.push([(t=e.p)!==null&&t!==void 0?t:".",e.coercion.bind(null,i)]),!0}return pt(e,`Expected a boolean (got ${Vr(r)})`)}return!0}}),Cde=()=>Qt({test:(r,e)=>{var t;if(typeof r!="number"){if(typeof(e==null?void 0:e.coercions)<"u"){if(typeof(e==null?void 0:e.coercion)>"u")return pt(e,"Unbound coercion result");let i;if(typeof r=="string"){let n;try{n=JSON.parse(r)}catch{}if(typeof n=="number")if(JSON.stringify(n)===r)i=n;else return pt(e,`Received a number that can't be safely represented by the runtime (${r})`)}if(typeof i<"u")return e.coercions.push([(t=e.p)!==null&&t!==void 0?t:".",e.coercion.bind(null,i)]),!0}return pt(e,`Expected a number (got ${Vr(r)})`)}return!0}}),mde=()=>Qt({test:(r,e)=>{var t;if(!(r instanceof Date)){if(typeof(e==null?void 0:e.coercions)<"u"){if(typeof(e==null?void 0:e.coercion)>"u")return pt(e,"Unbound coercion result");let i;if(typeof r=="string"&&ov.test(r))i=new Date(r);else{let n;if(typeof r=="string"){let s;try{s=JSON.parse(r)}catch{}typeof s=="number"&&(n=s)}else typeof r=="number"&&(n=r);if(typeof n<"u")if(Number.isSafeInteger(n)||!Number.isSafeInteger(n*1e3))i=new Date(n*1e3);else return pt(e,`Received a timestamp that can't be safely represented by the runtime (${r})`)}if(typeof i<"u")return e.coercions.push([(t=e.p)!==null&&t!==void 0?t:".",e.coercion.bind(null,i)]),!0}return pt(e,`Expected a date (got ${Vr(r)})`)}return!0}}),Ede=(r,{delimiter:e}={})=>Qt({test:(t,i)=>{var n;if(typeof t=="string"&&typeof e<"u"&&typeof(i==null?void 0:i.coercions)<"u"){if(typeof(i==null?void 0:i.coercion)>"u")return pt(i,"Unbound coercion result");t=t.split(e),i.coercions.push([(n=i.p)!==null&&n!==void 0?n:".",i.coercion.bind(null,t)])}if(!Array.isArray(t))return pt(i,`Expected an array (got ${Vr(t)})`);let s=!0;for(let o=0,a=t.length;o{let t=SH(r.length);return Qt({test:(i,n)=>{var s;if(typeof i=="string"&&typeof e<"u"&&typeof(n==null?void 0:n.coercions)<"u"){if(typeof(n==null?void 0:n.coercion)>"u")return pt(n,"Unbound coercion result");i=i.split(e),n.coercions.push([(s=n.p)!==null&&s!==void 0?s:".",n.coercion.bind(null,i)])}if(!Array.isArray(i))return pt(n,`Expected a tuple (got ${Vr(i)})`);let o=t(i,Object.assign({},n));for(let a=0,l=i.length;aQt({test:(t,i)=>{if(typeof t!="object"||t===null)return pt(i,`Expected an object (got ${Vr(t)})`);let n=Object.keys(t),s=!0;for(let 
o=0,a=n.length;o{let t=Object.keys(r);return Qt({test:(i,n)=>{if(typeof i!="object"||i===null)return pt(n,`Expected an object (got ${Vr(i)})`);let s=new Set([...t,...Object.keys(i)]),o={},a=!0;for(let l of s){if(l==="constructor"||l==="__proto__")a=pt(Object.assign(Object.assign({},n),{p:FA(n,l)}),"Unsafe property name");else{let c=Object.prototype.hasOwnProperty.call(r,l)?r[l]:void 0,u=Object.prototype.hasOwnProperty.call(i,l)?i[l]:void 0;typeof c<"u"?a=c(u,Object.assign(Object.assign({},n),{p:FA(n,l),coercion:cc(i,l)}))&&a:e===null?a=pt(Object.assign(Object.assign({},n),{p:FA(n,l)}),`Extraneous property (got ${Vr(u)})`):Object.defineProperty(o,l,{enumerable:!0,get:()=>u,set:bH(i,l)})}if(!a&&(n==null?void 0:n.errors)==null)break}return e!==null&&(a||(n==null?void 0:n.errors)!=null)&&(a=e(o,n)&&a),a}})},Bde=r=>Qt({test:(e,t)=>e instanceof r?!0:pt(t,`Expected an instance of ${r.name} (got ${Vr(e)})`)}),bde=(r,{exclusive:e=!1}={})=>Qt({test:(t,i)=>{var n,s,o;let a=[],l=typeof(i==null?void 0:i.errors)<"u"?[]:void 0;for(let c=0,u=r.length;c1?pt(i,`Expected to match exactly a single predicate (matched ${a.join(", ")})`):(o=i==null?void 0:i.errors)===null||o===void 0||o.push(...l),!1}}),od=(r,e)=>Qt({test:(t,i)=>{var n,s;let o={value:t},a=typeof(i==null?void 0:i.coercions)<"u"?cc(o,"value"):void 0,l=typeof(i==null?void 0:i.coercions)<"u"?[]:void 0;if(!r(t,Object.assign(Object.assign({},i),{coercion:a,coercions:l})))return!1;let c=[];if(typeof l<"u")for(let[,u]of l)c.push(u());try{if(typeof(i==null?void 0:i.coercions)<"u"){if(o.value!==t){if(typeof(i==null?void 0:i.coercion)>"u")return pt(i,"Unbound coercion result");i.coercions.push([(n=i.p)!==null&&n!==void 0?n:".",i.coercion.bind(null,o.value)])}(s=i==null?void 0:i.coercions)===null||s===void 0||s.push(...l)}return e.every(u=>u(o.value,i))}finally{for(let u of c)u()}}}),Qde=r=>Qt({test:(e,t)=>typeof e>"u"?!0:r(e,t)}),Sde=r=>Qt({test:(e,t)=>e===null?!0:r(e,t)}),vde=r=>Qt({test:(e,t)=>e.length>=r?!0:pt(t,`Expected to have a length of at least ${r} elements (got ${e.length})`)}),xde=r=>Qt({test:(e,t)=>e.length<=r?!0:pt(t,`Expected to have a length of at most ${r} elements (got ${e.length})`)}),SH=r=>Qt({test:(e,t)=>e.length!==r?pt(t,`Expected to have a length of exactly ${r} elements (got ${e.length})`):!0}),Pde=({map:r}={})=>Qt({test:(e,t)=>{let i=new Set,n=new Set;for(let s=0,o=e.length;sQt({test:(r,e)=>r<=0?!0:pt(e,`Expected to be negative (got ${r})`)}),kde=()=>Qt({test:(r,e)=>r>=0?!0:pt(e,`Expected to be positive (got ${r})`)}),Rde=r=>Qt({test:(e,t)=>e>=r?!0:pt(t,`Expected to be at least ${r} (got ${e})`)}),Fde=r=>Qt({test:(e,t)=>e<=r?!0:pt(t,`Expected to be at most ${r} (got ${e})`)}),Nde=(r,e)=>Qt({test:(t,i)=>t>=r&&t<=e?!0:pt(i,`Expected to be in the [${r}; ${e}] range (got ${t})`)}),Tde=(r,e)=>Qt({test:(t,i)=>t>=r&&tQt({test:(e,t)=>e!==Math.round(e)?pt(t,`Expected to be an integer (got ${e})`):Number.isSafeInteger(e)?!0:pt(t,`Expected to be a safe integer (got ${e})`)}),ad=r=>Qt({test:(e,t)=>r.test(e)?!0:pt(t,`Expected to match the pattern ${r.toString()} (got ${Vr(e)})`)}),Mde=()=>Qt({test:(r,e)=>r!==r.toLowerCase()?pt(e,`Expected to be all-lowercase (got ${r})`):!0}),Ode=()=>Qt({test:(r,e)=>r!==r.toUpperCase()?pt(e,`Expected to be all-uppercase (got ${r})`):!0}),Kde=()=>Qt({test:(r,e)=>wH.test(r)?!0:pt(e,`Expected to be a valid UUID v4 (got ${Vr(r)})`)}),Ude=()=>Qt({test:(r,e)=>ov.test(r)?!1:pt(e,`Expected to be a valid ISO 8601 date string (got 
${Vr(r)})`)}),Hde=({alpha:r=!1})=>Qt({test:(e,t)=>(r?EH.test(e):IH.test(e))?!0:pt(t,`Expected to be a valid hexadecimal color string (got ${Vr(e)})`)}),Gde=()=>Qt({test:(r,e)=>yH.test(r)?!0:pt(e,`Expected to be a valid base 64 string (got ${Vr(r)})`)}),Yde=(r=QH())=>Qt({test:(e,t)=>{let i;try{i=JSON.parse(e)}catch{return pt(t,`Expected to be a valid JSON string (got ${Vr(e)})`)}return r(i,t)}}),jde=r=>{let e=new Set(r);return Qt({test:(t,i)=>{let n=new Set(Object.keys(t)),s=[];for(let o of e)n.has(o)||s.push(o);return s.length>0?pt(i,`Missing required ${kI(s.length,"property","properties")} ${s.map(o=>`"${o}"`).join(", ")}`):!0}})},qde=r=>{let e=new Set(r);return Qt({test:(t,i)=>{let n=new Set(Object.keys(t)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>0?pt(i,`Forbidden ${kI(s.length,"property","properties")} ${s.map(o=>`"${o}"`).join(", ")}`):!0}})},Jde=r=>{let e=new Set(r);return Qt({test:(t,i)=>{let n=new Set(Object.keys(t)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>1?pt(i,`Mutually exclusive properties ${s.map(o=>`"${o}"`).join(", ")}`):!0}})};(function(r){r.Forbids="Forbids",r.Requires="Requires"})(lc||(lc={}));Wde={[lc.Forbids]:{expect:!1,message:"forbids using"},[lc.Requires]:{expect:!0,message:"requires using"}},av=(r,e,t,{ignore:i=[]}={})=>{let n=new Set(i),s=new Set(t),o=Wde[e];return Qt({test:(a,l)=>{let c=new Set(Object.keys(a));if(!c.has(r)||n.has(a[r]))return!0;let u=[];for(let g of s)(c.has(g)&&!n.has(a[g]))!==o.expect&&u.push(g);return u.length>=1?pt(l,`Property "${r}" ${o.message} ${kI(u.length,"property","properties")} ${u.map(g=>`"${g}"`).join(", ")}`):!0}})}});var YH=w((o$e,GH)=>{"use strict";GH.exports=(r,...e)=>new Promise(t=>{t(r(...e))})});var Jg=w((a$e,pv)=>{"use strict";var cCe=YH(),jH=r=>{if(r<1)throw new TypeError("Expected `concurrency` to be a number from 1 and up");let e=[],t=0,i=()=>{t--,e.length>0&&e.shift()()},n=(a,l,...c)=>{t++;let u=cCe(a,...c);l(u),u.then(i,i)},s=(a,l,...c)=>{tnew Promise(c=>s(a,c,...l));return Object.defineProperties(o,{activeCount:{get:()=>t},pendingCount:{get:()=>e.length}}),o};pv.exports=jH;pv.exports.default=jH});var gd=w((l$e,qH)=>{var uCe="2.0.0",gCe=Number.MAX_SAFE_INTEGER||9007199254740991,fCe=16;qH.exports={SEMVER_SPEC_VERSION:uCe,MAX_LENGTH:256,MAX_SAFE_INTEGER:gCe,MAX_SAFE_COMPONENT_LENGTH:fCe}});var fd=w((c$e,JH)=>{var hCe=typeof process=="object"&&process.env&&process.env.NODE_DEBUG&&/\bsemver\b/i.test(process.env.NODE_DEBUG)?(...r)=>console.error("SEMVER",...r):()=>{};JH.exports=hCe});var uc=w((TA,WH)=>{var{MAX_SAFE_COMPONENT_LENGTH:dv}=gd(),pCe=fd();TA=WH.exports={};var dCe=TA.re=[],et=TA.src=[],tt=TA.t={},CCe=0,St=(r,e,t)=>{let i=CCe++;pCe(i,e),tt[r]=i,et[i]=e,dCe[i]=new RegExp(e,t?"g":void 
0)};St("NUMERICIDENTIFIER","0|[1-9]\\d*");St("NUMERICIDENTIFIERLOOSE","[0-9]+");St("NONNUMERICIDENTIFIER","\\d*[a-zA-Z-][a-zA-Z0-9-]*");St("MAINVERSION",`(${et[tt.NUMERICIDENTIFIER]})\\.(${et[tt.NUMERICIDENTIFIER]})\\.(${et[tt.NUMERICIDENTIFIER]})`);St("MAINVERSIONLOOSE",`(${et[tt.NUMERICIDENTIFIERLOOSE]})\\.(${et[tt.NUMERICIDENTIFIERLOOSE]})\\.(${et[tt.NUMERICIDENTIFIERLOOSE]})`);St("PRERELEASEIDENTIFIER",`(?:${et[tt.NUMERICIDENTIFIER]}|${et[tt.NONNUMERICIDENTIFIER]})`);St("PRERELEASEIDENTIFIERLOOSE",`(?:${et[tt.NUMERICIDENTIFIERLOOSE]}|${et[tt.NONNUMERICIDENTIFIER]})`);St("PRERELEASE",`(?:-(${et[tt.PRERELEASEIDENTIFIER]}(?:\\.${et[tt.PRERELEASEIDENTIFIER]})*))`);St("PRERELEASELOOSE",`(?:-?(${et[tt.PRERELEASEIDENTIFIERLOOSE]}(?:\\.${et[tt.PRERELEASEIDENTIFIERLOOSE]})*))`);St("BUILDIDENTIFIER","[0-9A-Za-z-]+");St("BUILD",`(?:\\+(${et[tt.BUILDIDENTIFIER]}(?:\\.${et[tt.BUILDIDENTIFIER]})*))`);St("FULLPLAIN",`v?${et[tt.MAINVERSION]}${et[tt.PRERELEASE]}?${et[tt.BUILD]}?`);St("FULL",`^${et[tt.FULLPLAIN]}$`);St("LOOSEPLAIN",`[v=\\s]*${et[tt.MAINVERSIONLOOSE]}${et[tt.PRERELEASELOOSE]}?${et[tt.BUILD]}?`);St("LOOSE",`^${et[tt.LOOSEPLAIN]}$`);St("GTLT","((?:<|>)?=?)");St("XRANGEIDENTIFIERLOOSE",`${et[tt.NUMERICIDENTIFIERLOOSE]}|x|X|\\*`);St("XRANGEIDENTIFIER",`${et[tt.NUMERICIDENTIFIER]}|x|X|\\*`);St("XRANGEPLAIN",`[v=\\s]*(${et[tt.XRANGEIDENTIFIER]})(?:\\.(${et[tt.XRANGEIDENTIFIER]})(?:\\.(${et[tt.XRANGEIDENTIFIER]})(?:${et[tt.PRERELEASE]})?${et[tt.BUILD]}?)?)?`);St("XRANGEPLAINLOOSE",`[v=\\s]*(${et[tt.XRANGEIDENTIFIERLOOSE]})(?:\\.(${et[tt.XRANGEIDENTIFIERLOOSE]})(?:\\.(${et[tt.XRANGEIDENTIFIERLOOSE]})(?:${et[tt.PRERELEASELOOSE]})?${et[tt.BUILD]}?)?)?`);St("XRANGE",`^${et[tt.GTLT]}\\s*${et[tt.XRANGEPLAIN]}$`);St("XRANGELOOSE",`^${et[tt.GTLT]}\\s*${et[tt.XRANGEPLAINLOOSE]}$`);St("COERCE",`(^|[^\\d])(\\d{1,${dv}})(?:\\.(\\d{1,${dv}}))?(?:\\.(\\d{1,${dv}}))?(?:$|[^\\d])`);St("COERCERTL",et[tt.COERCE],!0);St("LONETILDE","(?:~>?)");St("TILDETRIM",`(\\s*)${et[tt.LONETILDE]}\\s+`,!0);TA.tildeTrimReplace="$1~";St("TILDE",`^${et[tt.LONETILDE]}${et[tt.XRANGEPLAIN]}$`);St("TILDELOOSE",`^${et[tt.LONETILDE]}${et[tt.XRANGEPLAINLOOSE]}$`);St("LONECARET","(?:\\^)");St("CARETTRIM",`(\\s*)${et[tt.LONECARET]}\\s+`,!0);TA.caretTrimReplace="$1^";St("CARET",`^${et[tt.LONECARET]}${et[tt.XRANGEPLAIN]}$`);St("CARETLOOSE",`^${et[tt.LONECARET]}${et[tt.XRANGEPLAINLOOSE]}$`);St("COMPARATORLOOSE",`^${et[tt.GTLT]}\\s*(${et[tt.LOOSEPLAIN]})$|^$`);St("COMPARATOR",`^${et[tt.GTLT]}\\s*(${et[tt.FULLPLAIN]})$|^$`);St("COMPARATORTRIM",`(\\s*)${et[tt.GTLT]}\\s*(${et[tt.LOOSEPLAIN]}|${et[tt.XRANGEPLAIN]})`,!0);TA.comparatorTrimReplace="$1$2$3";St("HYPHENRANGE",`^\\s*(${et[tt.XRANGEPLAIN]})\\s+-\\s+(${et[tt.XRANGEPLAIN]})\\s*$`);St("HYPHENRANGELOOSE",`^\\s*(${et[tt.XRANGEPLAINLOOSE]})\\s+-\\s+(${et[tt.XRANGEPLAINLOOSE]})\\s*$`);St("STAR","(<|>)?=?\\s*\\*");St("GTE0","^\\s*>=\\s*0.0.0\\s*$");St("GTE0PRE","^\\s*>=\\s*0.0.0-0\\s*$")});var hd=w((u$e,zH)=>{var mCe=["includePrerelease","loose","rtl"],ECe=r=>r?typeof r!="object"?{loose:!0}:mCe.filter(e=>r[e]).reduce((e,t)=>(e[t]=!0,e),{}):{};zH.exports=ECe});var MI=w((g$e,ZH)=>{var VH=/^[0-9]+$/,XH=(r,e)=>{let t=VH.test(r),i=VH.test(e);return t&&i&&(r=+r,e=+e),r===e?0:t&&!i?-1:i&&!t?1:rXH(e,r);ZH.exports={compareIdentifiers:XH,rcompareIdentifiers:ICe}});var Li=w((f$e,tG)=>{var OI=fd(),{MAX_LENGTH:_H,MAX_SAFE_INTEGER:KI}=gd(),{re:$H,t:eG}=uc(),yCe=hd(),{compareIdentifiers:pd}=MI(),Yn=class{constructor(e,t){if(t=yCe(t),e instanceof 
Yn){if(e.loose===!!t.loose&&e.includePrerelease===!!t.includePrerelease)return e;e=e.version}else if(typeof e!="string")throw new TypeError(`Invalid Version: ${e}`);if(e.length>_H)throw new TypeError(`version is longer than ${_H} characters`);OI("SemVer",e,t),this.options=t,this.loose=!!t.loose,this.includePrerelease=!!t.includePrerelease;let i=e.trim().match(t.loose?$H[eG.LOOSE]:$H[eG.FULL]);if(!i)throw new TypeError(`Invalid Version: ${e}`);if(this.raw=e,this.major=+i[1],this.minor=+i[2],this.patch=+i[3],this.major>KI||this.major<0)throw new TypeError("Invalid major version");if(this.minor>KI||this.minor<0)throw new TypeError("Invalid minor version");if(this.patch>KI||this.patch<0)throw new TypeError("Invalid patch version");i[4]?this.prerelease=i[4].split(".").map(n=>{if(/^[0-9]+$/.test(n)){let s=+n;if(s>=0&&s=0;)typeof this.prerelease[i]=="number"&&(this.prerelease[i]++,i=-2);i===-1&&this.prerelease.push(0)}t&&(this.prerelease[0]===t?isNaN(this.prerelease[1])&&(this.prerelease=[t,0]):this.prerelease=[t,0]);break;default:throw new Error(`invalid increment argument: ${e}`)}return this.format(),this.raw=this.version,this}};tG.exports=Yn});var gc=w((h$e,sG)=>{var{MAX_LENGTH:wCe}=gd(),{re:rG,t:iG}=uc(),nG=Li(),BCe=hd(),bCe=(r,e)=>{if(e=BCe(e),r instanceof nG)return r;if(typeof r!="string"||r.length>wCe||!(e.loose?rG[iG.LOOSE]:rG[iG.FULL]).test(r))return null;try{return new nG(r,e)}catch{return null}};sG.exports=bCe});var aG=w((p$e,oG)=>{var QCe=gc(),SCe=(r,e)=>{let t=QCe(r,e);return t?t.version:null};oG.exports=SCe});var lG=w((d$e,AG)=>{var vCe=gc(),xCe=(r,e)=>{let t=vCe(r.trim().replace(/^[=v]+/,""),e);return t?t.version:null};AG.exports=xCe});var uG=w((C$e,cG)=>{var PCe=Li(),DCe=(r,e,t,i)=>{typeof t=="string"&&(i=t,t=void 0);try{return new PCe(r,t).inc(e,i).version}catch{return null}};cG.exports=DCe});var cs=w((m$e,fG)=>{var gG=Li(),kCe=(r,e,t)=>new gG(r,t).compare(new gG(e,t));fG.exports=kCe});var UI=w((E$e,hG)=>{var RCe=cs(),FCe=(r,e,t)=>RCe(r,e,t)===0;hG.exports=FCe});var CG=w((I$e,dG)=>{var pG=gc(),NCe=UI(),TCe=(r,e)=>{if(NCe(r,e))return null;{let t=pG(r),i=pG(e),n=t.prerelease.length||i.prerelease.length,s=n?"pre":"",o=n?"prerelease":"";for(let a in t)if((a==="major"||a==="minor"||a==="patch")&&t[a]!==i[a])return s+a;return o}};dG.exports=TCe});var EG=w((y$e,mG)=>{var LCe=Li(),MCe=(r,e)=>new LCe(r,e).major;mG.exports=MCe});var yG=w((w$e,IG)=>{var OCe=Li(),KCe=(r,e)=>new OCe(r,e).minor;IG.exports=KCe});var BG=w((B$e,wG)=>{var UCe=Li(),HCe=(r,e)=>new UCe(r,e).patch;wG.exports=HCe});var QG=w((b$e,bG)=>{var GCe=gc(),YCe=(r,e)=>{let t=GCe(r,e);return t&&t.prerelease.length?t.prerelease:null};bG.exports=YCe});var vG=w((Q$e,SG)=>{var jCe=cs(),qCe=(r,e,t)=>jCe(e,r,t);SG.exports=qCe});var PG=w((S$e,xG)=>{var JCe=cs(),WCe=(r,e)=>JCe(r,e,!0);xG.exports=WCe});var HI=w((v$e,kG)=>{var DG=Li(),zCe=(r,e,t)=>{let i=new DG(r,t),n=new DG(e,t);return i.compare(n)||i.compareBuild(n)};kG.exports=zCe});var FG=w((x$e,RG)=>{var VCe=HI(),XCe=(r,e)=>r.sort((t,i)=>VCe(t,i,e));RG.exports=XCe});var TG=w((P$e,NG)=>{var ZCe=HI(),_Ce=(r,e)=>r.sort((t,i)=>ZCe(i,t,e));NG.exports=_Ce});var dd=w((D$e,LG)=>{var $Ce=cs(),eme=(r,e,t)=>$Ce(r,e,t)>0;LG.exports=eme});var GI=w((k$e,MG)=>{var tme=cs(),rme=(r,e,t)=>tme(r,e,t)<0;MG.exports=rme});var Cv=w((R$e,OG)=>{var ime=cs(),nme=(r,e,t)=>ime(r,e,t)!==0;OG.exports=nme});var YI=w((F$e,KG)=>{var sme=cs(),ome=(r,e,t)=>sme(r,e,t)>=0;KG.exports=ome});var jI=w((N$e,UG)=>{var ame=cs(),Ame=(r,e,t)=>ame(r,e,t)<=0;UG.exports=Ame});var mv=w((T$e,HG)=>{var 
lme=UI(),cme=Cv(),ume=dd(),gme=YI(),fme=GI(),hme=jI(),pme=(r,e,t,i)=>{switch(e){case"===":return typeof r=="object"&&(r=r.version),typeof t=="object"&&(t=t.version),r===t;case"!==":return typeof r=="object"&&(r=r.version),typeof t=="object"&&(t=t.version),r!==t;case"":case"=":case"==":return lme(r,t,i);case"!=":return cme(r,t,i);case">":return ume(r,t,i);case">=":return gme(r,t,i);case"<":return fme(r,t,i);case"<=":return hme(r,t,i);default:throw new TypeError(`Invalid operator: ${e}`)}};HG.exports=pme});var YG=w((L$e,GG)=>{var dme=Li(),Cme=gc(),{re:qI,t:JI}=uc(),mme=(r,e)=>{if(r instanceof dme)return r;if(typeof r=="number"&&(r=String(r)),typeof r!="string")return null;e=e||{};let t=null;if(!e.rtl)t=r.match(qI[JI.COERCE]);else{let i;for(;(i=qI[JI.COERCERTL].exec(r))&&(!t||t.index+t[0].length!==r.length);)(!t||i.index+i[0].length!==t.index+t[0].length)&&(t=i),qI[JI.COERCERTL].lastIndex=i.index+i[1].length+i[2].length;qI[JI.COERCERTL].lastIndex=-1}return t===null?null:Cme(`${t[2]}.${t[3]||"0"}.${t[4]||"0"}`,e)};GG.exports=mme});var qG=w((M$e,jG)=>{"use strict";jG.exports=function(r){r.prototype[Symbol.iterator]=function*(){for(let e=this.head;e;e=e.next)yield e.value}}});var WI=w((O$e,JG)=>{"use strict";JG.exports=Ht;Ht.Node=fc;Ht.create=Ht;function Ht(r){var e=this;if(e instanceof Ht||(e=new Ht),e.tail=null,e.head=null,e.length=0,r&&typeof r.forEach=="function")r.forEach(function(n){e.push(n)});else if(arguments.length>0)for(var t=0,i=arguments.length;t1)t=e;else if(this.head)i=this.head.next,t=this.head.value;else throw new TypeError("Reduce of empty list with no initial value");for(var n=0;i!==null;n++)t=r(t,i.value,n),i=i.next;return t};Ht.prototype.reduceReverse=function(r,e){var t,i=this.tail;if(arguments.length>1)t=e;else if(this.tail)i=this.tail.prev,t=this.tail.value;else throw new TypeError("Reduce of empty list with no initial value");for(var n=this.length-1;i!==null;n--)t=r(t,i.value,n),i=i.prev;return t};Ht.prototype.toArray=function(){for(var r=new Array(this.length),e=0,t=this.head;t!==null;e++)r[e]=t.value,t=t.next;return r};Ht.prototype.toArrayReverse=function(){for(var r=new Array(this.length),e=0,t=this.tail;t!==null;e++)r[e]=t.value,t=t.prev;return r};Ht.prototype.slice=function(r,e){e=e||this.length,e<0&&(e+=this.length),r=r||0,r<0&&(r+=this.length);var t=new Ht;if(ethis.length&&(e=this.length);for(var i=0,n=this.head;n!==null&&ithis.length&&(e=this.length);for(var i=this.length,n=this.tail;n!==null&&i>e;i--)n=n.prev;for(;n!==null&&i>r;i--,n=n.prev)t.push(n.value);return t};Ht.prototype.splice=function(r,e,...t){r>this.length&&(r=this.length-1),r<0&&(r=this.length+r);for(var i=0,n=this.head;n!==null&&i{"use strict";var wme=WI(),hc=Symbol("max"),va=Symbol("length"),Wg=Symbol("lengthCalculator"),md=Symbol("allowStale"),pc=Symbol("maxAge"),Sa=Symbol("dispose"),WG=Symbol("noDisposeOnSet"),di=Symbol("lruList"),Zs=Symbol("cache"),VG=Symbol("updateAgeOnGet"),Ev=()=>1,yv=class{constructor(e){if(typeof e=="number"&&(e={max:e}),e||(e={}),e.max&&(typeof e.max!="number"||e.max<0))throw new TypeError("max must be a non-negative number");let t=this[hc]=e.max||1/0,i=e.length||Ev;if(this[Wg]=typeof i!="function"?Ev:i,this[md]=e.stale||!1,e.maxAge&&typeof e.maxAge!="number")throw new TypeError("maxAge must be a number");this[pc]=e.maxAge||0,this[Sa]=e.dispose,this[WG]=e.noDisposeOnSet||!1,this[VG]=e.updateAgeOnGet||!1,this.reset()}set max(e){if(typeof e!="number"||e<0)throw new TypeError("max must be a non-negative number");this[hc]=e||1/0,Cd(this)}get max(){return this[hc]}set 
allowStale(e){this[md]=!!e}get allowStale(){return this[md]}set maxAge(e){if(typeof e!="number")throw new TypeError("maxAge must be a non-negative number");this[pc]=e,Cd(this)}get maxAge(){return this[pc]}set lengthCalculator(e){typeof e!="function"&&(e=Ev),e!==this[Wg]&&(this[Wg]=e,this[va]=0,this[di].forEach(t=>{t.length=this[Wg](t.value,t.key),this[va]+=t.length})),Cd(this)}get lengthCalculator(){return this[Wg]}get length(){return this[va]}get itemCount(){return this[di].length}rforEach(e,t){t=t||this;for(let i=this[di].tail;i!==null;){let n=i.prev;zG(this,e,i,t),i=n}}forEach(e,t){t=t||this;for(let i=this[di].head;i!==null;){let n=i.next;zG(this,e,i,t),i=n}}keys(){return this[di].toArray().map(e=>e.key)}values(){return this[di].toArray().map(e=>e.value)}reset(){this[Sa]&&this[di]&&this[di].length&&this[di].forEach(e=>this[Sa](e.key,e.value)),this[Zs]=new Map,this[di]=new wme,this[va]=0}dump(){return this[di].map(e=>zI(this,e)?!1:{k:e.key,v:e.value,e:e.now+(e.maxAge||0)}).toArray().filter(e=>e)}dumpLru(){return this[di]}set(e,t,i){if(i=i||this[pc],i&&typeof i!="number")throw new TypeError("maxAge must be a number");let n=i?Date.now():0,s=this[Wg](t,e);if(this[Zs].has(e)){if(s>this[hc])return zg(this,this[Zs].get(e)),!1;let l=this[Zs].get(e).value;return this[Sa]&&(this[WG]||this[Sa](e,l.value)),l.now=n,l.maxAge=i,l.value=t,this[va]+=s-l.length,l.length=s,this.get(e),Cd(this),!0}let o=new wv(e,t,s,n,i);return o.length>this[hc]?(this[Sa]&&this[Sa](e,t),!1):(this[va]+=o.length,this[di].unshift(o),this[Zs].set(e,this[di].head),Cd(this),!0)}has(e){if(!this[Zs].has(e))return!1;let t=this[Zs].get(e).value;return!zI(this,t)}get(e){return Iv(this,e,!0)}peek(e){return Iv(this,e,!1)}pop(){let e=this[di].tail;return e?(zg(this,e),e.value):null}del(e){zg(this,this[Zs].get(e))}load(e){this.reset();let t=Date.now();for(let i=e.length-1;i>=0;i--){let n=e[i],s=n.e||0;if(s===0)this.set(n.k,n.v);else{let o=s-t;o>0&&this.set(n.k,n.v,o)}}}prune(){this[Zs].forEach((e,t)=>Iv(this,t,!1))}},Iv=(r,e,t)=>{let i=r[Zs].get(e);if(i){let n=i.value;if(zI(r,n)){if(zg(r,i),!r[md])return}else t&&(r[VG]&&(i.value.now=Date.now()),r[di].unshiftNode(i));return n.value}},zI=(r,e)=>{if(!e||!e.maxAge&&!r[pc])return!1;let t=Date.now()-e.now;return e.maxAge?t>e.maxAge:r[pc]&&t>r[pc]},Cd=r=>{if(r[va]>r[hc])for(let e=r[di].tail;r[va]>r[hc]&&e!==null;){let t=e.prev;zg(r,e),e=t}},zg=(r,e)=>{if(e){let t=e.value;r[Sa]&&r[Sa](t.key,t.value),r[va]-=t.length,r[Zs].delete(t.key),r[di].removeNode(e)}},wv=class{constructor(e,t,i,n,s){this.key=e,this.value=t,this.length=i,this.now=n,this.maxAge=s||0}},zG=(r,e,t,i)=>{let n=t.value;zI(r,n)&&(zg(r,t),r[md]||(n=void 0)),n&&e.call(i,n.value,n.key,r)};XG.exports=yv});var us=w((U$e,tY)=>{var dc=class{constructor(e,t){if(t=bme(t),e instanceof dc)return e.loose===!!t.loose&&e.includePrerelease===!!t.includePrerelease?e:new dc(e.raw,t);if(e instanceof Bv)return this.raw=e.value,this.set=[[e]],this.format(),this;if(this.options=t,this.loose=!!t.loose,this.includePrerelease=!!t.includePrerelease,this.raw=e,this.set=e.split(/\s*\|\|\s*/).map(i=>this.parseRange(i.trim())).filter(i=>i.length),!this.set.length)throw new TypeError(`Invalid SemVer Range: ${e}`);if(this.set.length>1){let i=this.set[0];if(this.set=this.set.filter(n=>!$G(n[0])),this.set.length===0)this.set=[i];else if(this.set.length>1){for(let n of this.set)if(n.length===1&&Pme(n[0])){this.set=[n];break}}}this.format()}format(){return this.range=this.set.map(e=>e.join(" ").trim()).join("||").trim(),this.range}toString(){return 
this.range}parseRange(e){e=e.trim();let i=`parseRange:${Object.keys(this.options).join(",")}:${e}`,n=_G.get(i);if(n)return n;let s=this.options.loose,o=s?Mi[bi.HYPHENRANGELOOSE]:Mi[bi.HYPHENRANGE];e=e.replace(o,Kme(this.options.includePrerelease)),Gr("hyphen replace",e),e=e.replace(Mi[bi.COMPARATORTRIM],Sme),Gr("comparator trim",e,Mi[bi.COMPARATORTRIM]),e=e.replace(Mi[bi.TILDETRIM],vme),e=e.replace(Mi[bi.CARETTRIM],xme),e=e.split(/\s+/).join(" ");let a=s?Mi[bi.COMPARATORLOOSE]:Mi[bi.COMPARATOR],l=e.split(" ").map(f=>Dme(f,this.options)).join(" ").split(/\s+/).map(f=>Ome(f,this.options)).filter(this.options.loose?f=>!!f.match(a):()=>!0).map(f=>new Bv(f,this.options)),c=l.length,u=new Map;for(let f of l){if($G(f))return[f];u.set(f.value,f)}u.size>1&&u.has("")&&u.delete("");let g=[...u.values()];return _G.set(i,g),g}intersects(e,t){if(!(e instanceof dc))throw new TypeError("a Range is required");return this.set.some(i=>eY(i,t)&&e.set.some(n=>eY(n,t)&&i.every(s=>n.every(o=>s.intersects(o,t)))))}test(e){if(!e)return!1;if(typeof e=="string")try{e=new Qme(e,this.options)}catch{return!1}for(let t=0;tr.value==="<0.0.0-0",Pme=r=>r.value==="",eY=(r,e)=>{let t=!0,i=r.slice(),n=i.pop();for(;t&&i.length;)t=i.every(s=>n.intersects(s,e)),n=i.pop();return t},Dme=(r,e)=>(Gr("comp",r,e),r=Fme(r,e),Gr("caret",r),r=kme(r,e),Gr("tildes",r),r=Tme(r,e),Gr("xrange",r),r=Mme(r,e),Gr("stars",r),r),$i=r=>!r||r.toLowerCase()==="x"||r==="*",kme=(r,e)=>r.trim().split(/\s+/).map(t=>Rme(t,e)).join(" "),Rme=(r,e)=>{let t=e.loose?Mi[bi.TILDELOOSE]:Mi[bi.TILDE];return r.replace(t,(i,n,s,o,a)=>{Gr("tilde",r,i,n,s,o,a);let l;return $i(n)?l="":$i(s)?l=`>=${n}.0.0 <${+n+1}.0.0-0`:$i(o)?l=`>=${n}.${s}.0 <${n}.${+s+1}.0-0`:a?(Gr("replaceTilde pr",a),l=`>=${n}.${s}.${o}-${a} <${n}.${+s+1}.0-0`):l=`>=${n}.${s}.${o} <${n}.${+s+1}.0-0`,Gr("tilde return",l),l})},Fme=(r,e)=>r.trim().split(/\s+/).map(t=>Nme(t,e)).join(" "),Nme=(r,e)=>{Gr("caret",r,e);let t=e.loose?Mi[bi.CARETLOOSE]:Mi[bi.CARET],i=e.includePrerelease?"-0":"";return r.replace(t,(n,s,o,a,l)=>{Gr("caret",r,n,s,o,a,l);let c;return $i(s)?c="":$i(o)?c=`>=${s}.0.0${i} <${+s+1}.0.0-0`:$i(a)?s==="0"?c=`>=${s}.${o}.0${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.0${i} <${+s+1}.0.0-0`:l?(Gr("replaceCaret pr",l),s==="0"?o==="0"?c=`>=${s}.${o}.${a}-${l} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}-${l} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a}-${l} <${+s+1}.0.0-0`):(Gr("no pr"),s==="0"?o==="0"?c=`>=${s}.${o}.${a}${i} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a} <${+s+1}.0.0-0`),Gr("caret return",c),c})},Tme=(r,e)=>(Gr("replaceXRanges",r,e),r.split(/\s+/).map(t=>Lme(t,e)).join(" ")),Lme=(r,e)=>{r=r.trim();let t=e.loose?Mi[bi.XRANGELOOSE]:Mi[bi.XRANGE];return r.replace(t,(i,n,s,o,a,l)=>{Gr("xRange",r,i,n,s,o,a,l);let c=$i(s),u=c||$i(o),g=u||$i(a),f=g;return n==="="&&f&&(n=""),l=e.includePrerelease?"-0":"",c?n===">"||n==="<"?i="<0.0.0-0":i="*":n&&f?(u&&(o=0),a=0,n===">"?(n=">=",u?(s=+s+1,o=0,a=0):(o=+o+1,a=0)):n==="<="&&(n="<",u?s=+s+1:o=+o+1),n==="<"&&(l="-0"),i=`${n+s}.${o}.${a}${l}`):u?i=`>=${s}.0.0${l} <${+s+1}.0.0-0`:g&&(i=`>=${s}.${o}.0${l} <${s}.${+o+1}.0-0`),Gr("xRange 
return",i),i})},Mme=(r,e)=>(Gr("replaceStars",r,e),r.trim().replace(Mi[bi.STAR],"")),Ome=(r,e)=>(Gr("replaceGTE0",r,e),r.trim().replace(Mi[e.includePrerelease?bi.GTE0PRE:bi.GTE0],"")),Kme=r=>(e,t,i,n,s,o,a,l,c,u,g,f,h)=>($i(i)?t="":$i(n)?t=`>=${i}.0.0${r?"-0":""}`:$i(s)?t=`>=${i}.${n}.0${r?"-0":""}`:o?t=`>=${t}`:t=`>=${t}${r?"-0":""}`,$i(c)?l="":$i(u)?l=`<${+c+1}.0.0-0`:$i(g)?l=`<${c}.${+u+1}.0-0`:f?l=`<=${c}.${u}.${g}-${f}`:r?l=`<${c}.${u}.${+g+1}-0`:l=`<=${l}`,`${t} ${l}`.trim()),Ume=(r,e,t)=>{for(let i=0;i0){let n=r[i].semver;if(n.major===e.major&&n.minor===e.minor&&n.patch===e.patch)return!0}return!1}return!0}});var Ed=w((H$e,oY)=>{var Id=Symbol("SemVer ANY"),Vg=class{static get ANY(){return Id}constructor(e,t){if(t=Hme(t),e instanceof Vg){if(e.loose===!!t.loose)return e;e=e.value}Qv("comparator",e,t),this.options=t,this.loose=!!t.loose,this.parse(e),this.semver===Id?this.value="":this.value=this.operator+this.semver.version,Qv("comp",this)}parse(e){let t=this.options.loose?rY[iY.COMPARATORLOOSE]:rY[iY.COMPARATOR],i=e.match(t);if(!i)throw new TypeError(`Invalid comparator: ${e}`);this.operator=i[1]!==void 0?i[1]:"",this.operator==="="&&(this.operator=""),i[2]?this.semver=new nY(i[2],this.options.loose):this.semver=Id}toString(){return this.value}test(e){if(Qv("Comparator.test",e,this.options.loose),this.semver===Id||e===Id)return!0;if(typeof e=="string")try{e=new nY(e,this.options)}catch{return!1}return bv(e,this.operator,this.semver,this.options)}intersects(e,t){if(!(e instanceof Vg))throw new TypeError("a Comparator is required");if((!t||typeof t!="object")&&(t={loose:!!t,includePrerelease:!1}),this.operator==="")return this.value===""?!0:new sY(e.value,t).test(this.value);if(e.operator==="")return e.value===""?!0:new sY(this.value,t).test(e.semver);let i=(this.operator===">="||this.operator===">")&&(e.operator===">="||e.operator===">"),n=(this.operator==="<="||this.operator==="<")&&(e.operator==="<="||e.operator==="<"),s=this.semver.version===e.semver.version,o=(this.operator===">="||this.operator==="<=")&&(e.operator===">="||e.operator==="<="),a=bv(this.semver,"<",e.semver,t)&&(this.operator===">="||this.operator===">")&&(e.operator==="<="||e.operator==="<"),l=bv(this.semver,">",e.semver,t)&&(this.operator==="<="||this.operator==="<")&&(e.operator===">="||e.operator===">");return i||n||s&&o||a||l}};oY.exports=Vg;var Hme=hd(),{re:rY,t:iY}=uc(),bv=mv(),Qv=fd(),nY=Li(),sY=us()});var yd=w((G$e,aY)=>{var Gme=us(),Yme=(r,e,t)=>{try{e=new Gme(e,t)}catch{return!1}return e.test(r)};aY.exports=Yme});var lY=w((Y$e,AY)=>{var jme=us(),qme=(r,e)=>new jme(r,e).set.map(t=>t.map(i=>i.value).join(" ").trim().split(" "));AY.exports=qme});var uY=w((j$e,cY)=>{var Jme=Li(),Wme=us(),zme=(r,e,t)=>{let i=null,n=null,s=null;try{s=new Wme(e,t)}catch{return null}return r.forEach(o=>{s.test(o)&&(!i||n.compare(o)===-1)&&(i=o,n=new Jme(i,t))}),i};cY.exports=zme});var fY=w((q$e,gY)=>{var Vme=Li(),Xme=us(),Zme=(r,e,t)=>{let i=null,n=null,s=null;try{s=new Xme(e,t)}catch{return null}return r.forEach(o=>{s.test(o)&&(!i||n.compare(o)===1)&&(i=o,n=new Vme(i,t))}),i};gY.exports=Zme});var dY=w((J$e,pY)=>{var Sv=Li(),_me=us(),hY=dd(),$me=(r,e)=>{r=new _me(r,e);let t=new Sv("0.0.0");if(r.test(t)||(t=new Sv("0.0.0-0"),r.test(t)))return t;t=null;for(let i=0;i{let a=new Sv(o.semver.version);switch(o.operator){case">":a.prerelease.length===0?a.patch++:a.prerelease.push(0),a.raw=a.format();case"":case">=":(!s||hY(a,s))&&(s=a);break;case"<":case"<=":break;default:throw new Error(`Unexpected operation: 
${o.operator}`)}}),s&&(!t||hY(t,s))&&(t=s)}return t&&r.test(t)?t:null};pY.exports=$me});var mY=w((W$e,CY)=>{var eEe=us(),tEe=(r,e)=>{try{return new eEe(r,e).range||"*"}catch{return null}};CY.exports=tEe});var VI=w((z$e,wY)=>{var rEe=Li(),yY=Ed(),{ANY:iEe}=yY,nEe=us(),sEe=yd(),EY=dd(),IY=GI(),oEe=jI(),aEe=YI(),AEe=(r,e,t,i)=>{r=new rEe(r,i),e=new nEe(e,i);let n,s,o,a,l;switch(t){case">":n=EY,s=oEe,o=IY,a=">",l=">=";break;case"<":n=IY,s=aEe,o=EY,a="<",l="<=";break;default:throw new TypeError('Must provide a hilo val of "<" or ">"')}if(sEe(r,e,i))return!1;for(let c=0;c{h.semver===iEe&&(h=new yY(">=0.0.0")),g=g||h,f=f||h,n(h.semver,g.semver,i)?g=h:o(h.semver,f.semver,i)&&(f=h)}),g.operator===a||g.operator===l||(!f.operator||f.operator===a)&&s(r,f.semver))return!1;if(f.operator===l&&o(r,f.semver))return!1}return!0};wY.exports=AEe});var bY=w((V$e,BY)=>{var lEe=VI(),cEe=(r,e,t)=>lEe(r,e,">",t);BY.exports=cEe});var SY=w((X$e,QY)=>{var uEe=VI(),gEe=(r,e,t)=>uEe(r,e,"<",t);QY.exports=gEe});var PY=w((Z$e,xY)=>{var vY=us(),fEe=(r,e,t)=>(r=new vY(r,t),e=new vY(e,t),r.intersects(e));xY.exports=fEe});var kY=w((_$e,DY)=>{var hEe=yd(),pEe=cs();DY.exports=(r,e,t)=>{let i=[],n=null,s=null,o=r.sort((u,g)=>pEe(u,g,t));for(let u of o)hEe(u,e,t)?(s=u,n||(n=u)):(s&&i.push([n,s]),s=null,n=null);n&&i.push([n,null]);let a=[];for(let[u,g]of i)u===g?a.push(u):!g&&u===o[0]?a.push("*"):g?u===o[0]?a.push(`<=${g}`):a.push(`${u} - ${g}`):a.push(`>=${u}`);let l=a.join(" || "),c=typeof e.raw=="string"?e.raw:String(e);return l.length{var RY=us(),XI=Ed(),{ANY:vv}=XI,wd=yd(),xv=cs(),dEe=(r,e,t={})=>{if(r===e)return!0;r=new RY(r,t),e=new RY(e,t);let i=!1;e:for(let n of r.set){for(let s of e.set){let o=CEe(n,s,t);if(i=i||o!==null,o)continue e}if(i)return!1}return!0},CEe=(r,e,t)=>{if(r===e)return!0;if(r.length===1&&r[0].semver===vv){if(e.length===1&&e[0].semver===vv)return!0;t.includePrerelease?r=[new XI(">=0.0.0-0")]:r=[new XI(">=0.0.0")]}if(e.length===1&&e[0].semver===vv){if(t.includePrerelease)return!0;e=[new XI(">=0.0.0")]}let i=new Set,n,s;for(let h of r)h.operator===">"||h.operator===">="?n=FY(n,h,t):h.operator==="<"||h.operator==="<="?s=NY(s,h,t):i.add(h.semver);if(i.size>1)return null;let o;if(n&&s){if(o=xv(n.semver,s.semver,t),o>0)return null;if(o===0&&(n.operator!==">="||s.operator!=="<="))return null}for(let h of i){if(n&&!wd(h,String(n),t)||s&&!wd(h,String(s),t))return null;for(let p of e)if(!wd(h,String(p),t))return!1;return!0}let a,l,c,u,g=s&&!t.includePrerelease&&s.semver.prerelease.length?s.semver:!1,f=n&&!t.includePrerelease&&n.semver.prerelease.length?n.semver:!1;g&&g.prerelease.length===1&&s.operator==="<"&&g.prerelease[0]===0&&(g=!1);for(let h of e){if(u=u||h.operator===">"||h.operator===">=",c=c||h.operator==="<"||h.operator==="<=",n){if(f&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===f.major&&h.semver.minor===f.minor&&h.semver.patch===f.patch&&(f=!1),h.operator===">"||h.operator===">="){if(a=FY(n,h,t),a===h&&a!==n)return!1}else if(n.operator===">="&&!wd(n.semver,String(h),t))return!1}if(s){if(g&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===g.major&&h.semver.minor===g.minor&&h.semver.patch===g.patch&&(g=!1),h.operator==="<"||h.operator==="<="){if(l=NY(s,h,t),l===h&&l!==s)return!1}else if(s.operator==="<="&&!wd(s.semver,String(h),t))return!1}if(!h.operator&&(s||n)&&o!==0)return!1}return!(n&&c&&!s&&o!==0||s&&u&&!n&&o!==0||f||g)},FY=(r,e,t)=>{if(!r)return e;let i=xv(r.semver,e.semver,t);return i>0?r:i<0||e.operator===">"&&r.operator===">="?e:r},NY=(r,e,t)=>{if(!r)return 
e;let i=xv(r.semver,e.semver,t);return i<0?r:i>0||e.operator==="<"&&r.operator==="<="?e:r};TY.exports=dEe});var Xr=w((eet,MY)=>{var Pv=uc();MY.exports={re:Pv.re,src:Pv.src,tokens:Pv.t,SEMVER_SPEC_VERSION:gd().SEMVER_SPEC_VERSION,SemVer:Li(),compareIdentifiers:MI().compareIdentifiers,rcompareIdentifiers:MI().rcompareIdentifiers,parse:gc(),valid:aG(),clean:lG(),inc:uG(),diff:CG(),major:EG(),minor:yG(),patch:BG(),prerelease:QG(),compare:cs(),rcompare:vG(),compareLoose:PG(),compareBuild:HI(),sort:FG(),rsort:TG(),gt:dd(),lt:GI(),eq:UI(),neq:Cv(),gte:YI(),lte:jI(),cmp:mv(),coerce:YG(),Comparator:Ed(),Range:us(),satisfies:yd(),toComparators:lY(),maxSatisfying:uY(),minSatisfying:fY(),minVersion:dY(),validRange:mY(),outside:VI(),gtr:bY(),ltr:SY(),intersects:PY(),simplifyRange:kY(),subset:LY()}});var Dv=w(ZI=>{"use strict";Object.defineProperty(ZI,"__esModule",{value:!0});ZI.VERSION=void 0;ZI.VERSION="9.1.0"});var Gt=w((exports,module)=>{"use strict";var __spreadArray=exports&&exports.__spreadArray||function(r,e,t){if(t||arguments.length===2)for(var i=0,n=e.length,s;i{(function(r,e){typeof define=="function"&&define.amd?define([],e):typeof _I=="object"&&_I.exports?_I.exports=e():r.regexpToAst=e()})(typeof self<"u"?self:OY,function(){function r(){}r.prototype.saveState=function(){return{idx:this.idx,input:this.input,groupIdx:this.groupIdx}},r.prototype.restoreState=function(p){this.idx=p.idx,this.input=p.input,this.groupIdx=p.groupIdx},r.prototype.pattern=function(p){this.idx=0,this.input=p,this.groupIdx=0,this.consumeChar("/");var C=this.disjunction();this.consumeChar("/");for(var y={type:"Flags",loc:{begin:this.idx,end:p.length},global:!1,ignoreCase:!1,multiLine:!1,unicode:!1,sticky:!1};this.isRegExpFlag();)switch(this.popChar()){case"g":o(y,"global");break;case"i":o(y,"ignoreCase");break;case"m":o(y,"multiLine");break;case"u":o(y,"unicode");break;case"y":o(y,"sticky");break}if(this.idx!==this.input.length)throw Error("Redundant input: "+this.input.substring(this.idx));return{type:"Pattern",flags:y,value:C,loc:this.loc(0)}},r.prototype.disjunction=function(){var p=[],C=this.idx;for(p.push(this.alternative());this.peekChar()==="|";)this.consumeChar("|"),p.push(this.alternative());return{type:"Disjunction",value:p,loc:this.loc(C)}},r.prototype.alternative=function(){for(var p=[],C=this.idx;this.isTerm();)p.push(this.term());return{type:"Alternative",value:p,loc:this.loc(C)}},r.prototype.term=function(){return this.isAssertion()?this.assertion():this.atom()},r.prototype.assertion=function(){var p=this.idx;switch(this.popChar()){case"^":return{type:"StartAnchor",loc:this.loc(p)};case"$":return{type:"EndAnchor",loc:this.loc(p)};case"\\":switch(this.popChar()){case"b":return{type:"WordBoundary",loc:this.loc(p)};case"B":return{type:"NonWordBoundary",loc:this.loc(p)}}throw Error("Invalid Assertion Escape");case"(":this.consumeChar("?");var C;switch(this.popChar()){case"=":C="Lookahead";break;case"!":C="NegativeLookahead";break}a(C);var y=this.disjunction();return this.consumeChar(")"),{type:C,value:y,loc:this.loc(p)}}l()},r.prototype.quantifier=function(p){var C,y=this.idx;switch(this.popChar()){case"*":C={atLeast:0,atMost:1/0};break;case"+":C={atLeast:1,atMost:1/0};break;case"?":C={atLeast:0,atMost:1};break;case"{":var B=this.integerIncludingZero();switch(this.popChar()){case"}":C={atLeast:B,atMost:B};break;case",":var v;this.isDigit()?(v=this.integerIncludingZero(),C={atLeast:B,atMost:v}):C={atLeast:B,atMost:1/0},this.consumeChar("}");break}if(p===!0&&C===void 0)return;a(C);break}if(!(p===!0&&C===void 
0))return a(C),this.peekChar(0)==="?"?(this.consumeChar("?"),C.greedy=!1):C.greedy=!0,C.type="Quantifier",C.loc=this.loc(y),C},r.prototype.atom=function(){var p,C=this.idx;switch(this.peekChar()){case".":p=this.dotAll();break;case"\\":p=this.atomEscape();break;case"[":p=this.characterClass();break;case"(":p=this.group();break}return p===void 0&&this.isPatternCharacter()&&(p=this.patternCharacter()),a(p),p.loc=this.loc(C),this.isQuantifier()&&(p.quantifier=this.quantifier()),p},r.prototype.dotAll=function(){return this.consumeChar("."),{type:"Set",complement:!0,value:[n(` +`),n("\r"),n("\u2028"),n("\u2029")]}},r.prototype.atomEscape=function(){switch(this.consumeChar("\\"),this.peekChar()){case"1":case"2":case"3":case"4":case"5":case"6":case"7":case"8":case"9":return this.decimalEscapeAtom();case"d":case"D":case"s":case"S":case"w":case"W":return this.characterClassEscape();case"f":case"n":case"r":case"t":case"v":return this.controlEscapeAtom();case"c":return this.controlLetterEscapeAtom();case"0":return this.nulCharacterAtom();case"x":return this.hexEscapeSequenceAtom();case"u":return this.regExpUnicodeEscapeSequenceAtom();default:return this.identityEscapeAtom()}},r.prototype.decimalEscapeAtom=function(){var p=this.positiveInteger();return{type:"GroupBackReference",value:p}},r.prototype.characterClassEscape=function(){var p,C=!1;switch(this.popChar()){case"d":p=u;break;case"D":p=u,C=!0;break;case"s":p=f;break;case"S":p=f,C=!0;break;case"w":p=g;break;case"W":p=g,C=!0;break}return a(p),{type:"Set",value:p,complement:C}},r.prototype.controlEscapeAtom=function(){var p;switch(this.popChar()){case"f":p=n("\f");break;case"n":p=n(` +`);break;case"r":p=n("\r");break;case"t":p=n(" ");break;case"v":p=n("\v");break}return a(p),{type:"Character",value:p}},r.prototype.controlLetterEscapeAtom=function(){this.consumeChar("c");var p=this.popChar();if(/[a-zA-Z]/.test(p)===!1)throw Error("Invalid ");var C=p.toUpperCase().charCodeAt(0)-64;return{type:"Character",value:C}},r.prototype.nulCharacterAtom=function(){return this.consumeChar("0"),{type:"Character",value:n("\0")}},r.prototype.hexEscapeSequenceAtom=function(){return this.consumeChar("x"),this.parseHexDigits(2)},r.prototype.regExpUnicodeEscapeSequenceAtom=function(){return this.consumeChar("u"),this.parseHexDigits(4)},r.prototype.identityEscapeAtom=function(){var p=this.popChar();return{type:"Character",value:n(p)}},r.prototype.classPatternCharacterAtom=function(){switch(this.peekChar()){case` +`:case"\r":case"\u2028":case"\u2029":case"\\":case"]":throw Error("TBD");default:var p=this.popChar();return{type:"Character",value:n(p)}}},r.prototype.characterClass=function(){var p=[],C=!1;for(this.consumeChar("["),this.peekChar(0)==="^"&&(this.consumeChar("^"),C=!0);this.isClassAtom();){var y=this.classAtom(),B=y.type==="Character";if(B&&this.isRangeDash()){this.consumeChar("-");var v=this.classAtom(),D=v.type==="Character";if(D){if(v.value=this.input.length)throw Error("Unexpected end of input");this.idx++},r.prototype.loc=function(p){return{begin:p,end:this.idx}};var e=/[0-9a-fA-F]/,t=/[0-9]/,i=/[1-9]/;function n(p){return p.charCodeAt(0)}function s(p,C){p.length!==void 0?p.forEach(function(y){C.push(y)}):C.push(p)}function o(p,C){if(p[C]===!0)throw"duplicate flag "+C;p[C]=!0}function a(p){if(p===void 0)throw Error("Internal Error - Should never get here!")}function l(){throw Error("Internal Error - Should never get here!")}var c,u=[];for(c=n("0");c<=n("9");c++)u.push(c);var 
g=[n("_")].concat(u);for(c=n("a");c<=n("z");c++)g.push(c);for(c=n("A");c<=n("Z");c++)g.push(c);var f=[n(" "),n("\f"),n(` +`),n("\r"),n(" "),n("\v"),n(" "),n("\xA0"),n("\u1680"),n("\u2000"),n("\u2001"),n("\u2002"),n("\u2003"),n("\u2004"),n("\u2005"),n("\u2006"),n("\u2007"),n("\u2008"),n("\u2009"),n("\u200A"),n("\u2028"),n("\u2029"),n("\u202F"),n("\u205F"),n("\u3000"),n("\uFEFF")];function h(){}return h.prototype.visitChildren=function(p){for(var C in p){var y=p[C];p.hasOwnProperty(C)&&(y.type!==void 0?this.visit(y):Array.isArray(y)&&y.forEach(function(B){this.visit(B)},this))}},h.prototype.visit=function(p){switch(p.type){case"Pattern":this.visitPattern(p);break;case"Flags":this.visitFlags(p);break;case"Disjunction":this.visitDisjunction(p);break;case"Alternative":this.visitAlternative(p);break;case"StartAnchor":this.visitStartAnchor(p);break;case"EndAnchor":this.visitEndAnchor(p);break;case"WordBoundary":this.visitWordBoundary(p);break;case"NonWordBoundary":this.visitNonWordBoundary(p);break;case"Lookahead":this.visitLookahead(p);break;case"NegativeLookahead":this.visitNegativeLookahead(p);break;case"Character":this.visitCharacter(p);break;case"Set":this.visitSet(p);break;case"Group":this.visitGroup(p);break;case"GroupBackReference":this.visitGroupBackReference(p);break;case"Quantifier":this.visitQuantifier(p);break}this.visitChildren(p)},h.prototype.visitPattern=function(p){},h.prototype.visitFlags=function(p){},h.prototype.visitDisjunction=function(p){},h.prototype.visitAlternative=function(p){},h.prototype.visitStartAnchor=function(p){},h.prototype.visitEndAnchor=function(p){},h.prototype.visitWordBoundary=function(p){},h.prototype.visitNonWordBoundary=function(p){},h.prototype.visitLookahead=function(p){},h.prototype.visitNegativeLookahead=function(p){},h.prototype.visitCharacter=function(p){},h.prototype.visitSet=function(p){},h.prototype.visitGroup=function(p){},h.prototype.visitGroupBackReference=function(p){},h.prototype.visitQuantifier=function(p){},{RegExpParser:r,BaseRegExpVisitor:h,VERSION:"0.5.0"}})});var ty=w(Xg=>{"use strict";Object.defineProperty(Xg,"__esModule",{value:!0});Xg.clearRegExpParserCache=Xg.getRegExpAst=void 0;var mEe=$I(),ey={},EEe=new mEe.RegExpParser;function IEe(r){var e=r.toString();if(ey.hasOwnProperty(e))return ey[e];var t=EEe.pattern(e);return ey[e]=t,t}Xg.getRegExpAst=IEe;function yEe(){ey={}}Xg.clearRegExpParserCache=yEe});var YY=w(Cn=>{"use strict";var wEe=Cn&&Cn.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Cn,"__esModule",{value:!0});Cn.canMatchCharCode=Cn.firstCharOptimizedIndices=Cn.getOptimizedStartCodesIndices=Cn.failedOptimizationPrefixMsg=void 0;var UY=$I(),gs=Gt(),HY=ty(),xa=Rv(),GY="Complement Sets are not supported for first char optimization";Cn.failedOptimizationPrefixMsg=`Unable to use "first char" lexer optimizations: +`;function BEe(r,e){e===void 0&&(e=!1);try{var t=(0,HY.getRegExpAst)(r),i=iy(t.value,{},t.flags.ignoreCase);return i}catch(s){if(s.message===GY)e&&(0,gs.PRINT_WARNING)(""+Cn.failedOptimizationPrefixMsg+(" Unable to optimize: < "+r.toString()+` > +`)+` Complement Sets 
cannot be automatically optimized. + This will disable the lexer's first char optimizations. + See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#COMPLEMENT for details.`);else{var n="";e&&(n=` + This will disable the lexer's first char optimizations. + See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#REGEXP_PARSING for details.`),(0,gs.PRINT_ERROR)(Cn.failedOptimizationPrefixMsg+` +`+(" Failed parsing: < "+r.toString()+` > +`)+(" Using the regexp-to-ast library version: "+UY.VERSION+` +`)+" Please open an issue at: https://github.com/bd82/regexp-to-ast/issues"+n)}}return[]}Cn.getOptimizedStartCodesIndices=BEe;function iy(r,e,t){switch(r.type){case"Disjunction":for(var i=0;i=xa.minOptimizationVal)for(var f=u.from>=xa.minOptimizationVal?u.from:xa.minOptimizationVal,h=u.to,p=(0,xa.charCodeToOptimizedIndex)(f),C=(0,xa.charCodeToOptimizedIndex)(h),y=p;y<=C;y++)e[y]=y}}});break;case"Group":iy(o.value,e,t);break;default:throw Error("Non Exhaustive Match")}var a=o.quantifier!==void 0&&o.quantifier.atLeast===0;if(o.type==="Group"&&kv(o)===!1||o.type!=="Group"&&a===!1)break}break;default:throw Error("non exhaustive match!")}return(0,gs.values)(e)}Cn.firstCharOptimizedIndices=iy;function ry(r,e,t){var i=(0,xa.charCodeToOptimizedIndex)(r);e[i]=i,t===!0&&bEe(r,e)}function bEe(r,e){var t=String.fromCharCode(r),i=t.toUpperCase();if(i!==t){var n=(0,xa.charCodeToOptimizedIndex)(i.charCodeAt(0));e[n]=n}else{var s=t.toLowerCase();if(s!==t){var n=(0,xa.charCodeToOptimizedIndex)(s.charCodeAt(0));e[n]=n}}}function KY(r,e){return(0,gs.find)(r.value,function(t){if(typeof t=="number")return(0,gs.contains)(e,t);var i=t;return(0,gs.find)(e,function(n){return i.from<=n&&n<=i.to})!==void 0})}function kv(r){return r.quantifier&&r.quantifier.atLeast===0?!0:r.value?(0,gs.isArray)(r.value)?(0,gs.every)(r.value,kv):kv(r.value):!1}var QEe=function(r){wEe(e,r);function e(t){var i=r.call(this)||this;return i.targetCharCodes=t,i.found=!1,i}return e.prototype.visitChildren=function(t){if(this.found!==!0){switch(t.type){case"Lookahead":this.visitLookahead(t);return;case"NegativeLookahead":this.visitNegativeLookahead(t);return}r.prototype.visitChildren.call(this,t)}},e.prototype.visitCharacter=function(t){(0,gs.contains)(this.targetCharCodes,t.value)&&(this.found=!0)},e.prototype.visitSet=function(t){t.complement?KY(t,this.targetCharCodes)===void 0&&(this.found=!0):KY(t,this.targetCharCodes)!==void 0&&(this.found=!0)},e}(UY.BaseRegExpVisitor);function SEe(r,e){if(e instanceof RegExp){var t=(0,HY.getRegExpAst)(e),i=new QEe(r);return i.visit(t),i.found}else return(0,gs.find)(e,function(n){return(0,gs.contains)(r,n.charCodeAt(0))})!==void 0}Cn.canMatchCharCode=SEe});var Rv=w(Ve=>{"use strict";var jY=Ve&&Ve.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new 
i)}}();Object.defineProperty(Ve,"__esModule",{value:!0});Ve.charCodeToOptimizedIndex=Ve.minOptimizationVal=Ve.buildLineBreakIssueMessage=Ve.LineTerminatorOptimizedTester=Ve.isShortPattern=Ve.isCustomPattern=Ve.cloneEmptyGroups=Ve.performWarningRuntimeChecks=Ve.performRuntimeChecks=Ve.addStickyFlag=Ve.addStartOfInput=Ve.findUnreachablePatterns=Ve.findModesThatDoNotExist=Ve.findInvalidGroupType=Ve.findDuplicatePatterns=Ve.findUnsupportedFlags=Ve.findStartOfInputAnchor=Ve.findEmptyMatchRegExps=Ve.findEndOfInputAnchor=Ve.findInvalidPatterns=Ve.findMissingPatterns=Ve.validatePatterns=Ve.analyzeTokenTypes=Ve.enableSticky=Ve.disableSticky=Ve.SUPPORT_STICKY=Ve.MODES=Ve.DEFAULT_MODE=void 0;var qY=$I(),ir=Bd(),xe=Gt(),Zg=YY(),JY=ty(),Do="PATTERN";Ve.DEFAULT_MODE="defaultMode";Ve.MODES="modes";Ve.SUPPORT_STICKY=typeof new RegExp("(?:)").sticky=="boolean";function vEe(){Ve.SUPPORT_STICKY=!1}Ve.disableSticky=vEe;function xEe(){Ve.SUPPORT_STICKY=!0}Ve.enableSticky=xEe;function PEe(r,e){e=(0,xe.defaults)(e,{useSticky:Ve.SUPPORT_STICKY,debug:!1,safeMode:!1,positionTracking:"full",lineTerminatorCharacters:["\r",` +`],tracer:function(v,D){return D()}});var t=e.tracer;t("initCharCodeToOptimizedIndexMap",function(){KEe()});var i;t("Reject Lexer.NA",function(){i=(0,xe.reject)(r,function(v){return v[Do]===ir.Lexer.NA})});var n=!1,s;t("Transform Patterns",function(){n=!1,s=(0,xe.map)(i,function(v){var D=v[Do];if((0,xe.isRegExp)(D)){var T=D.source;return T.length===1&&T!=="^"&&T!=="$"&&T!=="."&&!D.ignoreCase?T:T.length===2&&T[0]==="\\"&&!(0,xe.contains)(["d","D","s","S","t","r","n","t","0","c","b","B","f","v","w","W"],T[1])?T[1]:e.useSticky?Tv(D):Nv(D)}else{if((0,xe.isFunction)(D))return n=!0,{exec:D};if((0,xe.has)(D,"exec"))return n=!0,D;if(typeof D=="string"){if(D.length===1)return D;var H=D.replace(/[\\^$.*+?()[\]{}|]/g,"\\$&"),j=new RegExp(H);return e.useSticky?Tv(j):Nv(j)}else throw Error("non exhaustive match")}})});var o,a,l,c,u;t("misc mapping",function(){o=(0,xe.map)(i,function(v){return v.tokenTypeIdx}),a=(0,xe.map)(i,function(v){var D=v.GROUP;if(D!==ir.Lexer.SKIPPED){if((0,xe.isString)(D))return D;if((0,xe.isUndefined)(D))return!1;throw Error("non exhaustive match")}}),l=(0,xe.map)(i,function(v){var D=v.LONGER_ALT;if(D){var T=(0,xe.isArray)(D)?(0,xe.map)(D,function(H){return(0,xe.indexOf)(i,H)}):[(0,xe.indexOf)(i,D)];return T}}),c=(0,xe.map)(i,function(v){return v.PUSH_MODE}),u=(0,xe.map)(i,function(v){return(0,xe.has)(v,"POP_MODE")})});var g;t("Line Terminator Handling",function(){var v=oj(e.lineTerminatorCharacters);g=(0,xe.map)(i,function(D){return!1}),e.positionTracking!=="onlyOffset"&&(g=(0,xe.map)(i,function(D){if((0,xe.has)(D,"LINE_BREAKS"))return D.LINE_BREAKS;if(nj(D,v)===!1)return(0,Zg.canMatchCharCode)(v,D.PATTERN)}))});var f,h,p,C;t("Misc Mapping #2",function(){f=(0,xe.map)(i,Mv),h=(0,xe.map)(s,ij),p=(0,xe.reduce)(i,function(v,D){var T=D.GROUP;return(0,xe.isString)(T)&&T!==ir.Lexer.SKIPPED&&(v[T]=[]),v},{}),C=(0,xe.map)(s,function(v,D){return{pattern:s[D],longerAlt:l[D],canLineTerminator:g[D],isCustom:f[D],short:h[D],group:a[D],push:c[D],pop:u[D],tokenTypeIdx:o[D],tokenType:i[D]}})});var y=!0,B=[];return e.safeMode||t("First Char Optimization",function(){B=(0,xe.reduce)(i,function(v,D,T){if(typeof D.PATTERN=="string"){var H=D.PATTERN.charCodeAt(0),j=Lv(H);Fv(v,j,C[T])}else if((0,xe.isArray)(D.START_CHARS_HINT)){var $;(0,xe.forEach)(D.START_CHARS_HINT,function(W){var _=typeof W=="string"?W.charCodeAt(0):W,A=Lv(_);$!==A&&($=A,Fv(v,A,C[T]))})}else 
if((0,xe.isRegExp)(D.PATTERN))if(D.PATTERN.unicode)y=!1,e.ensureOptimizations&&(0,xe.PRINT_ERROR)(""+Zg.failedOptimizationPrefixMsg+(" Unable to analyze < "+D.PATTERN.toString()+` > pattern. +`)+` The regexp unicode flag is not currently supported by the regexp-to-ast library. + This will disable the lexer's first char optimizations. + For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNICODE_OPTIMIZE`);else{var V=(0,Zg.getOptimizedStartCodesIndices)(D.PATTERN,e.ensureOptimizations);(0,xe.isEmpty)(V)&&(y=!1),(0,xe.forEach)(V,function(W){Fv(v,W,C[T])})}else e.ensureOptimizations&&(0,xe.PRINT_ERROR)(""+Zg.failedOptimizationPrefixMsg+(" TokenType: <"+D.name+`> is using a custom token pattern without providing parameter. +`)+` This will disable the lexer's first char optimizations. + For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_OPTIMIZE`),y=!1;return v},[])}),t("ArrayPacking",function(){B=(0,xe.packArray)(B)}),{emptyGroups:p,patternIdxToConfig:C,charCodeToPatternIdxToConfig:B,hasCustom:n,canBeOptimized:y}}Ve.analyzeTokenTypes=PEe;function DEe(r,e){var t=[],i=WY(r);t=t.concat(i.errors);var n=zY(i.valid),s=n.valid;return t=t.concat(n.errors),t=t.concat(kEe(s)),t=t.concat(ej(s)),t=t.concat(tj(s,e)),t=t.concat(rj(s)),t}Ve.validatePatterns=DEe;function kEe(r){var e=[],t=(0,xe.filter)(r,function(i){return(0,xe.isRegExp)(i[Do])});return e=e.concat(VY(t)),e=e.concat(ZY(t)),e=e.concat(_Y(t)),e=e.concat($Y(t)),e=e.concat(XY(t)),e}function WY(r){var e=(0,xe.filter)(r,function(n){return!(0,xe.has)(n,Do)}),t=(0,xe.map)(e,function(n){return{message:"Token Type: ->"+n.name+"<- missing static 'PATTERN' property",type:ir.LexerDefinitionErrorType.MISSING_PATTERN,tokenTypes:[n]}}),i=(0,xe.difference)(r,e);return{errors:t,valid:i}}Ve.findMissingPatterns=WY;function zY(r){var e=(0,xe.filter)(r,function(n){var s=n[Do];return!(0,xe.isRegExp)(s)&&!(0,xe.isFunction)(s)&&!(0,xe.has)(s,"exec")&&!(0,xe.isString)(s)}),t=(0,xe.map)(e,function(n){return{message:"Token Type: ->"+n.name+"<- static 'PATTERN' can only be a RegExp, a Function matching the {CustomPatternMatcherFunc} type or an Object matching the {ICustomPattern} interface.",type:ir.LexerDefinitionErrorType.INVALID_PATTERN,tokenTypes:[n]}}),i=(0,xe.difference)(r,e);return{errors:t,valid:i}}Ve.findInvalidPatterns=zY;var REe=/[^\\][\$]/;function VY(r){var e=function(n){jY(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return s.prototype.visitEndAnchor=function(o){this.found=!0},s}(qY.BaseRegExpVisitor),t=(0,xe.filter)(r,function(n){var s=n[Do];try{var o=(0,JY.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch{return REe.test(s.source)}}),i=(0,xe.map)(t,function(n){return{message:`Unexpected RegExp Anchor Error: + Token Type: ->`+n.name+`<- static 'PATTERN' cannot contain end of input anchor '$' + See chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS for details.`,type:ir.LexerDefinitionErrorType.EOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ve.findEndOfInputAnchor=VY;function XY(r){var e=(0,xe.filter)(r,function(i){var n=i[Do];return n.test("")}),t=(0,xe.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'PATTERN' must not match an empty string",type:ir.LexerDefinitionErrorType.EMPTY_MATCH_PATTERN,tokenTypes:[i]}});return t}Ve.findEmptyMatchRegExps=XY;var FEe=/[^\\[][\^]|^\^/;function ZY(r){var e=function(n){jY(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return 
s.prototype.visitStartAnchor=function(o){this.found=!0},s}(qY.BaseRegExpVisitor),t=(0,xe.filter)(r,function(n){var s=n[Do];try{var o=(0,JY.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch{return FEe.test(s.source)}}),i=(0,xe.map)(t,function(n){return{message:`Unexpected RegExp Anchor Error: + Token Type: ->`+n.name+`<- static 'PATTERN' cannot contain start of input anchor '^' + See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS for details.`,type:ir.LexerDefinitionErrorType.SOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ve.findStartOfInputAnchor=ZY;function _Y(r){var e=(0,xe.filter)(r,function(i){var n=i[Do];return n instanceof RegExp&&(n.multiline||n.global)}),t=(0,xe.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'PATTERN' may NOT contain global('g') or multiline('m')",type:ir.LexerDefinitionErrorType.UNSUPPORTED_FLAGS_FOUND,tokenTypes:[i]}});return t}Ve.findUnsupportedFlags=_Y;function $Y(r){var e=[],t=(0,xe.map)(r,function(s){return(0,xe.reduce)(r,function(o,a){return s.PATTERN.source===a.PATTERN.source&&!(0,xe.contains)(e,a)&&a.PATTERN!==ir.Lexer.NA&&(e.push(a),o.push(a)),o},[])});t=(0,xe.compact)(t);var i=(0,xe.filter)(t,function(s){return s.length>1}),n=(0,xe.map)(i,function(s){var o=(0,xe.map)(s,function(l){return l.name}),a=(0,xe.first)(s).PATTERN;return{message:"The same RegExp pattern ->"+a+"<-"+("has been used in all of the following Token Types: "+o.join(", ")+" <-"),type:ir.LexerDefinitionErrorType.DUPLICATE_PATTERNS_FOUND,tokenTypes:s}});return n}Ve.findDuplicatePatterns=$Y;function ej(r){var e=(0,xe.filter)(r,function(i){if(!(0,xe.has)(i,"GROUP"))return!1;var n=i.GROUP;return n!==ir.Lexer.SKIPPED&&n!==ir.Lexer.NA&&!(0,xe.isString)(n)}),t=(0,xe.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'GROUP' can only be Lexer.SKIPPED/Lexer.NA/A String",type:ir.LexerDefinitionErrorType.INVALID_GROUP_TYPE_FOUND,tokenTypes:[i]}});return t}Ve.findInvalidGroupType=ej;function tj(r,e){var t=(0,xe.filter)(r,function(n){return n.PUSH_MODE!==void 0&&!(0,xe.contains)(e,n.PUSH_MODE)}),i=(0,xe.map)(t,function(n){var s="Token Type: ->"+n.name+"<- static 'PUSH_MODE' value cannot refer to a Lexer Mode ->"+n.PUSH_MODE+"<-which does not exist";return{message:s,type:ir.LexerDefinitionErrorType.PUSH_MODE_DOES_NOT_EXIST,tokenTypes:[n]}});return i}Ve.findModesThatDoNotExist=tj;function rj(r){var e=[],t=(0,xe.reduce)(r,function(i,n,s){var o=n.PATTERN;return o===ir.Lexer.NA||((0,xe.isString)(o)?i.push({str:o,idx:s,tokenType:n}):(0,xe.isRegExp)(o)&&TEe(o)&&i.push({str:o.source,idx:s,tokenType:n})),i},[]);return(0,xe.forEach)(r,function(i,n){(0,xe.forEach)(t,function(s){var o=s.str,a=s.idx,l=s.tokenType;if(n"+i.name+"<-")+`in the lexer's definition. 
+See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNREACHABLE`;e.push({message:c,type:ir.LexerDefinitionErrorType.UNREACHABLE_PATTERN,tokenTypes:[i,l]})}})}),e}Ve.findUnreachablePatterns=rj;function NEe(r,e){if((0,xe.isRegExp)(e)){var t=e.exec(r);return t!==null&&t.index===0}else{if((0,xe.isFunction)(e))return e(r,0,[],{});if((0,xe.has)(e,"exec"))return e.exec(r,0,[],{});if(typeof e=="string")return e===r;throw Error("non exhaustive match")}}function TEe(r){var e=[".","\\","[","]","|","^","$","(",")","?","*","+","{"];return(0,xe.find)(e,function(t){return r.source.indexOf(t)!==-1})===void 0}function Nv(r){var e=r.ignoreCase?"i":"";return new RegExp("^(?:"+r.source+")",e)}Ve.addStartOfInput=Nv;function Tv(r){var e=r.ignoreCase?"iy":"y";return new RegExp(""+r.source,e)}Ve.addStickyFlag=Tv;function LEe(r,e,t){var i=[];return(0,xe.has)(r,Ve.DEFAULT_MODE)||i.push({message:"A MultiMode Lexer cannot be initialized without a <"+Ve.DEFAULT_MODE+`> property in its definition +`,type:ir.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE}),(0,xe.has)(r,Ve.MODES)||i.push({message:"A MultiMode Lexer cannot be initialized without a <"+Ve.MODES+`> property in its definition +`,type:ir.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY}),(0,xe.has)(r,Ve.MODES)&&(0,xe.has)(r,Ve.DEFAULT_MODE)&&!(0,xe.has)(r.modes,r.defaultMode)&&i.push({message:"A MultiMode Lexer cannot be initialized with a "+Ve.DEFAULT_MODE+": <"+r.defaultMode+`>which does not exist +`,type:ir.LexerDefinitionErrorType.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST}),(0,xe.has)(r,Ve.MODES)&&(0,xe.forEach)(r.modes,function(n,s){(0,xe.forEach)(n,function(o,a){(0,xe.isUndefined)(o)&&i.push({message:"A Lexer cannot be initialized using an undefined Token Type. Mode:"+("<"+s+"> at index: <"+a+`> +`),type:ir.LexerDefinitionErrorType.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED})})}),i}Ve.performRuntimeChecks=LEe;function MEe(r,e,t){var i=[],n=!1,s=(0,xe.compact)((0,xe.flatten)((0,xe.mapValues)(r.modes,function(l){return l}))),o=(0,xe.reject)(s,function(l){return l[Do]===ir.Lexer.NA}),a=oj(t);return e&&(0,xe.forEach)(o,function(l){var c=nj(l,a);if(c!==!1){var u=sj(l,c),g={message:u,type:c.issue,tokenType:l};i.push(g)}else(0,xe.has)(l,"LINE_BREAKS")?l.LINE_BREAKS===!0&&(n=!0):(0,Zg.canMatchCharCode)(a,l.PATTERN)&&(n=!0)}),e&&!n&&i.push({message:`Warning: No LINE_BREAKS Found. + This Lexer has been defined to track line and column information, + But none of the Token Types can be identified as matching a line terminator. + See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#LINE_BREAKS + for details.`,type:ir.LexerDefinitionErrorType.NO_LINE_BREAKS_FLAGS}),i}Ve.performWarningRuntimeChecks=MEe;function OEe(r){var e={},t=(0,xe.keys)(r);return(0,xe.forEach)(t,function(i){var n=r[i];if((0,xe.isArray)(n))e[i]=[];else throw Error("non exhaustive match")}),e}Ve.cloneEmptyGroups=OEe;function Mv(r){var e=r.PATTERN;if((0,xe.isRegExp)(e))return!1;if((0,xe.isFunction)(e))return!0;if((0,xe.has)(e,"exec"))return!0;if((0,xe.isString)(e))return!1;throw Error("non exhaustive match")}Ve.isCustomPattern=Mv;function ij(r){return(0,xe.isString)(r)&&r.length===1?r.charCodeAt(0):!1}Ve.isShortPattern=ij;Ve.LineTerminatorOptimizedTester={test:function(r){for(var e=r.length,t=this.lastIndex;t Token Type +`)+(" Root cause: "+e.errMsg+`. 
+`)+" For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#IDENTIFY_TERMINATOR";if(e.issue===ir.LexerDefinitionErrorType.CUSTOM_LINE_BREAK)return`Warning: A Custom Token Pattern should specify the option. +`+(" The problem is in the <"+r.name+`> Token Type +`)+" For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_LINE_BREAK";throw Error("non exhaustive match")}Ve.buildLineBreakIssueMessage=sj;function oj(r){var e=(0,xe.map)(r,function(t){return(0,xe.isString)(t)&&t.length>0?t.charCodeAt(0):t});return e}function Fv(r,e,t){r[e]===void 0?r[e]=[t]:r[e].push(t)}Ve.minOptimizationVal=256;var ny=[];function Lv(r){return r255?255+~~(r/255):r}}});var _g=w(Nt=>{"use strict";Object.defineProperty(Nt,"__esModule",{value:!0});Nt.isTokenType=Nt.hasExtendingTokensTypesMapProperty=Nt.hasExtendingTokensTypesProperty=Nt.hasCategoriesProperty=Nt.hasShortKeyProperty=Nt.singleAssignCategoriesToksMap=Nt.assignCategoriesMapProp=Nt.assignCategoriesTokensProp=Nt.assignTokenDefaultProps=Nt.expandCategories=Nt.augmentTokenTypes=Nt.tokenIdxToClass=Nt.tokenShortNameIdx=Nt.tokenStructuredMatcherNoCategories=Nt.tokenStructuredMatcher=void 0;var Zr=Gt();function UEe(r,e){var t=r.tokenTypeIdx;return t===e.tokenTypeIdx?!0:e.isParent===!0&&e.categoryMatchesMap[t]===!0}Nt.tokenStructuredMatcher=UEe;function HEe(r,e){return r.tokenTypeIdx===e.tokenTypeIdx}Nt.tokenStructuredMatcherNoCategories=HEe;Nt.tokenShortNameIdx=1;Nt.tokenIdxToClass={};function GEe(r){var e=aj(r);Aj(e),cj(e),lj(e),(0,Zr.forEach)(e,function(t){t.isParent=t.categoryMatches.length>0})}Nt.augmentTokenTypes=GEe;function aj(r){for(var e=(0,Zr.cloneArr)(r),t=r,i=!0;i;){t=(0,Zr.compact)((0,Zr.flatten)((0,Zr.map)(t,function(s){return s.CATEGORIES})));var n=(0,Zr.difference)(t,e);e=e.concat(n),(0,Zr.isEmpty)(n)?i=!1:t=n}return e}Nt.expandCategories=aj;function Aj(r){(0,Zr.forEach)(r,function(e){uj(e)||(Nt.tokenIdxToClass[Nt.tokenShortNameIdx]=e,e.tokenTypeIdx=Nt.tokenShortNameIdx++),Ov(e)&&!(0,Zr.isArray)(e.CATEGORIES)&&(e.CATEGORIES=[e.CATEGORIES]),Ov(e)||(e.CATEGORIES=[]),gj(e)||(e.categoryMatches=[]),fj(e)||(e.categoryMatchesMap={})})}Nt.assignTokenDefaultProps=Aj;function lj(r){(0,Zr.forEach)(r,function(e){e.categoryMatches=[],(0,Zr.forEach)(e.categoryMatchesMap,function(t,i){e.categoryMatches.push(Nt.tokenIdxToClass[i].tokenTypeIdx)})})}Nt.assignCategoriesTokensProp=lj;function cj(r){(0,Zr.forEach)(r,function(e){Kv([],e)})}Nt.assignCategoriesMapProp=cj;function Kv(r,e){(0,Zr.forEach)(r,function(t){e.categoryMatchesMap[t.tokenTypeIdx]=!0}),(0,Zr.forEach)(e.CATEGORIES,function(t){var i=r.concat(e);(0,Zr.contains)(i,t)||Kv(i,t)})}Nt.singleAssignCategoriesToksMap=Kv;function uj(r){return(0,Zr.has)(r,"tokenTypeIdx")}Nt.hasShortKeyProperty=uj;function Ov(r){return(0,Zr.has)(r,"CATEGORIES")}Nt.hasCategoriesProperty=Ov;function gj(r){return(0,Zr.has)(r,"categoryMatches")}Nt.hasExtendingTokensTypesProperty=gj;function fj(r){return(0,Zr.has)(r,"categoryMatchesMap")}Nt.hasExtendingTokensTypesMapProperty=fj;function YEe(r){return(0,Zr.has)(r,"tokenTypeIdx")}Nt.isTokenType=YEe});var Uv=w(sy=>{"use strict";Object.defineProperty(sy,"__esModule",{value:!0});sy.defaultLexerErrorProvider=void 0;sy.defaultLexerErrorProvider={buildUnableToPopLexerModeMessage:function(r){return"Unable to pop Lexer Mode after encountering Token ->"+r.image+"<- The Mode Stack is empty"},buildUnexpectedCharactersMessage:function(r,e,t,i,n){return"unexpected character: ->"+r.charAt(e)+"<- at offset: "+e+","+(" skipped "+t+" 
characters.")}}});var Bd=w(Cc=>{"use strict";Object.defineProperty(Cc,"__esModule",{value:!0});Cc.Lexer=Cc.LexerDefinitionErrorType=void 0;var _s=Rv(),nr=Gt(),jEe=_g(),qEe=Uv(),JEe=ty(),WEe;(function(r){r[r.MISSING_PATTERN=0]="MISSING_PATTERN",r[r.INVALID_PATTERN=1]="INVALID_PATTERN",r[r.EOI_ANCHOR_FOUND=2]="EOI_ANCHOR_FOUND",r[r.UNSUPPORTED_FLAGS_FOUND=3]="UNSUPPORTED_FLAGS_FOUND",r[r.DUPLICATE_PATTERNS_FOUND=4]="DUPLICATE_PATTERNS_FOUND",r[r.INVALID_GROUP_TYPE_FOUND=5]="INVALID_GROUP_TYPE_FOUND",r[r.PUSH_MODE_DOES_NOT_EXIST=6]="PUSH_MODE_DOES_NOT_EXIST",r[r.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE=7]="MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE",r[r.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY=8]="MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY",r[r.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST=9]="MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST",r[r.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED=10]="LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED",r[r.SOI_ANCHOR_FOUND=11]="SOI_ANCHOR_FOUND",r[r.EMPTY_MATCH_PATTERN=12]="EMPTY_MATCH_PATTERN",r[r.NO_LINE_BREAKS_FLAGS=13]="NO_LINE_BREAKS_FLAGS",r[r.UNREACHABLE_PATTERN=14]="UNREACHABLE_PATTERN",r[r.IDENTIFY_TERMINATOR=15]="IDENTIFY_TERMINATOR",r[r.CUSTOM_LINE_BREAK=16]="CUSTOM_LINE_BREAK"})(WEe=Cc.LexerDefinitionErrorType||(Cc.LexerDefinitionErrorType={}));var bd={deferDefinitionErrorsHandling:!1,positionTracking:"full",lineTerminatorsPattern:/\n|\r\n?/g,lineTerminatorCharacters:[` +`,"\r"],ensureOptimizations:!1,safeMode:!1,errorMessageProvider:qEe.defaultLexerErrorProvider,traceInitPerf:!1,skipValidations:!1};Object.freeze(bd);var zEe=function(){function r(e,t){var i=this;if(t===void 0&&(t=bd),this.lexerDefinition=e,this.lexerDefinitionErrors=[],this.lexerDefinitionWarning=[],this.patternIdxToConfig={},this.charCodeToPatternIdxToConfig={},this.modes=[],this.emptyGroups={},this.config=void 0,this.trackStartLines=!0,this.trackEndLines=!0,this.hasCustom=!1,this.canModeBeOptimized={},typeof t=="boolean")throw Error(`The second argument to the Lexer constructor is now an ILexerConfig Object. +a boolean 2nd argument is no longer supported`);this.config=(0,nr.merge)(bd,t);var n=this.config.traceInitPerf;n===!0?(this.traceInitMaxIdent=1/0,this.traceInitPerf=!0):typeof n=="number"&&(this.traceInitMaxIdent=n,this.traceInitPerf=!0),this.traceInitIndent=-1,this.TRACE_INIT("Lexer Constructor",function(){var s,o=!0;i.TRACE_INIT("Lexer Config handling",function(){if(i.config.lineTerminatorsPattern===bd.lineTerminatorsPattern)i.config.lineTerminatorsPattern=_s.LineTerminatorOptimizedTester;else if(i.config.lineTerminatorCharacters===bd.lineTerminatorCharacters)throw Error(`Error: Missing property on the Lexer config. 
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#MISSING_LINE_TERM_CHARS`);if(t.safeMode&&t.ensureOptimizations)throw Error('"safeMode" and "ensureOptimizations" flags are mutually exclusive.');i.trackStartLines=/full|onlyStart/i.test(i.config.positionTracking),i.trackEndLines=/full/i.test(i.config.positionTracking),(0,nr.isArray)(e)?(s={modes:{}},s.modes[_s.DEFAULT_MODE]=(0,nr.cloneArr)(e),s[_s.DEFAULT_MODE]=_s.DEFAULT_MODE):(o=!1,s=(0,nr.cloneObj)(e))}),i.config.skipValidations===!1&&(i.TRACE_INIT("performRuntimeChecks",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,_s.performRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))}),i.TRACE_INIT("performWarningRuntimeChecks",function(){i.lexerDefinitionWarning=i.lexerDefinitionWarning.concat((0,_s.performWarningRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))})),s.modes=s.modes?s.modes:{},(0,nr.forEach)(s.modes,function(u,g){s.modes[g]=(0,nr.reject)(u,function(f){return(0,nr.isUndefined)(f)})});var a=(0,nr.keys)(s.modes);if((0,nr.forEach)(s.modes,function(u,g){i.TRACE_INIT("Mode: <"+g+"> processing",function(){if(i.modes.push(g),i.config.skipValidations===!1&&i.TRACE_INIT("validatePatterns",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,_s.validatePatterns)(u,a))}),(0,nr.isEmpty)(i.lexerDefinitionErrors)){(0,jEe.augmentTokenTypes)(u);var f;i.TRACE_INIT("analyzeTokenTypes",function(){f=(0,_s.analyzeTokenTypes)(u,{lineTerminatorCharacters:i.config.lineTerminatorCharacters,positionTracking:t.positionTracking,ensureOptimizations:t.ensureOptimizations,safeMode:t.safeMode,tracer:i.TRACE_INIT.bind(i)})}),i.patternIdxToConfig[g]=f.patternIdxToConfig,i.charCodeToPatternIdxToConfig[g]=f.charCodeToPatternIdxToConfig,i.emptyGroups=(0,nr.merge)(i.emptyGroups,f.emptyGroups),i.hasCustom=f.hasCustom||i.hasCustom,i.canModeBeOptimized[g]=f.canBeOptimized}})}),i.defaultMode=s.defaultMode,!(0,nr.isEmpty)(i.lexerDefinitionErrors)&&!i.config.deferDefinitionErrorsHandling){var l=(0,nr.map)(i.lexerDefinitionErrors,function(u){return u.message}),c=l.join(`----------------------- +`);throw new Error(`Errors detected in definition of Lexer: +`+c)}(0,nr.forEach)(i.lexerDefinitionWarning,function(u){(0,nr.PRINT_WARNING)(u.message)}),i.TRACE_INIT("Choosing sub-methods implementations",function(){if(_s.SUPPORT_STICKY?(i.chopInput=nr.IDENTITY,i.match=i.matchWithTest):(i.updateLastIndex=nr.NOOP,i.match=i.matchWithExec),o&&(i.handleModes=nr.NOOP),i.trackStartLines===!1&&(i.computeNewColumn=nr.IDENTITY),i.trackEndLines===!1&&(i.updateTokenEndLineColumnLocation=nr.NOOP),/full/i.test(i.config.positionTracking))i.createTokenInstance=i.createFullToken;else if(/onlyStart/i.test(i.config.positionTracking))i.createTokenInstance=i.createStartOnlyToken;else if(/onlyOffset/i.test(i.config.positionTracking))i.createTokenInstance=i.createOffsetOnlyToken;else throw Error('Invalid config option: "'+i.config.positionTracking+'"');i.hasCustom?(i.addToken=i.addTokenUsingPush,i.handlePayload=i.handlePayloadWithCustom):(i.addToken=i.addTokenUsingMemberAccess,i.handlePayload=i.handlePayloadNoCustom)}),i.TRACE_INIT("Failed Optimization Warnings",function(){var u=(0,nr.reduce)(i.canModeBeOptimized,function(g,f,h){return f===!1&&g.push(h),g},[]);if(t.ensureOptimizations&&!(0,nr.isEmpty)(u))throw Error("Lexer Modes: < "+u.join(", ")+` > cannot be optimized. + Disable the "ensureOptimizations" lexer config flag to silently ignore this and run the lexer in an un-optimized mode. 
+ Or inspect the console log for details on how to resolve these issues.`)}),i.TRACE_INIT("clearRegExpParserCache",function(){(0,JEe.clearRegExpParserCache)()}),i.TRACE_INIT("toFastProperties",function(){(0,nr.toFastProperties)(i)})})}return r.prototype.tokenize=function(e,t){if(t===void 0&&(t=this.defaultMode),!(0,nr.isEmpty)(this.lexerDefinitionErrors)){var i=(0,nr.map)(this.lexerDefinitionErrors,function(o){return o.message}),n=i.join(`----------------------- +`);throw new Error(`Unable to Tokenize because Errors detected in definition of Lexer: +`+n)}var s=this.tokenizeInternal(e,t);return s},r.prototype.tokenizeInternal=function(e,t){var i=this,n,s,o,a,l,c,u,g,f,h,p,C,y,B,v,D,T=e,H=T.length,j=0,$=0,V=this.hasCustom?0:Math.floor(e.length/10),W=new Array(V),_=[],A=this.trackStartLines?1:void 0,Ae=this.trackStartLines?1:void 0,ge=(0,_s.cloneEmptyGroups)(this.emptyGroups),re=this.trackStartLines,M=this.config.lineTerminatorsPattern,F=0,ue=[],pe=[],ke=[],Fe=[];Object.freeze(Fe);var Ne=void 0;function oe(){return ue}function le(pr){var Ii=(0,_s.charCodeToOptimizedIndex)(pr),rs=pe[Ii];return rs===void 0?Fe:rs}var Be=function(pr){if(ke.length===1&&pr.tokenType.PUSH_MODE===void 0){var Ii=i.config.errorMessageProvider.buildUnableToPopLexerModeMessage(pr);_.push({offset:pr.startOffset,line:pr.startLine!==void 0?pr.startLine:void 0,column:pr.startColumn!==void 0?pr.startColumn:void 0,length:pr.image.length,message:Ii})}else{ke.pop();var rs=(0,nr.last)(ke);ue=i.patternIdxToConfig[rs],pe=i.charCodeToPatternIdxToConfig[rs],F=ue.length;var fa=i.canModeBeOptimized[rs]&&i.config.safeMode===!1;pe&&fa?Ne=le:Ne=oe}};function fe(pr){ke.push(pr),pe=this.charCodeToPatternIdxToConfig[pr],ue=this.patternIdxToConfig[pr],F=ue.length,F=ue.length;var Ii=this.canModeBeOptimized[pr]&&this.config.safeMode===!1;pe&&Ii?Ne=le:Ne=oe}fe.call(this,t);for(var ae;jc.length){c=a,u=g,ae=_e;break}}}break}}if(c!==null){if(f=c.length,h=ae.group,h!==void 0&&(p=ae.tokenTypeIdx,C=this.createTokenInstance(c,j,p,ae.tokenType,A,Ae,f),this.handlePayload(C,u),h===!1?$=this.addToken(W,$,C):ge[h].push(C)),e=this.chopInput(e,f),j=j+f,Ae=this.computeNewColumn(Ae,f),re===!0&&ae.canLineTerminator===!0){var It=0,Mr=void 0,ii=void 0;M.lastIndex=0;do Mr=M.test(c),Mr===!0&&(ii=M.lastIndex-1,It++);while(Mr===!0);It!==0&&(A=A+It,Ae=f-ii,this.updateTokenEndLineColumnLocation(C,h,ii,It,A,Ae,f))}this.handleModes(ae,Be,fe,C)}else{for(var gi=j,hr=A,fi=Ae,ni=!1;!ni&&j <"+e+">");var n=(0,nr.timer)(t),s=n.time,o=n.value,a=s>10?console.warn:console.log;return this.traceInitIndent time: "+s+"ms"),this.traceInitIndent--,o}else return t()},r.SKIPPED="This marks a skipped Token pattern, this means each token identified by it willbe consumed and then thrown into oblivion, this can be used to for example to completely ignore whitespace.",r.NA=/NOT_APPLICABLE/,r}();Cc.Lexer=zEe});var LA=w(Qi=>{"use strict";Object.defineProperty(Qi,"__esModule",{value:!0});Qi.tokenMatcher=Qi.createTokenInstance=Qi.EOF=Qi.createToken=Qi.hasTokenLabel=Qi.tokenName=Qi.tokenLabel=void 0;var $s=Gt(),VEe=Bd(),Hv=_g();function XEe(r){return wj(r)?r.LABEL:r.name}Qi.tokenLabel=XEe;function ZEe(r){return r.name}Qi.tokenName=ZEe;function wj(r){return(0,$s.isString)(r.LABEL)&&r.LABEL!==""}Qi.hasTokenLabel=wj;var _Ee="parent",hj="categories",pj="label",dj="group",Cj="push_mode",mj="pop_mode",Ej="longer_alt",Ij="line_breaks",yj="start_chars_hint";function Bj(r){return $Ee(r)}Qi.createToken=Bj;function $Ee(r){var 
e=r.pattern,t={};if(t.name=r.name,(0,$s.isUndefined)(e)||(t.PATTERN=e),(0,$s.has)(r,_Ee))throw`The parent property is no longer supported. +See: https://github.com/chevrotain/chevrotain/issues/564#issuecomment-349062346 for details.`;return(0,$s.has)(r,hj)&&(t.CATEGORIES=r[hj]),(0,Hv.augmentTokenTypes)([t]),(0,$s.has)(r,pj)&&(t.LABEL=r[pj]),(0,$s.has)(r,dj)&&(t.GROUP=r[dj]),(0,$s.has)(r,mj)&&(t.POP_MODE=r[mj]),(0,$s.has)(r,Cj)&&(t.PUSH_MODE=r[Cj]),(0,$s.has)(r,Ej)&&(t.LONGER_ALT=r[Ej]),(0,$s.has)(r,Ij)&&(t.LINE_BREAKS=r[Ij]),(0,$s.has)(r,yj)&&(t.START_CHARS_HINT=r[yj]),t}Qi.EOF=Bj({name:"EOF",pattern:VEe.Lexer.NA});(0,Hv.augmentTokenTypes)([Qi.EOF]);function eIe(r,e,t,i,n,s,o,a){return{image:e,startOffset:t,endOffset:i,startLine:n,endLine:s,startColumn:o,endColumn:a,tokenTypeIdx:r.tokenTypeIdx,tokenType:r}}Qi.createTokenInstance=eIe;function tIe(r,e){return(0,Hv.tokenStructuredMatcher)(r,e)}Qi.tokenMatcher=tIe});var mn=w(zt=>{"use strict";var Pa=zt&&zt.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(zt,"__esModule",{value:!0});zt.serializeProduction=zt.serializeGrammar=zt.Terminal=zt.Alternation=zt.RepetitionWithSeparator=zt.Repetition=zt.RepetitionMandatoryWithSeparator=zt.RepetitionMandatory=zt.Option=zt.Alternative=zt.Rule=zt.NonTerminal=zt.AbstractProduction=void 0;var Ar=Gt(),rIe=LA(),ko=function(){function r(e){this._definition=e}return Object.defineProperty(r.prototype,"definition",{get:function(){return this._definition},set:function(e){this._definition=e},enumerable:!1,configurable:!0}),r.prototype.accept=function(e){e.visit(this),(0,Ar.forEach)(this.definition,function(t){t.accept(e)})},r}();zt.AbstractProduction=ko;var bj=function(r){Pa(e,r);function e(t){var i=r.call(this,[])||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,"definition",{get:function(){return this.referencedRule!==void 0?this.referencedRule.definition:[]},set:function(t){},enumerable:!1,configurable:!0}),e.prototype.accept=function(t){t.visit(this)},e}(ko);zt.NonTerminal=bj;var Qj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.orgText="",(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Rule=Qj;var Sj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.ignoreAmbiguities=!1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Alternative=Sj;var vj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Option=vj;var xj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.RepetitionMandatory=xj;var Pj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.RepetitionMandatoryWithSeparator=Pj;var Dj=function(r){Pa(e,r);function 
e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Repetition=Dj;var kj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.RepetitionWithSeparator=kj;var Rj=function(r){Pa(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,i.ignoreAmbiguities=!1,i.hasPredicates=!1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,"definition",{get:function(){return this._definition},set:function(t){this._definition=t},enumerable:!1,configurable:!0}),e}(ko);zt.Alternation=Rj;var oy=function(){function r(e){this.idx=1,(0,Ar.assign)(this,(0,Ar.pick)(e,function(t){return t!==void 0}))}return r.prototype.accept=function(e){e.visit(this)},r}();zt.Terminal=oy;function iIe(r){return(0,Ar.map)(r,Qd)}zt.serializeGrammar=iIe;function Qd(r){function e(s){return(0,Ar.map)(s,Qd)}if(r instanceof bj){var t={type:"NonTerminal",name:r.nonTerminalName,idx:r.idx};return(0,Ar.isString)(r.label)&&(t.label=r.label),t}else{if(r instanceof Sj)return{type:"Alternative",definition:e(r.definition)};if(r instanceof vj)return{type:"Option",idx:r.idx,definition:e(r.definition)};if(r instanceof xj)return{type:"RepetitionMandatory",idx:r.idx,definition:e(r.definition)};if(r instanceof Pj)return{type:"RepetitionMandatoryWithSeparator",idx:r.idx,separator:Qd(new oy({terminalType:r.separator})),definition:e(r.definition)};if(r instanceof kj)return{type:"RepetitionWithSeparator",idx:r.idx,separator:Qd(new oy({terminalType:r.separator})),definition:e(r.definition)};if(r instanceof Dj)return{type:"Repetition",idx:r.idx,definition:e(r.definition)};if(r instanceof Rj)return{type:"Alternation",idx:r.idx,definition:e(r.definition)};if(r instanceof oy){var i={type:"Terminal",name:r.terminalType.name,label:(0,rIe.tokenLabel)(r.terminalType),idx:r.idx};(0,Ar.isString)(r.label)&&(i.terminalLabel=r.label);var n=r.terminalType.PATTERN;return r.terminalType.PATTERN&&(i.pattern=(0,Ar.isRegExp)(n)?n.source:n),i}else{if(r instanceof Qj)return{type:"Rule",name:r.name,orgText:r.orgText,definition:e(r.definition)};throw Error("non exhaustive match")}}}zt.serializeProduction=Qd});var Ay=w(ay=>{"use strict";Object.defineProperty(ay,"__esModule",{value:!0});ay.RestWalker=void 0;var Gv=Gt(),En=mn(),nIe=function(){function r(){}return r.prototype.walk=function(e,t){var i=this;t===void 0&&(t=[]),(0,Gv.forEach)(e.definition,function(n,s){var o=(0,Gv.drop)(e.definition,s+1);if(n instanceof En.NonTerminal)i.walkProdRef(n,o,t);else if(n instanceof En.Terminal)i.walkTerminal(n,o,t);else if(n instanceof En.Alternative)i.walkFlat(n,o,t);else if(n instanceof En.Option)i.walkOption(n,o,t);else if(n instanceof En.RepetitionMandatory)i.walkAtLeastOne(n,o,t);else if(n instanceof En.RepetitionMandatoryWithSeparator)i.walkAtLeastOneSep(n,o,t);else if(n instanceof En.RepetitionWithSeparator)i.walkManySep(n,o,t);else if(n instanceof En.Repetition)i.walkMany(n,o,t);else if(n instanceof En.Alternation)i.walkOr(n,o,t);else throw Error("non exhaustive match")})},r.prototype.walkTerminal=function(e,t,i){},r.prototype.walkProdRef=function(e,t,i){},r.prototype.walkFlat=function(e,t,i){var n=t.concat(i);this.walk(e,n)},r.prototype.walkOption=function(e,t,i){var n=t.concat(i);this.walk(e,n)},r.prototype.walkAtLeastOne=function(e,t,i){var n=[new 
En.Option({definition:e.definition})].concat(t,i);this.walk(e,n)},r.prototype.walkAtLeastOneSep=function(e,t,i){var n=Fj(e,t,i);this.walk(e,n)},r.prototype.walkMany=function(e,t,i){var n=[new En.Option({definition:e.definition})].concat(t,i);this.walk(e,n)},r.prototype.walkManySep=function(e,t,i){var n=Fj(e,t,i);this.walk(e,n)},r.prototype.walkOr=function(e,t,i){var n=this,s=t.concat(i);(0,Gv.forEach)(e.definition,function(o){var a=new En.Alternative({definition:[o]});n.walk(a,s)})},r}();ay.RestWalker=nIe;function Fj(r,e,t){var i=[new En.Option({definition:[new En.Terminal({terminalType:r.separator})].concat(r.definition)})],n=i.concat(e,t);return n}});var $g=w(ly=>{"use strict";Object.defineProperty(ly,"__esModule",{value:!0});ly.GAstVisitor=void 0;var Ro=mn(),sIe=function(){function r(){}return r.prototype.visit=function(e){var t=e;switch(t.constructor){case Ro.NonTerminal:return this.visitNonTerminal(t);case Ro.Alternative:return this.visitAlternative(t);case Ro.Option:return this.visitOption(t);case Ro.RepetitionMandatory:return this.visitRepetitionMandatory(t);case Ro.RepetitionMandatoryWithSeparator:return this.visitRepetitionMandatoryWithSeparator(t);case Ro.RepetitionWithSeparator:return this.visitRepetitionWithSeparator(t);case Ro.Repetition:return this.visitRepetition(t);case Ro.Alternation:return this.visitAlternation(t);case Ro.Terminal:return this.visitTerminal(t);case Ro.Rule:return this.visitRule(t);default:throw Error("non exhaustive match")}},r.prototype.visitNonTerminal=function(e){},r.prototype.visitAlternative=function(e){},r.prototype.visitOption=function(e){},r.prototype.visitRepetition=function(e){},r.prototype.visitRepetitionMandatory=function(e){},r.prototype.visitRepetitionMandatoryWithSeparator=function(e){},r.prototype.visitRepetitionWithSeparator=function(e){},r.prototype.visitAlternation=function(e){},r.prototype.visitTerminal=function(e){},r.prototype.visitRule=function(e){},r}();ly.GAstVisitor=sIe});var vd=w(Oi=>{"use strict";var oIe=Oi&&Oi.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Oi,"__esModule",{value:!0});Oi.collectMethods=Oi.DslMethodsCollectorVisitor=Oi.getProductionDslName=Oi.isBranchingProd=Oi.isOptionalProd=Oi.isSequenceProd=void 0;var Sd=Gt(),br=mn(),aIe=$g();function AIe(r){return r instanceof br.Alternative||r instanceof br.Option||r instanceof br.Repetition||r instanceof br.RepetitionMandatory||r instanceof br.RepetitionMandatoryWithSeparator||r instanceof br.RepetitionWithSeparator||r instanceof br.Terminal||r instanceof br.Rule}Oi.isSequenceProd=AIe;function Yv(r,e){e===void 0&&(e=[]);var t=r instanceof br.Option||r instanceof br.Repetition||r instanceof br.RepetitionWithSeparator;return t?!0:r instanceof br.Alternation?(0,Sd.some)(r.definition,function(i){return Yv(i,e)}):r instanceof br.NonTerminal&&(0,Sd.contains)(e,r)?!1:r instanceof br.AbstractProduction?(r instanceof br.NonTerminal&&e.push(r),(0,Sd.every)(r.definition,function(i){return Yv(i,e)})):!1}Oi.isOptionalProd=Yv;function lIe(r){return r instanceof br.Alternation}Oi.isBranchingProd=lIe;function cIe(r){if(r instanceof 
br.NonTerminal)return"SUBRULE";if(r instanceof br.Option)return"OPTION";if(r instanceof br.Alternation)return"OR";if(r instanceof br.RepetitionMandatory)return"AT_LEAST_ONE";if(r instanceof br.RepetitionMandatoryWithSeparator)return"AT_LEAST_ONE_SEP";if(r instanceof br.RepetitionWithSeparator)return"MANY_SEP";if(r instanceof br.Repetition)return"MANY";if(r instanceof br.Terminal)return"CONSUME";throw Error("non exhaustive match")}Oi.getProductionDslName=cIe;var Nj=function(r){oIe(e,r);function e(){var t=r!==null&&r.apply(this,arguments)||this;return t.separator="-",t.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]},t}return e.prototype.reset=function(){this.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]}},e.prototype.visitTerminal=function(t){var i=t.terminalType.name+this.separator+"Terminal";(0,Sd.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(t)},e.prototype.visitNonTerminal=function(t){var i=t.nonTerminalName+this.separator+"Terminal";(0,Sd.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(t)},e.prototype.visitOption=function(t){this.dslMethods.option.push(t)},e.prototype.visitRepetitionWithSeparator=function(t){this.dslMethods.repetitionWithSeparator.push(t)},e.prototype.visitRepetitionMandatory=function(t){this.dslMethods.repetitionMandatory.push(t)},e.prototype.visitRepetitionMandatoryWithSeparator=function(t){this.dslMethods.repetitionMandatoryWithSeparator.push(t)},e.prototype.visitRepetition=function(t){this.dslMethods.repetition.push(t)},e.prototype.visitAlternation=function(t){this.dslMethods.alternation.push(t)},e}(aIe.GAstVisitor);Oi.DslMethodsCollectorVisitor=Nj;var cy=new Nj;function uIe(r){cy.reset(),r.accept(cy);var e=cy.dslMethods;return cy.reset(),e}Oi.collectMethods=uIe});var qv=w(Fo=>{"use strict";Object.defineProperty(Fo,"__esModule",{value:!0});Fo.firstForTerminal=Fo.firstForBranching=Fo.firstForSequence=Fo.first=void 0;var uy=Gt(),Tj=mn(),jv=vd();function gy(r){if(r instanceof Tj.NonTerminal)return gy(r.referencedRule);if(r instanceof Tj.Terminal)return Oj(r);if((0,jv.isSequenceProd)(r))return Lj(r);if((0,jv.isBranchingProd)(r))return Mj(r);throw Error("non exhaustive match")}Fo.first=gy;function Lj(r){for(var e=[],t=r.definition,i=0,n=t.length>i,s,o=!0;n&&o;)s=t[i],o=(0,jv.isOptionalProd)(s),e=e.concat(gy(s)),i=i+1,n=t.length>i;return(0,uy.uniq)(e)}Fo.firstForSequence=Lj;function Mj(r){var e=(0,uy.map)(r.definition,function(t){return gy(t)});return(0,uy.uniq)((0,uy.flatten)(e))}Fo.firstForBranching=Mj;function Oj(r){return[r.terminalType]}Fo.firstForTerminal=Oj});var Jv=w(fy=>{"use strict";Object.defineProperty(fy,"__esModule",{value:!0});fy.IN=void 0;fy.IN="_~IN~_"});var Yj=w(fs=>{"use strict";var gIe=fs&&fs.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new 
i)}}();Object.defineProperty(fs,"__esModule",{value:!0});fs.buildInProdFollowPrefix=fs.buildBetweenProdsFollowPrefix=fs.computeAllProdsFollows=fs.ResyncFollowsWalker=void 0;var fIe=Ay(),hIe=qv(),Kj=Gt(),Uj=Jv(),pIe=mn(),Hj=function(r){gIe(e,r);function e(t){var i=r.call(this)||this;return i.topProd=t,i.follows={},i}return e.prototype.startWalking=function(){return this.walk(this.topProd),this.follows},e.prototype.walkTerminal=function(t,i,n){},e.prototype.walkProdRef=function(t,i,n){var s=Gj(t.referencedRule,t.idx)+this.topProd.name,o=i.concat(n),a=new pIe.Alternative({definition:o}),l=(0,hIe.first)(a);this.follows[s]=l},e}(fIe.RestWalker);fs.ResyncFollowsWalker=Hj;function dIe(r){var e={};return(0,Kj.forEach)(r,function(t){var i=new Hj(t).startWalking();(0,Kj.assign)(e,i)}),e}fs.computeAllProdsFollows=dIe;function Gj(r,e){return r.name+e+Uj.IN}fs.buildBetweenProdsFollowPrefix=Gj;function CIe(r){var e=r.terminalType.name;return e+r.idx+Uj.IN}fs.buildInProdFollowPrefix=CIe});var xd=w(Da=>{"use strict";Object.defineProperty(Da,"__esModule",{value:!0});Da.defaultGrammarValidatorErrorProvider=Da.defaultGrammarResolverErrorProvider=Da.defaultParserErrorProvider=void 0;var ef=LA(),mIe=Gt(),eo=Gt(),Wv=mn(),jj=vd();Da.defaultParserErrorProvider={buildMismatchTokenMessage:function(r){var e=r.expected,t=r.actual,i=r.previous,n=r.ruleName,s=(0,ef.hasTokenLabel)(e),o=s?"--> "+(0,ef.tokenLabel)(e)+" <--":"token of type --> "+e.name+" <--",a="Expecting "+o+" but found --> '"+t.image+"' <--";return a},buildNotAllInputParsedMessage:function(r){var e=r.firstRedundant,t=r.ruleName;return"Redundant input, expecting EOF but found: "+e.image},buildNoViableAltMessage:function(r){var e=r.expectedPathsPerAlt,t=r.actual,i=r.previous,n=r.customUserDescription,s=r.ruleName,o="Expecting: ",a=(0,eo.first)(t).image,l=` +but found: '`+a+"'";if(n)return o+n+l;var c=(0,eo.reduce)(e,function(h,p){return h.concat(p)},[]),u=(0,eo.map)(c,function(h){return"["+(0,eo.map)(h,function(p){return(0,ef.tokenLabel)(p)}).join(", ")+"]"}),g=(0,eo.map)(u,function(h,p){return" "+(p+1)+". "+h}),f=`one of these possible Token sequences: +`+g.join(` +`);return o+f+l},buildEarlyExitMessage:function(r){var e=r.expectedIterationPaths,t=r.actual,i=r.customUserDescription,n=r.ruleName,s="Expecting: ",o=(0,eo.first)(t).image,a=` +but found: '`+o+"'";if(i)return s+i+a;var l=(0,eo.map)(e,function(u){return"["+(0,eo.map)(u,function(g){return(0,ef.tokenLabel)(g)}).join(",")+"]"}),c=`expecting at least one iteration which starts with one of these possible Token sequences:: + `+("<"+l.join(" ,")+">");return s+c+a}};Object.freeze(Da.defaultParserErrorProvider);Da.defaultGrammarResolverErrorProvider={buildRuleNotFoundError:function(r,e){var t="Invalid grammar, reference to a rule which is not defined: ->"+e.nonTerminalName+`<- +inside top level rule: ->`+r.name+"<-";return t}};Da.defaultGrammarValidatorErrorProvider={buildDuplicateFoundError:function(r,e){function t(u){return u instanceof Wv.Terminal?u.terminalType.name:u instanceof Wv.NonTerminal?u.nonTerminalName:""}var i=r.name,n=(0,eo.first)(e),s=n.idx,o=(0,jj.getProductionDslName)(n),a=t(n),l=s>0,c="->"+o+(l?s:"")+"<- "+(a?"with argument: ->"+a+"<-":"")+` + appears more than once (`+e.length+" times) in the top level rule: ->"+i+`<-. + For further details see: https://chevrotain.io/docs/FAQ.html#NUMERICAL_SUFFIXES + `;return c=c.replace(/[ \t]+/g," "),c=c.replace(/\s\s+/g,` +`),c},buildNamespaceConflictError:function(r){var e=`Namespace conflict found in grammar. 
+`+("The grammar has both a Terminal(Token) and a Non-Terminal(Rule) named: <"+r.name+`>. +`)+`To resolve this make sure each Terminal and Non-Terminal names are unique +This is easy to accomplish by using the convention that Terminal names start with an uppercase letter +and Non-Terminal names start with a lower case letter.`;return e},buildAlternationPrefixAmbiguityError:function(r){var e=(0,eo.map)(r.prefixPath,function(n){return(0,ef.tokenLabel)(n)}).join(", "),t=r.alternation.idx===0?"":r.alternation.idx,i="Ambiguous alternatives: <"+r.ambiguityIndices.join(" ,")+`> due to common lookahead prefix +`+("in inside <"+r.topLevelRule.name+`> Rule, +`)+("<"+e+`> may appears as a prefix path in all these alternatives. +`)+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#COMMON_PREFIX +For Further details.`;return i},buildAlternationAmbiguityError:function(r){var e=(0,eo.map)(r.prefixPath,function(n){return(0,ef.tokenLabel)(n)}).join(", "),t=r.alternation.idx===0?"":r.alternation.idx,i="Ambiguous Alternatives Detected: <"+r.ambiguityIndices.join(" ,")+"> in "+(" inside <"+r.topLevelRule.name+`> Rule, +`)+("<"+e+`> may appears as a prefix path in all these alternatives. +`);return i=i+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#AMBIGUOUS_ALTERNATIVES +For Further details.`,i},buildEmptyRepetitionError:function(r){var e=(0,jj.getProductionDslName)(r.repetition);r.repetition.idx!==0&&(e+=r.repetition.idx);var t="The repetition <"+e+"> within Rule <"+r.topLevelRule.name+`> can never consume any tokens. +This could lead to an infinite loop.`;return t},buildTokenNameError:function(r){return"deprecated"},buildEmptyAlternationError:function(r){var e="Ambiguous empty alternative: <"+(r.emptyChoiceIdx+1)+">"+(" in inside <"+r.topLevelRule.name+`> Rule. +`)+"Only the last alternative may be an empty alternative.";return e},buildTooManyAlternativesError:function(r){var e=`An Alternation cannot have more than 256 alternatives: +`+(" inside <"+r.topLevelRule.name+`> Rule. + has `+(r.alternation.definition.length+1)+" alternatives.");return e},buildLeftRecursionError:function(r){var e=r.topLevelRule.name,t=mIe.map(r.leftRecursionPath,function(s){return s.name}),i=e+" --> "+t.concat([e]).join(" --> "),n=`Left Recursion found in grammar. +`+("rule: <"+e+`> can be invoked from itself (directly or indirectly) +`)+(`without consuming any Tokens. The grammar path that causes this is: + `+i+` +`)+` To fix this refactor your grammar to remove the left recursion. 
+see: https://en.wikipedia.org/wiki/LL_parser#Left_Factoring.`;return n},buildInvalidRuleNameError:function(r){return"deprecated"},buildDuplicateRuleNameError:function(r){var e;r.topLevelRule instanceof Wv.Rule?e=r.topLevelRule.name:e=r.topLevelRule;var t="Duplicate definition, rule: ->"+e+"<- is already defined in the grammar: ->"+r.grammarName+"<-";return t}}});var Wj=w(MA=>{"use strict";var EIe=MA&&MA.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(MA,"__esModule",{value:!0});MA.GastRefResolverVisitor=MA.resolveGrammar=void 0;var IIe=jn(),qj=Gt(),yIe=$g();function wIe(r,e){var t=new Jj(r,e);return t.resolveRefs(),t.errors}MA.resolveGrammar=wIe;var Jj=function(r){EIe(e,r);function e(t,i){var n=r.call(this)||this;return n.nameToTopRule=t,n.errMsgProvider=i,n.errors=[],n}return e.prototype.resolveRefs=function(){var t=this;(0,qj.forEach)((0,qj.values)(this.nameToTopRule),function(i){t.currTopLevel=i,i.accept(t)})},e.prototype.visitNonTerminal=function(t){var i=this.nameToTopRule[t.nonTerminalName];if(i)t.referencedRule=i;else{var n=this.errMsgProvider.buildRuleNotFoundError(this.currTopLevel,t);this.errors.push({message:n,type:IIe.ParserDefinitionErrorType.UNRESOLVED_SUBRULE_REF,ruleName:this.currTopLevel.name,unresolvedRefName:t.nonTerminalName})}},e}(yIe.GAstVisitor);MA.GastRefResolverVisitor=Jj});var Dd=w(Nr=>{"use strict";var mc=Nr&&Nr.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Nr,"__esModule",{value:!0});Nr.nextPossibleTokensAfter=Nr.possiblePathsFrom=Nr.NextTerminalAfterAtLeastOneSepWalker=Nr.NextTerminalAfterAtLeastOneWalker=Nr.NextTerminalAfterManySepWalker=Nr.NextTerminalAfterManyWalker=Nr.AbstractNextTerminalAfterProductionWalker=Nr.NextAfterTokenWalker=Nr.AbstractNextPossibleTokensWalker=void 0;var zj=Ay(),Kt=Gt(),BIe=qv(),kt=mn(),Vj=function(r){mc(e,r);function e(t,i){var n=r.call(this)||this;return n.topProd=t,n.path=i,n.possibleTokTypes=[],n.nextProductionName="",n.nextProductionOccurrence=0,n.found=!1,n.isAtEndOfPath=!1,n}return e.prototype.startWalking=function(){if(this.found=!1,this.path.ruleStack[0]!==this.topProd.name)throw Error("The path does not start with the walker's top Rule!");return this.ruleStack=(0,Kt.cloneArr)(this.path.ruleStack).reverse(),this.occurrenceStack=(0,Kt.cloneArr)(this.path.occurrenceStack).reverse(),this.ruleStack.pop(),this.occurrenceStack.pop(),this.updateExpectedNext(),this.walk(this.topProd),this.possibleTokTypes},e.prototype.walk=function(t,i){i===void 0&&(i=[]),this.found||r.prototype.walk.call(this,t,i)},e.prototype.walkProdRef=function(t,i,n){if(t.referencedRule.name===this.nextProductionName&&t.idx===this.nextProductionOccurrence){var 
s=i.concat(n);this.updateExpectedNext(),this.walk(t.referencedRule,s)}},e.prototype.updateExpectedNext=function(){(0,Kt.isEmpty)(this.ruleStack)?(this.nextProductionName="",this.nextProductionOccurrence=0,this.isAtEndOfPath=!0):(this.nextProductionName=this.ruleStack.pop(),this.nextProductionOccurrence=this.occurrenceStack.pop())},e}(zj.RestWalker);Nr.AbstractNextPossibleTokensWalker=Vj;var bIe=function(r){mc(e,r);function e(t,i){var n=r.call(this,t,i)||this;return n.path=i,n.nextTerminalName="",n.nextTerminalOccurrence=0,n.nextTerminalName=n.path.lastTok.name,n.nextTerminalOccurrence=n.path.lastTokOccurrence,n}return e.prototype.walkTerminal=function(t,i,n){if(this.isAtEndOfPath&&t.terminalType.name===this.nextTerminalName&&t.idx===this.nextTerminalOccurrence&&!this.found){var s=i.concat(n),o=new kt.Alternative({definition:s});this.possibleTokTypes=(0,BIe.first)(o),this.found=!0}},e}(Vj);Nr.NextAfterTokenWalker=bIe;var Pd=function(r){mc(e,r);function e(t,i){var n=r.call(this)||this;return n.topRule=t,n.occurrence=i,n.result={token:void 0,occurrence:void 0,isEndOfRule:void 0},n}return e.prototype.startWalking=function(){return this.walk(this.topRule),this.result},e}(zj.RestWalker);Nr.AbstractNextTerminalAfterProductionWalker=Pd;var QIe=function(r){mc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkMany=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkMany.call(this,t,i,n)},e}(Pd);Nr.NextTerminalAfterManyWalker=QIe;var SIe=function(r){mc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkManySep=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkManySep.call(this,t,i,n)},e}(Pd);Nr.NextTerminalAfterManySepWalker=SIe;var vIe=function(r){mc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkAtLeastOne=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkAtLeastOne.call(this,t,i,n)},e}(Pd);Nr.NextTerminalAfterAtLeastOneWalker=vIe;var xIe=function(r){mc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkAtLeastOneSep=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkAtLeastOneSep.call(this,t,i,n)},e}(Pd);Nr.NextTerminalAfterAtLeastOneSepWalker=xIe;function Xj(r,e,t){t===void 0&&(t=[]),t=(0,Kt.cloneArr)(t);var i=[],n=0;function s(c){return c.concat((0,Kt.drop)(r,n+1))}function o(c){var u=Xj(s(c),e,t);return i.concat(u)}for(;t.length=0;ge--){var re=B.definition[ge],M={idx:p,def:re.definition.concat((0,Kt.drop)(h)),ruleStack:C,occurrenceStack:y};g.push(M),g.push(o)}else if(B instanceof kt.Alternative)g.push({idx:p,def:B.definition.concat((0,Kt.drop)(h)),ruleStack:C,occurrenceStack:y});else if(B instanceof kt.Rule)g.push(DIe(B,p,C,y));else throw Error("non exhaustive match")}}return u}Nr.nextPossibleTokensAfter=PIe;function DIe(r,e,t,i){var n=(0,Kt.cloneArr)(t);n.push(r.name);var 
s=(0,Kt.cloneArr)(i);return s.push(1),{idx:e,def:r.definition,ruleStack:n,occurrenceStack:s}}});var kd=w(Zt=>{"use strict";var $j=Zt&&Zt.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Zt,"__esModule",{value:!0});Zt.areTokenCategoriesNotUsed=Zt.isStrictPrefixOfPath=Zt.containsPath=Zt.getLookaheadPathsForOptionalProd=Zt.getLookaheadPathsForOr=Zt.lookAheadSequenceFromAlternatives=Zt.buildSingleAlternativeLookaheadFunction=Zt.buildAlternativesLookAheadFunc=Zt.buildLookaheadFuncForOptionalProd=Zt.buildLookaheadFuncForOr=Zt.getProdType=Zt.PROD_TYPE=void 0;var sr=Gt(),Zj=Dd(),kIe=Ay(),hy=_g(),OA=mn(),RIe=$g(),oi;(function(r){r[r.OPTION=0]="OPTION",r[r.REPETITION=1]="REPETITION",r[r.REPETITION_MANDATORY=2]="REPETITION_MANDATORY",r[r.REPETITION_MANDATORY_WITH_SEPARATOR=3]="REPETITION_MANDATORY_WITH_SEPARATOR",r[r.REPETITION_WITH_SEPARATOR=4]="REPETITION_WITH_SEPARATOR",r[r.ALTERNATION=5]="ALTERNATION"})(oi=Zt.PROD_TYPE||(Zt.PROD_TYPE={}));function FIe(r){if(r instanceof OA.Option)return oi.OPTION;if(r instanceof OA.Repetition)return oi.REPETITION;if(r instanceof OA.RepetitionMandatory)return oi.REPETITION_MANDATORY;if(r instanceof OA.RepetitionMandatoryWithSeparator)return oi.REPETITION_MANDATORY_WITH_SEPARATOR;if(r instanceof OA.RepetitionWithSeparator)return oi.REPETITION_WITH_SEPARATOR;if(r instanceof OA.Alternation)return oi.ALTERNATION;throw Error("non exhaustive match")}Zt.getProdType=FIe;function NIe(r,e,t,i,n,s){var o=tq(r,e,t),a=Xv(o)?hy.tokenStructuredMatcherNoCategories:hy.tokenStructuredMatcher;return s(o,i,a,n)}Zt.buildLookaheadFuncForOr=NIe;function TIe(r,e,t,i,n,s){var o=rq(r,e,n,t),a=Xv(o)?hy.tokenStructuredMatcherNoCategories:hy.tokenStructuredMatcher;return s(o[0],a,i)}Zt.buildLookaheadFuncForOptionalProd=TIe;function LIe(r,e,t,i){var n=r.length,s=(0,sr.every)(r,function(l){return(0,sr.every)(l,function(c){return c.length===1})});if(e)return function(l){for(var c=(0,sr.map)(l,function(D){return D.GATE}),u=0;u{"use strict";var Zv=Vt&&Vt.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Vt,"__esModule",{value:!0});Vt.checkPrefixAlternativesAmbiguities=Vt.validateSomeNonEmptyLookaheadPath=Vt.validateTooManyAlts=Vt.RepetionCollector=Vt.validateAmbiguousAlternationAlternatives=Vt.validateEmptyOrAlternative=Vt.getFirstNoneTerminal=Vt.validateNoLeftRecursion=Vt.validateRuleIsOverridden=Vt.validateRuleDoesNotAlreadyExist=Vt.OccurrenceValidationCollector=Vt.identifyProductionForDuplicates=Vt.validateGrammar=void 0;var er=Gt(),Qr=Gt(),No=jn(),_v=vd(),tf=kd(),HIe=Dd(),to=mn(),$v=$g();function GIe(r,e,t,i,n){var s=er.map(r,function(h){return YIe(h,i)}),o=er.map(r,function(h){return 
ex(h,h,i)}),a=[],l=[],c=[];(0,Qr.every)(o,Qr.isEmpty)&&(a=(0,Qr.map)(r,function(h){return Aq(h,i)}),l=(0,Qr.map)(r,function(h){return lq(h,e,i)}),c=gq(r,e,i));var u=JIe(r,t,i),g=(0,Qr.map)(r,function(h){return uq(h,i)}),f=(0,Qr.map)(r,function(h){return aq(h,r,n,i)});return er.flatten(s.concat(c,o,a,l,u,g,f))}Vt.validateGrammar=GIe;function YIe(r,e){var t=new oq;r.accept(t);var i=t.allProductions,n=er.groupBy(i,nq),s=er.pick(n,function(a){return a.length>1}),o=er.map(er.values(s),function(a){var l=er.first(a),c=e.buildDuplicateFoundError(r,a),u=(0,_v.getProductionDslName)(l),g={message:c,type:No.ParserDefinitionErrorType.DUPLICATE_PRODUCTIONS,ruleName:r.name,dslName:u,occurrence:l.idx},f=sq(l);return f&&(g.parameter=f),g});return o}function nq(r){return(0,_v.getProductionDslName)(r)+"_#_"+r.idx+"_#_"+sq(r)}Vt.identifyProductionForDuplicates=nq;function sq(r){return r instanceof to.Terminal?r.terminalType.name:r instanceof to.NonTerminal?r.nonTerminalName:""}var oq=function(r){Zv(e,r);function e(){var t=r!==null&&r.apply(this,arguments)||this;return t.allProductions=[],t}return e.prototype.visitNonTerminal=function(t){this.allProductions.push(t)},e.prototype.visitOption=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatory=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatoryWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetition=function(t){this.allProductions.push(t)},e.prototype.visitAlternation=function(t){this.allProductions.push(t)},e.prototype.visitTerminal=function(t){this.allProductions.push(t)},e}($v.GAstVisitor);Vt.OccurrenceValidationCollector=oq;function aq(r,e,t,i){var n=[],s=(0,Qr.reduce)(e,function(a,l){return l.name===r.name?a+1:a},0);if(s>1){var o=i.buildDuplicateRuleNameError({topLevelRule:r,grammarName:t});n.push({message:o,type:No.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:r.name})}return n}Vt.validateRuleDoesNotAlreadyExist=aq;function jIe(r,e,t){var i=[],n;return er.contains(e,r)||(n="Invalid rule override, rule: ->"+r+"<- cannot be overridden in the grammar: ->"+t+"<-as it is not defined in any of the super grammars ",i.push({message:n,type:No.ParserDefinitionErrorType.INVALID_RULE_OVERRIDE,ruleName:r})),i}Vt.validateRuleIsOverridden=jIe;function ex(r,e,t,i){i===void 0&&(i=[]);var n=[],s=Rd(e.definition);if(er.isEmpty(s))return[];var o=r.name,a=er.contains(s,r);a&&n.push({message:t.buildLeftRecursionError({topLevelRule:r,leftRecursionPath:i}),type:No.ParserDefinitionErrorType.LEFT_RECURSION,ruleName:o});var l=er.difference(s,i.concat([r])),c=er.map(l,function(u){var g=er.cloneArr(i);return g.push(u),ex(r,u,t,g)});return n.concat(er.flatten(c))}Vt.validateNoLeftRecursion=ex;function Rd(r){var e=[];if(er.isEmpty(r))return e;var t=er.first(r);if(t instanceof to.NonTerminal)e.push(t.referencedRule);else if(t instanceof to.Alternative||t instanceof to.Option||t instanceof to.RepetitionMandatory||t instanceof to.RepetitionMandatoryWithSeparator||t instanceof to.RepetitionWithSeparator||t instanceof to.Repetition)e=e.concat(Rd(t.definition));else if(t instanceof to.Alternation)e=er.flatten(er.map(t.definition,function(o){return Rd(o.definition)}));else if(!(t instanceof to.Terminal))throw Error("non exhaustive match");var i=(0,_v.isOptionalProd)(t),n=r.length>1;if(i&&n){var s=er.drop(r);return e.concat(Rd(s))}else return e}Vt.getFirstNoneTerminal=Rd;var tx=function(r){Zv(e,r);function e(){var 
t=r!==null&&r.apply(this,arguments)||this;return t.alternations=[],t}return e.prototype.visitAlternation=function(t){this.alternations.push(t)},e}($v.GAstVisitor);function Aq(r,e){var t=new tx;r.accept(t);var i=t.alternations,n=er.reduce(i,function(s,o){var a=er.dropRight(o.definition),l=er.map(a,function(c,u){var g=(0,HIe.nextPossibleTokensAfter)([c],[],null,1);return er.isEmpty(g)?{message:e.buildEmptyAlternationError({topLevelRule:r,alternation:o,emptyChoiceIdx:u}),type:No.ParserDefinitionErrorType.NONE_LAST_EMPTY_ALT,ruleName:r.name,occurrence:o.idx,alternative:u+1}:null});return s.concat(er.compact(l))},[]);return n}Vt.validateEmptyOrAlternative=Aq;function lq(r,e,t){var i=new tx;r.accept(i);var n=i.alternations;n=(0,Qr.reject)(n,function(o){return o.ignoreAmbiguities===!0});var s=er.reduce(n,function(o,a){var l=a.idx,c=a.maxLookahead||e,u=(0,tf.getLookaheadPathsForOr)(l,r,c,a),g=qIe(u,a,r,t),f=fq(u,a,r,t);return o.concat(g,f)},[]);return s}Vt.validateAmbiguousAlternationAlternatives=lq;var cq=function(r){Zv(e,r);function e(){var t=r!==null&&r.apply(this,arguments)||this;return t.allProductions=[],t}return e.prototype.visitRepetitionWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatory=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatoryWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetition=function(t){this.allProductions.push(t)},e}($v.GAstVisitor);Vt.RepetionCollector=cq;function uq(r,e){var t=new tx;r.accept(t);var i=t.alternations,n=er.reduce(i,function(s,o){return o.definition.length>255&&s.push({message:e.buildTooManyAlternativesError({topLevelRule:r,alternation:o}),type:No.ParserDefinitionErrorType.TOO_MANY_ALTS,ruleName:r.name,occurrence:o.idx}),s},[]);return n}Vt.validateTooManyAlts=uq;function gq(r,e,t){var i=[];return(0,Qr.forEach)(r,function(n){var s=new cq;n.accept(s);var o=s.allProductions;(0,Qr.forEach)(o,function(a){var l=(0,tf.getProdType)(a),c=a.maxLookahead||e,u=a.idx,g=(0,tf.getLookaheadPathsForOptionalProd)(u,n,l,c),f=g[0];if((0,Qr.isEmpty)((0,Qr.flatten)(f))){var h=t.buildEmptyRepetitionError({topLevelRule:n,repetition:a});i.push({message:h,type:No.ParserDefinitionErrorType.NO_NON_EMPTY_LOOKAHEAD,ruleName:n.name})}})}),i}Vt.validateSomeNonEmptyLookaheadPath=gq;function qIe(r,e,t,i){var n=[],s=(0,Qr.reduce)(r,function(a,l,c){return e.definition[c].ignoreAmbiguities===!0||(0,Qr.forEach)(l,function(u){var g=[c];(0,Qr.forEach)(r,function(f,h){c!==h&&(0,tf.containsPath)(f,u)&&e.definition[h].ignoreAmbiguities!==!0&&g.push(h)}),g.length>1&&!(0,tf.containsPath)(n,u)&&(n.push(u),a.push({alts:g,path:u}))}),a},[]),o=er.map(s,function(a){var l=(0,Qr.map)(a.alts,function(u){return u+1}),c=i.buildAlternationAmbiguityError({topLevelRule:t,alternation:e,ambiguityIndices:l,prefixPath:a.path});return{message:c,type:No.ParserDefinitionErrorType.AMBIGUOUS_ALTS,ruleName:t.name,occurrence:e.idx,alternatives:[a.alts]}});return o}function fq(r,e,t,i){var n=[],s=(0,Qr.reduce)(r,function(o,a,l){var c=(0,Qr.map)(a,function(u){return{idx:l,path:u}});return o.concat(c)},[]);return(0,Qr.forEach)(s,function(o){var a=e.definition[o.idx];if(a.ignoreAmbiguities!==!0){var l=o.idx,c=o.path,u=(0,Qr.findAll)(s,function(f){return e.definition[f.idx].ignoreAmbiguities!==!0&&f.idx{"use strict";Object.defineProperty(rf,"__esModule",{value:!0});rf.validateGrammar=rf.resolveGrammar=void 0;var ix=Gt(),WIe=Wj(),zIe=rx(),hq=xd();function 
VIe(r){r=(0,ix.defaults)(r,{errMsgProvider:hq.defaultGrammarResolverErrorProvider});var e={};return(0,ix.forEach)(r.rules,function(t){e[t.name]=t}),(0,WIe.resolveGrammar)(e,r.errMsgProvider)}rf.resolveGrammar=VIe;function XIe(r){return r=(0,ix.defaults)(r,{errMsgProvider:hq.defaultGrammarValidatorErrorProvider}),(0,zIe.validateGrammar)(r.rules,r.maxLookahead,r.tokenTypes,r.errMsgProvider,r.grammarName)}rf.validateGrammar=XIe});var nf=w(In=>{"use strict";var Fd=In&&In.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(In,"__esModule",{value:!0});In.EarlyExitException=In.NotAllInputParsedException=In.NoViableAltException=In.MismatchedTokenException=In.isRecognitionException=void 0;var ZIe=Gt(),dq="MismatchedTokenException",Cq="NoViableAltException",mq="EarlyExitException",Eq="NotAllInputParsedException",Iq=[dq,Cq,mq,Eq];Object.freeze(Iq);function _Ie(r){return(0,ZIe.contains)(Iq,r.name)}In.isRecognitionException=_Ie;var py=function(r){Fd(e,r);function e(t,i){var n=this.constructor,s=r.call(this,t)||this;return s.token=i,s.resyncedTokens=[],Object.setPrototypeOf(s,n.prototype),Error.captureStackTrace&&Error.captureStackTrace(s,s.constructor),s}return e}(Error),$Ie=function(r){Fd(e,r);function e(t,i,n){var s=r.call(this,t,i)||this;return s.previousToken=n,s.name=dq,s}return e}(py);In.MismatchedTokenException=$Ie;var eye=function(r){Fd(e,r);function e(t,i,n){var s=r.call(this,t,i)||this;return s.previousToken=n,s.name=Cq,s}return e}(py);In.NoViableAltException=eye;var tye=function(r){Fd(e,r);function e(t,i){var n=r.call(this,t,i)||this;return n.name=Eq,n}return e}(py);In.NotAllInputParsedException=tye;var rye=function(r){Fd(e,r);function e(t,i,n){var s=r.call(this,t,i)||this;return s.previousToken=n,s.name=mq,s}return e}(py);In.EarlyExitException=rye});var sx=w(Ki=>{"use strict";Object.defineProperty(Ki,"__esModule",{value:!0});Ki.attemptInRepetitionRecovery=Ki.Recoverable=Ki.InRuleRecoveryException=Ki.IN_RULE_RECOVERY_EXCEPTION=Ki.EOF_FOLLOW_KEY=void 0;var dy=LA(),hs=Gt(),iye=nf(),nye=Jv(),sye=jn();Ki.EOF_FOLLOW_KEY={};Ki.IN_RULE_RECOVERY_EXCEPTION="InRuleRecoveryException";function nx(r){this.name=Ki.IN_RULE_RECOVERY_EXCEPTION,this.message=r}Ki.InRuleRecoveryException=nx;nx.prototype=Error.prototype;var oye=function(){function r(){}return r.prototype.initRecoverable=function(e){this.firstAfterRepMap={},this.resyncFollows={},this.recoveryEnabled=(0,hs.has)(e,"recoveryEnabled")?e.recoveryEnabled:sye.DEFAULT_PARSER_CONFIG.recoveryEnabled,this.recoveryEnabled&&(this.attemptInRepetitionRecovery=yq)},r.prototype.getTokenToInsert=function(e){var t=(0,dy.createTokenInstance)(e,"",NaN,NaN,NaN,NaN,NaN,NaN);return t.isInsertedInRecovery=!0,t},r.prototype.canTokenTypeBeInsertedInRecovery=function(e){return!0},r.prototype.tryInRepetitionRecovery=function(e,t,i,n){for(var s=this,o=this.findReSyncTokenType(),a=this.exportLexerState(),l=[],c=!1,u=this.LA(1),g=this.LA(1),f=function(){var h=s.LA(0),p=s.errorMessageProvider.buildMismatchTokenMessage({expected:n,actual:u,previous:h,ruleName:s.getCurrRuleFullName()}),C=new 
iye.MismatchedTokenException(p,u,s.LA(0));C.resyncedTokens=(0,hs.dropRight)(l),s.SAVE_ERROR(C)};!c;)if(this.tokenMatcher(g,n)){f();return}else if(i.call(this)){f(),e.apply(this,t);return}else this.tokenMatcher(g,o)?c=!0:(g=this.SKIP_TOKEN(),this.addToResyncTokens(g,l));this.importLexerState(a)},r.prototype.shouldInRepetitionRecoveryBeTried=function(e,t,i){return!(i===!1||e===void 0||t===void 0||this.tokenMatcher(this.LA(1),e)||this.isBackTracking()||this.canPerformInRuleRecovery(e,this.getFollowsForInRuleRecovery(e,t)))},r.prototype.getFollowsForInRuleRecovery=function(e,t){var i=this.getCurrentGrammarPath(e,t),n=this.getNextPossibleTokenTypes(i);return n},r.prototype.tryInRuleRecovery=function(e,t){if(this.canRecoverWithSingleTokenInsertion(e,t)){var i=this.getTokenToInsert(e);return i}if(this.canRecoverWithSingleTokenDeletion(e)){var n=this.SKIP_TOKEN();return this.consumeToken(),n}throw new nx("sad sad panda")},r.prototype.canPerformInRuleRecovery=function(e,t){return this.canRecoverWithSingleTokenInsertion(e,t)||this.canRecoverWithSingleTokenDeletion(e)},r.prototype.canRecoverWithSingleTokenInsertion=function(e,t){var i=this;if(!this.canTokenTypeBeInsertedInRecovery(e)||(0,hs.isEmpty)(t))return!1;var n=this.LA(1),s=(0,hs.find)(t,function(o){return i.tokenMatcher(n,o)})!==void 0;return s},r.prototype.canRecoverWithSingleTokenDeletion=function(e){var t=this.tokenMatcher(this.LA(2),e);return t},r.prototype.isInCurrentRuleReSyncSet=function(e){var t=this.getCurrFollowKey(),i=this.getFollowSetFromFollowKey(t);return(0,hs.contains)(i,e)},r.prototype.findReSyncTokenType=function(){for(var e=this.flattenFollowSet(),t=this.LA(1),i=2;;){var n=t.tokenType;if((0,hs.contains)(e,n))return n;t=this.LA(i),i++}},r.prototype.getCurrFollowKey=function(){if(this.RULE_STACK.length===1)return Ki.EOF_FOLLOW_KEY;var e=this.getLastExplicitRuleShortName(),t=this.getLastExplicitRuleOccurrenceIndex(),i=this.getPreviousExplicitRuleShortName();return{ruleName:this.shortRuleNameToFullName(e),idxInCallingRule:t,inRule:this.shortRuleNameToFullName(i)}},r.prototype.buildFullFollowKeyStack=function(){var e=this,t=this.RULE_STACK,i=this.RULE_OCCURRENCE_STACK;return(0,hs.map)(t,function(n,s){return s===0?Ki.EOF_FOLLOW_KEY:{ruleName:e.shortRuleNameToFullName(n),idxInCallingRule:i[s],inRule:e.shortRuleNameToFullName(t[s-1])}})},r.prototype.flattenFollowSet=function(){var e=this,t=(0,hs.map)(this.buildFullFollowKeyStack(),function(i){return e.getFollowSetFromFollowKey(i)});return(0,hs.flatten)(t)},r.prototype.getFollowSetFromFollowKey=function(e){if(e===Ki.EOF_FOLLOW_KEY)return[dy.EOF];var t=e.ruleName+e.idxInCallingRule+nye.IN+e.inRule;return this.resyncFollows[t]},r.prototype.addToResyncTokens=function(e,t){return this.tokenMatcher(e,dy.EOF)||t.push(e),t},r.prototype.reSyncTo=function(e){for(var t=[],i=this.LA(1);this.tokenMatcher(i,e)===!1;)i=this.SKIP_TOKEN(),this.addToResyncTokens(i,t);return(0,hs.dropRight)(t)},r.prototype.attemptInRepetitionRecovery=function(e,t,i,n,s,o,a){},r.prototype.getCurrentGrammarPath=function(e,t){var i=this.getHumanReadableRuleStack(),n=(0,hs.cloneArr)(this.RULE_OCCURRENCE_STACK),s={ruleStack:i,occurrenceStack:n,lastTok:e,lastTokOccurrence:t};return s},r.prototype.getHumanReadableRuleStack=function(){var e=this;return(0,hs.map)(this.RULE_STACK,function(t){return e.shortRuleNameToFullName(t)})},r}();Ki.Recoverable=oye;function yq(r,e,t,i,n,s,o){var a=this.getKeyForAutomaticLookahead(i,n),l=this.firstAfterRepMap[a];if(l===void 0){var 
c=this.getCurrRuleFullName(),u=this.getGAstProductions()[c],g=new s(u,n);l=g.startWalking(),this.firstAfterRepMap[a]=l}var f=l.token,h=l.occurrence,p=l.isEndOfRule;this.RULE_STACK.length===1&&p&&f===void 0&&(f=dy.EOF,h=1),this.shouldInRepetitionRecoveryBeTried(f,h,o)&&this.tryInRepetitionRecovery(r,e,t,f)}Ki.attemptInRepetitionRecovery=yq});var Cy=w(Jt=>{"use strict";Object.defineProperty(Jt,"__esModule",{value:!0});Jt.getKeyForAutomaticLookahead=Jt.AT_LEAST_ONE_SEP_IDX=Jt.MANY_SEP_IDX=Jt.AT_LEAST_ONE_IDX=Jt.MANY_IDX=Jt.OPTION_IDX=Jt.OR_IDX=Jt.BITS_FOR_ALT_IDX=Jt.BITS_FOR_RULE_IDX=Jt.BITS_FOR_OCCURRENCE_IDX=Jt.BITS_FOR_METHOD_TYPE=void 0;Jt.BITS_FOR_METHOD_TYPE=4;Jt.BITS_FOR_OCCURRENCE_IDX=8;Jt.BITS_FOR_RULE_IDX=12;Jt.BITS_FOR_ALT_IDX=8;Jt.OR_IDX=1<{"use strict";Object.defineProperty(my,"__esModule",{value:!0});my.LooksAhead=void 0;var ka=kd(),ro=Gt(),wq=jn(),Ra=Cy(),Ec=vd(),Aye=function(){function r(){}return r.prototype.initLooksAhead=function(e){this.dynamicTokensEnabled=(0,ro.has)(e,"dynamicTokensEnabled")?e.dynamicTokensEnabled:wq.DEFAULT_PARSER_CONFIG.dynamicTokensEnabled,this.maxLookahead=(0,ro.has)(e,"maxLookahead")?e.maxLookahead:wq.DEFAULT_PARSER_CONFIG.maxLookahead,this.lookAheadFuncsCache=(0,ro.isES2015MapSupported)()?new Map:[],(0,ro.isES2015MapSupported)()?(this.getLaFuncFromCache=this.getLaFuncFromMap,this.setLaFuncCache=this.setLaFuncCacheUsingMap):(this.getLaFuncFromCache=this.getLaFuncFromObj,this.setLaFuncCache=this.setLaFuncUsingObj)},r.prototype.preComputeLookaheadFunctions=function(e){var t=this;(0,ro.forEach)(e,function(i){t.TRACE_INIT(i.name+" Rule Lookahead",function(){var n=(0,Ec.collectMethods)(i),s=n.alternation,o=n.repetition,a=n.option,l=n.repetitionMandatory,c=n.repetitionMandatoryWithSeparator,u=n.repetitionWithSeparator;(0,ro.forEach)(s,function(g){var f=g.idx===0?"":g.idx;t.TRACE_INIT(""+(0,Ec.getProductionDslName)(g)+f,function(){var h=(0,ka.buildLookaheadFuncForOr)(g.idx,i,g.maxLookahead||t.maxLookahead,g.hasPredicates,t.dynamicTokensEnabled,t.lookAheadBuilderForAlternatives),p=(0,Ra.getKeyForAutomaticLookahead)(t.fullRuleNameToShort[i.name],Ra.OR_IDX,g.idx);t.setLaFuncCache(p,h)})}),(0,ro.forEach)(o,function(g){t.computeLookaheadFunc(i,g.idx,Ra.MANY_IDX,ka.PROD_TYPE.REPETITION,g.maxLookahead,(0,Ec.getProductionDslName)(g))}),(0,ro.forEach)(a,function(g){t.computeLookaheadFunc(i,g.idx,Ra.OPTION_IDX,ka.PROD_TYPE.OPTION,g.maxLookahead,(0,Ec.getProductionDslName)(g))}),(0,ro.forEach)(l,function(g){t.computeLookaheadFunc(i,g.idx,Ra.AT_LEAST_ONE_IDX,ka.PROD_TYPE.REPETITION_MANDATORY,g.maxLookahead,(0,Ec.getProductionDslName)(g))}),(0,ro.forEach)(c,function(g){t.computeLookaheadFunc(i,g.idx,Ra.AT_LEAST_ONE_SEP_IDX,ka.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR,g.maxLookahead,(0,Ec.getProductionDslName)(g))}),(0,ro.forEach)(u,function(g){t.computeLookaheadFunc(i,g.idx,Ra.MANY_SEP_IDX,ka.PROD_TYPE.REPETITION_WITH_SEPARATOR,g.maxLookahead,(0,Ec.getProductionDslName)(g))})})})},r.prototype.computeLookaheadFunc=function(e,t,i,n,s,o){var a=this;this.TRACE_INIT(""+o+(t===0?"":t),function(){var 
l=(0,ka.buildLookaheadFuncForOptionalProd)(t,e,s||a.maxLookahead,a.dynamicTokensEnabled,n,a.lookAheadBuilderForOptional),c=(0,Ra.getKeyForAutomaticLookahead)(a.fullRuleNameToShort[e.name],i,t);a.setLaFuncCache(c,l)})},r.prototype.lookAheadBuilderForOptional=function(e,t,i){return(0,ka.buildSingleAlternativeLookaheadFunction)(e,t,i)},r.prototype.lookAheadBuilderForAlternatives=function(e,t,i,n){return(0,ka.buildAlternativesLookAheadFunc)(e,t,i,n)},r.prototype.getKeyForAutomaticLookahead=function(e,t){var i=this.getLastExplicitRuleShortName();return(0,Ra.getKeyForAutomaticLookahead)(i,e,t)},r.prototype.getLaFuncFromCache=function(e){},r.prototype.getLaFuncFromMap=function(e){return this.lookAheadFuncsCache.get(e)},r.prototype.getLaFuncFromObj=function(e){return this.lookAheadFuncsCache[e]},r.prototype.setLaFuncCache=function(e,t){},r.prototype.setLaFuncCacheUsingMap=function(e,t){this.lookAheadFuncsCache.set(e,t)},r.prototype.setLaFuncUsingObj=function(e,t){this.lookAheadFuncsCache[e]=t},r}();my.LooksAhead=Aye});var bq=w(To=>{"use strict";Object.defineProperty(To,"__esModule",{value:!0});To.addNoneTerminalToCst=To.addTerminalToCst=To.setNodeLocationFull=To.setNodeLocationOnlyOffset=void 0;function lye(r,e){isNaN(r.startOffset)===!0?(r.startOffset=e.startOffset,r.endOffset=e.endOffset):r.endOffset{"use strict";Object.defineProperty(KA,"__esModule",{value:!0});KA.defineNameProp=KA.functionName=KA.classNameFromInstance=void 0;var fye=Gt();function hye(r){return Sq(r.constructor)}KA.classNameFromInstance=hye;var Qq="name";function Sq(r){var e=r.name;return e||"anonymous"}KA.functionName=Sq;function pye(r,e){var t=Object.getOwnPropertyDescriptor(r,Qq);return(0,fye.isUndefined)(t)||t.configurable?(Object.defineProperty(r,Qq,{enumerable:!1,configurable:!0,writable:!1,value:e}),!0):!1}KA.defineNameProp=pye});var kq=w(Si=>{"use strict";Object.defineProperty(Si,"__esModule",{value:!0});Si.validateRedundantMethods=Si.validateMissingCstMethods=Si.validateVisitor=Si.CstVisitorDefinitionError=Si.createBaseVisitorConstructorWithDefaults=Si.createBaseSemanticVisitorConstructor=Si.defaultVisit=void 0;var ps=Gt(),Nd=ox();function vq(r,e){for(var t=(0,ps.keys)(r),i=t.length,n=0;n: + `+(""+s.join(` + +`).replace(/\n/g,` + `)))}}};return t.prototype=i,t.prototype.constructor=t,t._RULE_NAMES=e,t}Si.createBaseSemanticVisitorConstructor=dye;function Cye(r,e,t){var i=function(){};(0,Nd.defineNameProp)(i,r+"BaseSemanticsWithDefaults");var n=Object.create(t.prototype);return(0,ps.forEach)(e,function(s){n[s]=vq}),i.prototype=n,i.prototype.constructor=i,i}Si.createBaseVisitorConstructorWithDefaults=Cye;var ax;(function(r){r[r.REDUNDANT_METHOD=0]="REDUNDANT_METHOD",r[r.MISSING_METHOD=1]="MISSING_METHOD"})(ax=Si.CstVisitorDefinitionError||(Si.CstVisitorDefinitionError={}));function xq(r,e){var t=Pq(r,e),i=Dq(r,e);return t.concat(i)}Si.validateVisitor=xq;function Pq(r,e){var t=(0,ps.map)(e,function(i){if(!(0,ps.isFunction)(r[i]))return{msg:"Missing visitor method: <"+i+"> on "+(0,Nd.functionName)(r.constructor)+" CST Visitor.",type:ax.MISSING_METHOD,methodName:i}});return(0,ps.compact)(t)}Si.validateMissingCstMethods=Pq;var mye=["constructor","visit","validateVisitor"];function Dq(r,e){var t=[];for(var i in r)(0,ps.isFunction)(r[i])&&!(0,ps.contains)(mye,i)&&!(0,ps.contains)(e,i)&&t.push({msg:"Redundant visitor method: <"+i+"> on "+(0,Nd.functionName)(r.constructor)+` CST Visitor +There is no Grammar Rule corresponding to this method's name. 
+`,type:ax.REDUNDANT_METHOD,methodName:i});return t}Si.validateRedundantMethods=Dq});var Fq=w(Ey=>{"use strict";Object.defineProperty(Ey,"__esModule",{value:!0});Ey.TreeBuilder=void 0;var sf=bq(),_r=Gt(),Rq=kq(),Eye=jn(),Iye=function(){function r(){}return r.prototype.initTreeBuilder=function(e){if(this.CST_STACK=[],this.outputCst=e.outputCst,this.nodeLocationTracking=(0,_r.has)(e,"nodeLocationTracking")?e.nodeLocationTracking:Eye.DEFAULT_PARSER_CONFIG.nodeLocationTracking,!this.outputCst)this.cstInvocationStateUpdate=_r.NOOP,this.cstFinallyStateUpdate=_r.NOOP,this.cstPostTerminal=_r.NOOP,this.cstPostNonTerminal=_r.NOOP,this.cstPostRule=_r.NOOP;else if(/full/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=sf.setNodeLocationFull,this.setNodeLocationFromNode=sf.setNodeLocationFull,this.cstPostRule=_r.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationFullRecovery):(this.setNodeLocationFromToken=_r.NOOP,this.setNodeLocationFromNode=_r.NOOP,this.cstPostRule=this.cstPostRuleFull,this.setInitialNodeLocation=this.setInitialNodeLocationFullRegular);else if(/onlyOffset/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=sf.setNodeLocationOnlyOffset,this.setNodeLocationFromNode=sf.setNodeLocationOnlyOffset,this.cstPostRule=_r.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRecovery):(this.setNodeLocationFromToken=_r.NOOP,this.setNodeLocationFromNode=_r.NOOP,this.cstPostRule=this.cstPostRuleOnlyOffset,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRegular);else if(/none/i.test(this.nodeLocationTracking))this.setNodeLocationFromToken=_r.NOOP,this.setNodeLocationFromNode=_r.NOOP,this.cstPostRule=_r.NOOP,this.setInitialNodeLocation=_r.NOOP;else throw Error('Invalid config option: "'+e.nodeLocationTracking+'"')},r.prototype.setInitialNodeLocationOnlyOffsetRecovery=function(e){e.location={startOffset:NaN,endOffset:NaN}},r.prototype.setInitialNodeLocationOnlyOffsetRegular=function(e){e.location={startOffset:this.LA(1).startOffset,endOffset:NaN}},r.prototype.setInitialNodeLocationFullRecovery=function(e){e.location={startOffset:NaN,startLine:NaN,startColumn:NaN,endOffset:NaN,endLine:NaN,endColumn:NaN}},r.prototype.setInitialNodeLocationFullRegular=function(e){var t=this.LA(1);e.location={startOffset:t.startOffset,startLine:t.startLine,startColumn:t.startColumn,endOffset:NaN,endLine:NaN,endColumn:NaN}},r.prototype.cstInvocationStateUpdate=function(e,t){var i={name:e,children:{}};this.setInitialNodeLocation(i),this.CST_STACK.push(i)},r.prototype.cstFinallyStateUpdate=function(){this.CST_STACK.pop()},r.prototype.cstPostRuleFull=function(e){var t=this.LA(0),i=e.location;i.startOffset<=t.startOffset?(i.endOffset=t.endOffset,i.endLine=t.endLine,i.endColumn=t.endColumn):(i.startOffset=NaN,i.startLine=NaN,i.startColumn=NaN)},r.prototype.cstPostRuleOnlyOffset=function(e){var t=this.LA(0),i=e.location;i.startOffset<=t.startOffset?i.endOffset=t.endOffset:i.startOffset=NaN},r.prototype.cstPostTerminal=function(e,t){var i=this.CST_STACK[this.CST_STACK.length-1];(0,sf.addTerminalToCst)(i,t,e),this.setNodeLocationFromToken(i.location,t)},r.prototype.cstPostNonTerminal=function(e,t){var i=this.CST_STACK[this.CST_STACK.length-1];(0,sf.addNoneTerminalToCst)(i,t,e),this.setNodeLocationFromNode(i.location,e.location)},r.prototype.getBaseCstVisitorConstructor=function(){if((0,_r.isUndefined)(this.baseCstVisitorConstructor)){var 
e=(0,Rq.createBaseSemanticVisitorConstructor)(this.className,(0,_r.keys)(this.gastProductionsCache));return this.baseCstVisitorConstructor=e,e}return this.baseCstVisitorConstructor},r.prototype.getBaseCstVisitorConstructorWithDefaults=function(){if((0,_r.isUndefined)(this.baseCstVisitorWithDefaultsConstructor)){var e=(0,Rq.createBaseVisitorConstructorWithDefaults)(this.className,(0,_r.keys)(this.gastProductionsCache),this.getBaseCstVisitorConstructor());return this.baseCstVisitorWithDefaultsConstructor=e,e}return this.baseCstVisitorWithDefaultsConstructor},r.prototype.getLastExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-1]},r.prototype.getPreviousExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-2]},r.prototype.getLastExplicitRuleOccurrenceIndex=function(){var e=this.RULE_OCCURRENCE_STACK;return e[e.length-1]},r}();Ey.TreeBuilder=Iye});var Tq=w(Iy=>{"use strict";Object.defineProperty(Iy,"__esModule",{value:!0});Iy.LexerAdapter=void 0;var Nq=jn(),yye=function(){function r(){}return r.prototype.initLexerAdapter=function(){this.tokVector=[],this.tokVectorLength=0,this.currIdx=-1},Object.defineProperty(r.prototype,"input",{get:function(){return this.tokVector},set:function(e){if(this.selfAnalysisDone!==!0)throw Error("Missing invocation at the end of the Parser's constructor.");this.reset(),this.tokVector=e,this.tokVectorLength=e.length},enumerable:!1,configurable:!0}),r.prototype.SKIP_TOKEN=function(){return this.currIdx<=this.tokVector.length-2?(this.consumeToken(),this.LA(1)):Nq.END_OF_FILE},r.prototype.LA=function(e){var t=this.currIdx+e;return t<0||this.tokVectorLength<=t?Nq.END_OF_FILE:this.tokVector[t]},r.prototype.consumeToken=function(){this.currIdx++},r.prototype.exportLexerState=function(){return this.currIdx},r.prototype.importLexerState=function(e){this.currIdx=e},r.prototype.resetLexerState=function(){this.currIdx=-1},r.prototype.moveToTerminatedState=function(){this.currIdx=this.tokVector.length-1},r.prototype.getLexerPosition=function(){return this.exportLexerState()},r}();Iy.LexerAdapter=yye});var Mq=w(yy=>{"use strict";Object.defineProperty(yy,"__esModule",{value:!0});yy.RecognizerApi=void 0;var Lq=Gt(),wye=nf(),Ax=jn(),Bye=xd(),bye=rx(),Qye=mn(),Sye=function(){function r(){}return r.prototype.ACTION=function(e){return e.call(this)},r.prototype.consume=function(e,t,i){return this.consumeInternal(t,e,i)},r.prototype.subrule=function(e,t,i){return this.subruleInternal(t,e,i)},r.prototype.option=function(e,t){return this.optionInternal(t,e)},r.prototype.or=function(e,t){return this.orInternal(t,e)},r.prototype.many=function(e,t){return this.manyInternal(e,t)},r.prototype.atLeastOne=function(e,t){return this.atLeastOneInternal(e,t)},r.prototype.CONSUME=function(e,t){return this.consumeInternal(e,0,t)},r.prototype.CONSUME1=function(e,t){return this.consumeInternal(e,1,t)},r.prototype.CONSUME2=function(e,t){return this.consumeInternal(e,2,t)},r.prototype.CONSUME3=function(e,t){return this.consumeInternal(e,3,t)},r.prototype.CONSUME4=function(e,t){return this.consumeInternal(e,4,t)},r.prototype.CONSUME5=function(e,t){return this.consumeInternal(e,5,t)},r.prototype.CONSUME6=function(e,t){return this.consumeInternal(e,6,t)},r.prototype.CONSUME7=function(e,t){return this.consumeInternal(e,7,t)},r.prototype.CONSUME8=function(e,t){return this.consumeInternal(e,8,t)},r.prototype.CONSUME9=function(e,t){return this.consumeInternal(e,9,t)},r.prototype.SUBRULE=function(e,t){return 
this.subruleInternal(e,0,t)},r.prototype.SUBRULE1=function(e,t){return this.subruleInternal(e,1,t)},r.prototype.SUBRULE2=function(e,t){return this.subruleInternal(e,2,t)},r.prototype.SUBRULE3=function(e,t){return this.subruleInternal(e,3,t)},r.prototype.SUBRULE4=function(e,t){return this.subruleInternal(e,4,t)},r.prototype.SUBRULE5=function(e,t){return this.subruleInternal(e,5,t)},r.prototype.SUBRULE6=function(e,t){return this.subruleInternal(e,6,t)},r.prototype.SUBRULE7=function(e,t){return this.subruleInternal(e,7,t)},r.prototype.SUBRULE8=function(e,t){return this.subruleInternal(e,8,t)},r.prototype.SUBRULE9=function(e,t){return this.subruleInternal(e,9,t)},r.prototype.OPTION=function(e){return this.optionInternal(e,0)},r.prototype.OPTION1=function(e){return this.optionInternal(e,1)},r.prototype.OPTION2=function(e){return this.optionInternal(e,2)},r.prototype.OPTION3=function(e){return this.optionInternal(e,3)},r.prototype.OPTION4=function(e){return this.optionInternal(e,4)},r.prototype.OPTION5=function(e){return this.optionInternal(e,5)},r.prototype.OPTION6=function(e){return this.optionInternal(e,6)},r.prototype.OPTION7=function(e){return this.optionInternal(e,7)},r.prototype.OPTION8=function(e){return this.optionInternal(e,8)},r.prototype.OPTION9=function(e){return this.optionInternal(e,9)},r.prototype.OR=function(e){return this.orInternal(e,0)},r.prototype.OR1=function(e){return this.orInternal(e,1)},r.prototype.OR2=function(e){return this.orInternal(e,2)},r.prototype.OR3=function(e){return this.orInternal(e,3)},r.prototype.OR4=function(e){return this.orInternal(e,4)},r.prototype.OR5=function(e){return this.orInternal(e,5)},r.prototype.OR6=function(e){return this.orInternal(e,6)},r.prototype.OR7=function(e){return this.orInternal(e,7)},r.prototype.OR8=function(e){return this.orInternal(e,8)},r.prototype.OR9=function(e){return this.orInternal(e,9)},r.prototype.MANY=function(e){this.manyInternal(0,e)},r.prototype.MANY1=function(e){this.manyInternal(1,e)},r.prototype.MANY2=function(e){this.manyInternal(2,e)},r.prototype.MANY3=function(e){this.manyInternal(3,e)},r.prototype.MANY4=function(e){this.manyInternal(4,e)},r.prototype.MANY5=function(e){this.manyInternal(5,e)},r.prototype.MANY6=function(e){this.manyInternal(6,e)},r.prototype.MANY7=function(e){this.manyInternal(7,e)},r.prototype.MANY8=function(e){this.manyInternal(8,e)},r.prototype.MANY9=function(e){this.manyInternal(9,e)},r.prototype.MANY_SEP=function(e){this.manySepFirstInternal(0,e)},r.prototype.MANY_SEP1=function(e){this.manySepFirstInternal(1,e)},r.prototype.MANY_SEP2=function(e){this.manySepFirstInternal(2,e)},r.prototype.MANY_SEP3=function(e){this.manySepFirstInternal(3,e)},r.prototype.MANY_SEP4=function(e){this.manySepFirstInternal(4,e)},r.prototype.MANY_SEP5=function(e){this.manySepFirstInternal(5,e)},r.prototype.MANY_SEP6=function(e){this.manySepFirstInternal(6,e)},r.prototype.MANY_SEP7=function(e){this.manySepFirstInternal(7,e)},r.prototype.MANY_SEP8=function(e){this.manySepFirstInternal(8,e)},r.prototype.MANY_SEP9=function(e){this.manySepFirstInternal(9,e)},r.prototype.AT_LEAST_ONE=function(e){this.atLeastOneInternal(0,e)},r.prototype.AT_LEAST_ONE1=function(e){return 
this.atLeastOneInternal(1,e)},r.prototype.AT_LEAST_ONE2=function(e){this.atLeastOneInternal(2,e)},r.prototype.AT_LEAST_ONE3=function(e){this.atLeastOneInternal(3,e)},r.prototype.AT_LEAST_ONE4=function(e){this.atLeastOneInternal(4,e)},r.prototype.AT_LEAST_ONE5=function(e){this.atLeastOneInternal(5,e)},r.prototype.AT_LEAST_ONE6=function(e){this.atLeastOneInternal(6,e)},r.prototype.AT_LEAST_ONE7=function(e){this.atLeastOneInternal(7,e)},r.prototype.AT_LEAST_ONE8=function(e){this.atLeastOneInternal(8,e)},r.prototype.AT_LEAST_ONE9=function(e){this.atLeastOneInternal(9,e)},r.prototype.AT_LEAST_ONE_SEP=function(e){this.atLeastOneSepFirstInternal(0,e)},r.prototype.AT_LEAST_ONE_SEP1=function(e){this.atLeastOneSepFirstInternal(1,e)},r.prototype.AT_LEAST_ONE_SEP2=function(e){this.atLeastOneSepFirstInternal(2,e)},r.prototype.AT_LEAST_ONE_SEP3=function(e){this.atLeastOneSepFirstInternal(3,e)},r.prototype.AT_LEAST_ONE_SEP4=function(e){this.atLeastOneSepFirstInternal(4,e)},r.prototype.AT_LEAST_ONE_SEP5=function(e){this.atLeastOneSepFirstInternal(5,e)},r.prototype.AT_LEAST_ONE_SEP6=function(e){this.atLeastOneSepFirstInternal(6,e)},r.prototype.AT_LEAST_ONE_SEP7=function(e){this.atLeastOneSepFirstInternal(7,e)},r.prototype.AT_LEAST_ONE_SEP8=function(e){this.atLeastOneSepFirstInternal(8,e)},r.prototype.AT_LEAST_ONE_SEP9=function(e){this.atLeastOneSepFirstInternal(9,e)},r.prototype.RULE=function(e,t,i){if(i===void 0&&(i=Ax.DEFAULT_RULE_CONFIG),(0,Lq.contains)(this.definedRulesNames,e)){var n=Bye.defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({topLevelRule:e,grammarName:this.className}),s={message:n,type:Ax.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:e};this.definitionErrors.push(s)}this.definedRulesNames.push(e);var o=this.defineRule(e,t,i);return this[e]=o,o},r.prototype.OVERRIDE_RULE=function(e,t,i){i===void 0&&(i=Ax.DEFAULT_RULE_CONFIG);var n=[];n=n.concat((0,bye.validateRuleIsOverridden)(e,this.definedRulesNames,this.className)),this.definitionErrors=this.definitionErrors.concat(n);var s=this.defineRule(e,t,i);return this[e]=s,s},r.prototype.BACKTRACK=function(e,t){return function(){this.isBackTrackingStack.push(1);var i=this.saveRecogState();try{return e.apply(this,t),!0}catch(n){if((0,wye.isRecognitionException)(n))return!1;throw n}finally{this.reloadRecogState(i),this.isBackTrackingStack.pop()}}},r.prototype.getGAstProductions=function(){return this.gastProductionsCache},r.prototype.getSerializedGastProductions=function(){return(0,Qye.serializeGrammar)((0,Lq.values)(this.gastProductionsCache))},r}();yy.RecognizerApi=Sye});var Hq=w(By=>{"use strict";Object.defineProperty(By,"__esModule",{value:!0});By.RecognizerEngine=void 0;var Pr=Gt(),qn=Cy(),wy=nf(),Oq=kd(),of=Dd(),Kq=jn(),vye=sx(),Uq=LA(),Td=_g(),xye=ox(),Pye=function(){function r(){}return r.prototype.initRecognizerEngine=function(e,t){if(this.className=(0,xye.classNameFromInstance)(this),this.shortRuleNameToFull={},this.fullRuleNameToShort={},this.ruleShortNameIdx=256,this.tokenMatcher=Td.tokenStructuredMatcherNoCategories,this.definedRulesNames=[],this.tokensMap={},this.isBackTrackingStack=[],this.RULE_STACK=[],this.RULE_OCCURRENCE_STACK=[],this.gastProductionsCache={},(0,Pr.has)(t,"serializedGrammar"))throw Error(`The Parser's configuration can no longer contain a <serializedGrammar> property. + See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_6-0-0 + For Further details.`);if((0,Pr.isArray)(e)){if((0,Pr.isEmpty)(e))throw Error(`A Token Vocabulary cannot be empty. 
+ Note that the first argument for the parser constructor + is no longer a Token vector (since v4.0).`);if(typeof e[0].startOffset=="number")throw Error(`The Parser constructor no longer accepts a token vector as the first argument. + See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_4-0-0 + For Further details.`)}if((0,Pr.isArray)(e))this.tokensMap=(0,Pr.reduce)(e,function(o,a){return o[a.name]=a,o},{});else if((0,Pr.has)(e,"modes")&&(0,Pr.every)((0,Pr.flatten)((0,Pr.values)(e.modes)),Td.isTokenType)){var i=(0,Pr.flatten)((0,Pr.values)(e.modes)),n=(0,Pr.uniq)(i);this.tokensMap=(0,Pr.reduce)(n,function(o,a){return o[a.name]=a,o},{})}else if((0,Pr.isObject)(e))this.tokensMap=(0,Pr.cloneObj)(e);else throw new Error("<tokensDictionary> argument must be An Array of Token constructors, A dictionary of Token constructors or an IMultiModeLexerDefinition");this.tokensMap.EOF=Uq.EOF;var s=(0,Pr.every)((0,Pr.values)(e),function(o){return(0,Pr.isEmpty)(o.categoryMatches)});this.tokenMatcher=s?Td.tokenStructuredMatcherNoCategories:Td.tokenStructuredMatcher,(0,Td.augmentTokenTypes)((0,Pr.values)(this.tokensMap))},r.prototype.defineRule=function(e,t,i){if(this.selfAnalysisDone)throw Error("Grammar rule <"+e+`> may not be defined after the 'performSelfAnalysis' method has been called' +Make sure that all grammar rule definitions are done before 'performSelfAnalysis' is called.`);var n=(0,Pr.has)(i,"resyncEnabled")?i.resyncEnabled:Kq.DEFAULT_RULE_CONFIG.resyncEnabled,s=(0,Pr.has)(i,"recoveryValueFunc")?i.recoveryValueFunc:Kq.DEFAULT_RULE_CONFIG.recoveryValueFunc,o=this.ruleShortNameIdx<t},r.prototype.orInternal=function(e,t){var i=this.getKeyForAutomaticLookahead(qn.OR_IDX,t),n=(0,Pr.isArray)(e)?e:e.DEF,s=this.getLaFuncFromCache(i),o=s.call(this,n);if(o!==void 0){var a=n[o];return a.ALT.call(this)}this.raiseNoAltException(t,e.ERR_MSG)},r.prototype.ruleFinallyStateUpdate=function(){if(this.RULE_STACK.pop(),this.RULE_OCCURRENCE_STACK.pop(),this.cstFinallyStateUpdate(),this.RULE_STACK.length===0&&this.isAtEndOfInput()===!1){var e=this.LA(1),t=this.errorMessageProvider.buildNotAllInputParsedMessage({firstRedundant:e,ruleName:this.getCurrRuleFullName()});this.SAVE_ERROR(new wy.NotAllInputParsedException(t,e))}},r.prototype.subruleInternal=function(e,t,i){var n;try{var s=i!==void 0?i.ARGS:void 0;return n=e.call(this,t,s),this.cstPostNonTerminal(n,i!==void 0&&i.LABEL!==void 0?i.LABEL:e.ruleName),n}catch(o){this.subruleInternalError(o,i,e.ruleName)}},r.prototype.subruleInternalError=function(e,t,i){throw(0,wy.isRecognitionException)(e)&&e.partialCstResult!==void 0&&(this.cstPostNonTerminal(e.partialCstResult,t!==void 0&&t.LABEL!==void 0?t.LABEL:i),delete e.partialCstResult),e},r.prototype.consumeInternal=function(e,t,i){var n;try{var s=this.LA(1);this.tokenMatcher(s,e)===!0?(this.consumeToken(),n=s):this.consumeInternalError(e,s,i)}catch(o){n=this.consumeInternalRecovery(e,t,o)}return this.cstPostTerminal(i!==void 0&&i.LABEL!==void 0?i.LABEL:e.name,n),n},r.prototype.consumeInternalError=function(e,t,i){var n,s=this.LA(0);throw i!==void 0&&i.ERR_MSG?n=i.ERR_MSG:n=this.errorMessageProvider.buildMismatchTokenMessage({expected:e,actual:t,previous:s,ruleName:this.getCurrRuleFullName()}),this.SAVE_ERROR(new wy.MismatchedTokenException(n,t,s))},r.prototype.consumeInternalRecovery=function(e,t,i){if(this.recoveryEnabled&&i.name==="MismatchedTokenException"&&!this.isBackTracking()){var n=this.getFollowsForInRuleRecovery(e,t);try{return this.tryInRuleRecovery(e,n)}catch(s){throw s.name===vye.IN_RULE_RECOVERY_EXCEPTION?i:s}}else 
throw i},r.prototype.saveRecogState=function(){var e=this.errors,t=(0,Pr.cloneArr)(this.RULE_STACK);return{errors:e,lexerState:this.exportLexerState(),RULE_STACK:t,CST_STACK:this.CST_STACK}},r.prototype.reloadRecogState=function(e){this.errors=e.errors,this.importLexerState(e.lexerState),this.RULE_STACK=e.RULE_STACK},r.prototype.ruleInvocationStateUpdate=function(e,t,i){this.RULE_OCCURRENCE_STACK.push(i),this.RULE_STACK.push(e),this.cstInvocationStateUpdate(t,e)},r.prototype.isBackTracking=function(){return this.isBackTrackingStack.length!==0},r.prototype.getCurrRuleFullName=function(){var e=this.getLastExplicitRuleShortName();return this.shortRuleNameToFull[e]},r.prototype.shortRuleNameToFullName=function(e){return this.shortRuleNameToFull[e]},r.prototype.isAtEndOfInput=function(){return this.tokenMatcher(this.LA(1),Uq.EOF)},r.prototype.reset=function(){this.resetLexerState(),this.isBackTrackingStack=[],this.errors=[],this.RULE_STACK=[],this.CST_STACK=[],this.RULE_OCCURRENCE_STACK=[]},r}();By.RecognizerEngine=Pye});var Yq=w(by=>{"use strict";Object.defineProperty(by,"__esModule",{value:!0});by.ErrorHandler=void 0;var lx=nf(),cx=Gt(),Gq=kd(),Dye=jn(),kye=function(){function r(){}return r.prototype.initErrorHandler=function(e){this._errors=[],this.errorMessageProvider=(0,cx.has)(e,"errorMessageProvider")?e.errorMessageProvider:Dye.DEFAULT_PARSER_CONFIG.errorMessageProvider},r.prototype.SAVE_ERROR=function(e){if((0,lx.isRecognitionException)(e))return e.context={ruleStack:this.getHumanReadableRuleStack(),ruleOccurrenceStack:(0,cx.cloneArr)(this.RULE_OCCURRENCE_STACK)},this._errors.push(e),e;throw Error("Trying to save an Error which is not a RecognitionException")},Object.defineProperty(r.prototype,"errors",{get:function(){return(0,cx.cloneArr)(this._errors)},set:function(e){this._errors=e},enumerable:!1,configurable:!0}),r.prototype.raiseEarlyExitException=function(e,t,i){for(var n=this.getCurrRuleFullName(),s=this.getGAstProductions()[n],o=(0,Gq.getLookaheadPathsForOptionalProd)(e,s,t,this.maxLookahead),a=o[0],l=[],c=1;c<=this.maxLookahead;c++)l.push(this.LA(c));var u=this.errorMessageProvider.buildEarlyExitMessage({expectedIterationPaths:a,actual:l,previous:this.LA(0),customUserDescription:i,ruleName:n});throw this.SAVE_ERROR(new lx.EarlyExitException(u,this.LA(1),this.LA(0)))},r.prototype.raiseNoAltException=function(e,t){for(var i=this.getCurrRuleFullName(),n=this.getGAstProductions()[i],s=(0,Gq.getLookaheadPathsForOr)(e,n,this.maxLookahead),o=[],a=1;a<=this.maxLookahead;a++)o.push(this.LA(a));var l=this.LA(0),c=this.errorMessageProvider.buildNoViableAltMessage({expectedPathsPerAlt:s,actual:o,previous:l,customUserDescription:t,ruleName:this.getCurrRuleFullName()});throw this.SAVE_ERROR(new lx.NoViableAltException(c,this.LA(1),l))},r}();by.ErrorHandler=kye});var Jq=w(Qy=>{"use strict";Object.defineProperty(Qy,"__esModule",{value:!0});Qy.ContentAssist=void 0;var jq=Dd(),qq=Gt(),Rye=function(){function r(){}return r.prototype.initContentAssist=function(){},r.prototype.computeContentAssist=function(e,t){var i=this.gastProductionsCache[e];if((0,qq.isUndefined)(i))throw Error("Rule ->"+e+"<- does not exist in this grammar.");return(0,jq.nextPossibleTokensAfter)([i],t,this.tokenMatcher,this.maxLookahead)},r.prototype.getNextPossibleTokenTypes=function(e){var t=(0,qq.first)(e.ruleStack),i=this.getGAstProductions(),n=i[t],s=new jq.NextAfterTokenWalker(n,e).startWalking();return s},r}();Qy.ContentAssist=Rye});var eJ=w(xy=>{"use 
strict";Object.defineProperty(xy,"__esModule",{value:!0});xy.GastRecorder=void 0;var yn=Gt(),Lo=mn(),Fye=Bd(),Xq=_g(),Zq=LA(),Nye=jn(),Tye=Cy(),vy={description:"This Object indicates the Parser is during Recording Phase"};Object.freeze(vy);var Wq=!0,zq=Math.pow(2,Tye.BITS_FOR_OCCURRENCE_IDX)-1,_q=(0,Zq.createToken)({name:"RECORDING_PHASE_TOKEN",pattern:Fye.Lexer.NA});(0,Xq.augmentTokenTypes)([_q]);var $q=(0,Zq.createTokenInstance)(_q,`This IToken indicates the Parser is in Recording Phase + See: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details`,-1,-1,-1,-1,-1,-1);Object.freeze($q);var Lye={name:`This CSTNode indicates the Parser is in Recording Phase + See: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details`,children:{}},Mye=function(){function r(){}return r.prototype.initGastRecorder=function(e){this.recordingProdStack=[],this.RECORDING_PHASE=!1},r.prototype.enableRecording=function(){var e=this;this.RECORDING_PHASE=!0,this.TRACE_INIT("Enable Recording",function(){for(var t=function(n){var s=n>0?n:"";e["CONSUME"+s]=function(o,a){return this.consumeInternalRecord(o,n,a)},e["SUBRULE"+s]=function(o,a){return this.subruleInternalRecord(o,n,a)},e["OPTION"+s]=function(o){return this.optionInternalRecord(o,n)},e["OR"+s]=function(o){return this.orInternalRecord(o,n)},e["MANY"+s]=function(o){this.manyInternalRecord(n,o)},e["MANY_SEP"+s]=function(o){this.manySepFirstInternalRecord(n,o)},e["AT_LEAST_ONE"+s]=function(o){this.atLeastOneInternalRecord(n,o)},e["AT_LEAST_ONE_SEP"+s]=function(o){this.atLeastOneSepFirstInternalRecord(n,o)}},i=0;i<10;i++)t(i);e.consume=function(n,s,o){return this.consumeInternalRecord(s,n,o)},e.subrule=function(n,s,o){return this.subruleInternalRecord(s,n,o)},e.option=function(n,s){return this.optionInternalRecord(s,n)},e.or=function(n,s){return this.orInternalRecord(s,n)},e.many=function(n,s){this.manyInternalRecord(n,s)},e.atLeastOne=function(n,s){this.atLeastOneInternalRecord(n,s)},e.ACTION=e.ACTION_RECORD,e.BACKTRACK=e.BACKTRACK_RECORD,e.LA=e.LA_RECORD})},r.prototype.disableRecording=function(){var e=this;this.RECORDING_PHASE=!1,this.TRACE_INIT("Deleting Recording methods",function(){for(var t=0;t<10;t++){var i=t>0?t:"";delete e["CONSUME"+i],delete e["SUBRULE"+i],delete e["OPTION"+i],delete e["OR"+i],delete e["MANY"+i],delete e["MANY_SEP"+i],delete e["AT_LEAST_ONE"+i],delete e["AT_LEAST_ONE_SEP"+i]}delete e.consume,delete e.subrule,delete e.option,delete e.or,delete e.many,delete e.atLeastOne,delete e.ACTION,delete e.BACKTRACK,delete e.LA})},r.prototype.ACTION_RECORD=function(e){},r.prototype.BACKTRACK_RECORD=function(e,t){return function(){return!0}},r.prototype.LA_RECORD=function(e){return Nye.END_OF_FILE},r.prototype.topLevelRuleRecord=function(e,t){try{var i=new Lo.Rule({definition:[],name:e});return i.name=e,this.recordingProdStack.push(i),t.call(this),this.recordingProdStack.pop(),i}catch(n){if(n.KNOWN_RECORDER_ERROR!==!0)try{n.message=n.message+` + This error was thrown during the "grammar recording phase" For more info see: + https://chevrotain.io/docs/guide/internals.html#grammar-recording`}catch{throw n}throw n}},r.prototype.optionInternalRecord=function(e,t){return 
Ld.call(this,Lo.Option,e,t)},r.prototype.atLeastOneInternalRecord=function(e,t){Ld.call(this,Lo.RepetitionMandatory,t,e)},r.prototype.atLeastOneSepFirstInternalRecord=function(e,t){Ld.call(this,Lo.RepetitionMandatoryWithSeparator,t,e,Wq)},r.prototype.manyInternalRecord=function(e,t){Ld.call(this,Lo.Repetition,t,e)},r.prototype.manySepFirstInternalRecord=function(e,t){Ld.call(this,Lo.RepetitionWithSeparator,t,e,Wq)},r.prototype.orInternalRecord=function(e,t){return Oye.call(this,e,t)},r.prototype.subruleInternalRecord=function(e,t,i){if(Sy(t),!e||(0,yn.has)(e,"ruleName")===!1){var n=new Error("<SUBRULE"+Vq(t)+"> argument is invalid"+(" expecting a Parser method reference but got: <"+JSON.stringify(e)+">")+(` + inside top level rule: <`+this.recordingProdStack[0].name+">"));throw n.KNOWN_RECORDER_ERROR=!0,n}var s=(0,yn.peek)(this.recordingProdStack),o=e.ruleName,a=new Lo.NonTerminal({idx:t,nonTerminalName:o,label:i==null?void 0:i.LABEL,referencedRule:void 0});return s.definition.push(a),this.outputCst?Lye:vy},r.prototype.consumeInternalRecord=function(e,t,i){if(Sy(t),!(0,Xq.hasShortKeyProperty)(e)){var n=new Error("<CONSUME"+Vq(t)+"> argument is invalid"+(" expecting a TokenType reference but got: <"+JSON.stringify(e)+">")+(` + inside top level rule: <`+this.recordingProdStack[0].name+">"));throw n.KNOWN_RECORDER_ERROR=!0,n}var s=(0,yn.peek)(this.recordingProdStack),o=new Lo.Terminal({idx:t,terminalType:e,label:i==null?void 0:i.LABEL});return s.definition.push(o),$q},r}();xy.GastRecorder=Mye;function Ld(r,e,t,i){i===void 0&&(i=!1),Sy(t);var n=(0,yn.peek)(this.recordingProdStack),s=(0,yn.isFunction)(e)?e:e.DEF,o=new r({definition:[],idx:t});return i&&(o.separator=e.SEP),(0,yn.has)(e,"MAX_LOOKAHEAD")&&(o.maxLookahead=e.MAX_LOOKAHEAD),this.recordingProdStack.push(o),s.call(this),n.definition.push(o),this.recordingProdStack.pop(),vy}function Oye(r,e){var t=this;Sy(e);var i=(0,yn.peek)(this.recordingProdStack),n=(0,yn.isArray)(r)===!1,s=n===!1?r:r.DEF,o=new Lo.Alternation({definition:[],idx:e,ignoreAmbiguities:n&&r.IGNORE_AMBIGUITIES===!0});(0,yn.has)(r,"MAX_LOOKAHEAD")&&(o.maxLookahead=r.MAX_LOOKAHEAD);var a=(0,yn.some)(s,function(l){return(0,yn.isFunction)(l.GATE)});return o.hasPredicates=a,i.definition.push(o),(0,yn.forEach)(s,function(l){var c=new Lo.Alternative({definition:[]});o.definition.push(c),(0,yn.has)(l,"IGNORE_AMBIGUITIES")?c.ignoreAmbiguities=l.IGNORE_AMBIGUITIES:(0,yn.has)(l,"GATE")&&(c.ignoreAmbiguities=!0),t.recordingProdStack.push(c),l.ALT.call(t),t.recordingProdStack.pop()}),vy}function Vq(r){return r===0?"":""+r}function Sy(r){if(r<0||r>zq){var e=new Error("Invalid DSL Method idx value: <"+r+`> + `+("Idx value must be a none negative value smaller than "+(zq+1)));throw e.KNOWN_RECORDER_ERROR=!0,e}}});var rJ=w(Py=>{"use strict";Object.defineProperty(Py,"__esModule",{value:!0});Py.PerformanceTracer=void 0;var tJ=Gt(),Kye=jn(),Uye=function(){function r(){}return r.prototype.initPerformanceTracer=function(e){if((0,tJ.has)(e,"traceInitPerf")){var t=e.traceInitPerf,i=typeof t=="number";this.traceInitMaxIdent=i?t:1/0,this.traceInitPerf=i?t>0:t}else this.traceInitMaxIdent=0,this.traceInitPerf=Kye.DEFAULT_PARSER_CONFIG.traceInitPerf;this.traceInitIndent=-1},r.prototype.TRACE_INIT=function(e,t){if(this.traceInitPerf===!0){this.traceInitIndent++;var i=new Array(this.traceInitIndent+1).join(" ");this.traceInitIndent<this.traceInitMaxIdent&&console.log(i+"--> <"+e+">");var n=(0,tJ.timer)(t),s=n.time,o=n.value,a=s>10?console.warn:console.log;return this.traceInitIndent<this.traceInitMaxIdent&&a(i+"<-- <"+e+"> time: "+s+"ms"),this.traceInitIndent--,o}else return t()},r}();Py.PerformanceTracer=Uye});var 
iJ=w(Dy=>{"use strict";Object.defineProperty(Dy,"__esModule",{value:!0});Dy.applyMixins=void 0;function Hye(r,e){e.forEach(function(t){var i=t.prototype;Object.getOwnPropertyNames(i).forEach(function(n){if(n!=="constructor"){var s=Object.getOwnPropertyDescriptor(i,n);s&&(s.get||s.set)?Object.defineProperty(r.prototype,n,s):r.prototype[n]=t.prototype[n]}})})}Dy.applyMixins=Hye});var jn=w(dr=>{"use strict";var oJ=dr&&dr.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(dr,"__esModule",{value:!0});dr.EmbeddedActionsParser=dr.CstParser=dr.Parser=dr.EMPTY_ALT=dr.ParserDefinitionErrorType=dr.DEFAULT_RULE_CONFIG=dr.DEFAULT_PARSER_CONFIG=dr.END_OF_FILE=void 0;var en=Gt(),Gye=Yj(),nJ=LA(),aJ=xd(),sJ=pq(),Yye=sx(),jye=Bq(),qye=Fq(),Jye=Tq(),Wye=Mq(),zye=Hq(),Vye=Yq(),Xye=Jq(),Zye=eJ(),_ye=rJ(),$ye=iJ();dr.END_OF_FILE=(0,nJ.createTokenInstance)(nJ.EOF,"",NaN,NaN,NaN,NaN,NaN,NaN);Object.freeze(dr.END_OF_FILE);dr.DEFAULT_PARSER_CONFIG=Object.freeze({recoveryEnabled:!1,maxLookahead:3,dynamicTokensEnabled:!1,outputCst:!0,errorMessageProvider:aJ.defaultParserErrorProvider,nodeLocationTracking:"none",traceInitPerf:!1,skipValidations:!1});dr.DEFAULT_RULE_CONFIG=Object.freeze({recoveryValueFunc:function(){},resyncEnabled:!0});var ewe;(function(r){r[r.INVALID_RULE_NAME=0]="INVALID_RULE_NAME",r[r.DUPLICATE_RULE_NAME=1]="DUPLICATE_RULE_NAME",r[r.INVALID_RULE_OVERRIDE=2]="INVALID_RULE_OVERRIDE",r[r.DUPLICATE_PRODUCTIONS=3]="DUPLICATE_PRODUCTIONS",r[r.UNRESOLVED_SUBRULE_REF=4]="UNRESOLVED_SUBRULE_REF",r[r.LEFT_RECURSION=5]="LEFT_RECURSION",r[r.NONE_LAST_EMPTY_ALT=6]="NONE_LAST_EMPTY_ALT",r[r.AMBIGUOUS_ALTS=7]="AMBIGUOUS_ALTS",r[r.CONFLICT_TOKENS_RULES_NAMESPACE=8]="CONFLICT_TOKENS_RULES_NAMESPACE",r[r.INVALID_TOKEN_NAME=9]="INVALID_TOKEN_NAME",r[r.NO_NON_EMPTY_LOOKAHEAD=10]="NO_NON_EMPTY_LOOKAHEAD",r[r.AMBIGUOUS_PREFIX_ALTS=11]="AMBIGUOUS_PREFIX_ALTS",r[r.TOO_MANY_ALTS=12]="TOO_MANY_ALTS"})(ewe=dr.ParserDefinitionErrorType||(dr.ParserDefinitionErrorType={}));function twe(r){return r===void 0&&(r=void 0),function(){return r}}dr.EMPTY_ALT=twe;var ky=function(){function r(e,t){this.definitionErrors=[],this.selfAnalysisDone=!1;var i=this;if(i.initErrorHandler(t),i.initLexerAdapter(),i.initLooksAhead(t),i.initRecognizerEngine(e,t),i.initRecoverable(t),i.initTreeBuilder(t),i.initContentAssist(),i.initGastRecorder(t),i.initPerformanceTracer(t),(0,en.has)(t,"ignoredIssues"))throw new Error(`The <ignoredIssues> IParserConfig property has been deprecated. + Please use the <IGNORE_AMBIGUITIES> flag on the relevant DSL method instead. + See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#IGNORING_AMBIGUITIES + For further details.`);this.skipValidations=(0,en.has)(t,"skipValidations")?t.skipValidations:dr.DEFAULT_PARSER_CONFIG.skipValidations}return r.performSelfAnalysis=function(e){throw Error("The **static** `performSelfAnalysis` method has been deprecated. 
\nUse the **instance** method with the same name instead.")},r.prototype.performSelfAnalysis=function(){var e=this;this.TRACE_INIT("performSelfAnalysis",function(){var t;e.selfAnalysisDone=!0;var i=e.className;e.TRACE_INIT("toFastProps",function(){(0,en.toFastProperties)(e)}),e.TRACE_INIT("Grammar Recording",function(){try{e.enableRecording(),(0,en.forEach)(e.definedRulesNames,function(s){var o=e[s],a=o.originalGrammarAction,l=void 0;e.TRACE_INIT(s+" Rule",function(){l=e.topLevelRuleRecord(s,a)}),e.gastProductionsCache[s]=l})}finally{e.disableRecording()}});var n=[];if(e.TRACE_INIT("Grammar Resolving",function(){n=(0,sJ.resolveGrammar)({rules:(0,en.values)(e.gastProductionsCache)}),e.definitionErrors=e.definitionErrors.concat(n)}),e.TRACE_INIT("Grammar Validations",function(){if((0,en.isEmpty)(n)&&e.skipValidations===!1){var s=(0,sJ.validateGrammar)({rules:(0,en.values)(e.gastProductionsCache),maxLookahead:e.maxLookahead,tokenTypes:(0,en.values)(e.tokensMap),errMsgProvider:aJ.defaultGrammarValidatorErrorProvider,grammarName:i});e.definitionErrors=e.definitionErrors.concat(s)}}),(0,en.isEmpty)(e.definitionErrors)&&(e.recoveryEnabled&&e.TRACE_INIT("computeAllProdsFollows",function(){var s=(0,Gye.computeAllProdsFollows)((0,en.values)(e.gastProductionsCache));e.resyncFollows=s}),e.TRACE_INIT("ComputeLookaheadFunctions",function(){e.preComputeLookaheadFunctions((0,en.values)(e.gastProductionsCache))})),!r.DEFER_DEFINITION_ERRORS_HANDLING&&!(0,en.isEmpty)(e.definitionErrors))throw t=(0,en.map)(e.definitionErrors,function(s){return s.message}),new Error(`Parser Definition Errors detected: + `+t.join(` +------------------------------- +`))})},r.DEFER_DEFINITION_ERRORS_HANDLING=!1,r}();dr.Parser=ky;(0,$ye.applyMixins)(ky,[Yye.Recoverable,jye.LooksAhead,qye.TreeBuilder,Jye.LexerAdapter,zye.RecognizerEngine,Wye.RecognizerApi,Vye.ErrorHandler,Xye.ContentAssist,Zye.GastRecorder,_ye.PerformanceTracer]);var rwe=function(r){oJ(e,r);function e(t,i){i===void 0&&(i=dr.DEFAULT_PARSER_CONFIG);var n=this,s=(0,en.cloneObj)(i);return s.outputCst=!0,n=r.call(this,t,s)||this,n}return e}(ky);dr.CstParser=rwe;var iwe=function(r){oJ(e,r);function e(t,i){i===void 0&&(i=dr.DEFAULT_PARSER_CONFIG);var n=this,s=(0,en.cloneObj)(i);return s.outputCst=!1,n=r.call(this,t,s)||this,n}return e}(ky);dr.EmbeddedActionsParser=iwe});var lJ=w(Ry=>{"use strict";Object.defineProperty(Ry,"__esModule",{value:!0});Ry.createSyntaxDiagramsCode=void 0;var AJ=Dv();function nwe(r,e){var t=e===void 0?{}:e,i=t.resourceBase,n=i===void 0?"https://unpkg.com/chevrotain@"+AJ.VERSION+"/diagrams/":i,s=t.css,o=s===void 0?"https://unpkg.com/chevrotain@"+AJ.VERSION+"/diagrams/diagrams.css":s,a=` +<!-- This is a generated file --> +<!DOCTYPE html> +<meta charset="utf-8"> +<style> +  body { +    background-color: hsl(30, 20%, 95%) +  } +</style> + +`,l=` +<link rel='stylesheet' href='${o}'> +`,c=`