commit 425b33877c819dd88f3692aae37452c767371f6b
Author: Simon Gardling <titaniumtown@proton.me>
Date:   Thu Sep 19 10:00:39 2024 -0400

    use locally downloaded embeddings

diff --git a/gpt4all-chat/CMakeLists.txt b/gpt4all-chat/CMakeLists.txt
index 900307ae..802fc31a 100644
--- a/gpt4all-chat/CMakeLists.txt
+++ b/gpt4all-chat/CMakeLists.txt
@@ -120,6 +120,7 @@ elseif (APPLE)
 endif()
 
 # Embedding model
+#[[
 set(LOCAL_EMBEDDING_MODEL "nomic-embed-text-v1.5.f16.gguf")
 set(LOCAL_EMBEDDING_MODEL_MD5 "a5401e7f7e46ed9fcaed5b60a281d547")
 set(LOCAL_EMBEDDING_MODEL_PATH "${CMAKE_BINARY_DIR}/resources/${LOCAL_EMBEDDING_MODEL}")
@@ -134,6 +135,7 @@ message(STATUS "Embedding model downloaded to ${LOCAL_EMBEDDING_MODEL_PATH}")
 if (APPLE)
     list(APPEND CHAT_EXE_RESOURCES "${LOCAL_EMBEDDING_MODEL_PATH}")
 endif()
+]]
 
 set(QAPPLICATION_CLASS QGuiApplication)
 add_subdirectory(deps/SingleApplication)
@@ -348,11 +350,13 @@ if (LLMODEL_CUDA)
     endif()
 endif()
 
+#[[
 if (NOT APPLE)
     install(FILES "${LOCAL_EMBEDDING_MODEL_PATH}"
             DESTINATION resources
             COMPONENT ${COMPONENT_NAME_MAIN})
 endif()
+]]
 
 set(CPACK_GENERATOR "IFW")
 set(CPACK_VERBATIM_VARIABLES YES)
diff --git a/gpt4all-chat/src/embllm.cpp b/gpt4all-chat/src/embllm.cpp
index 81b1e9e1..e3266cc7 100644
--- a/gpt4all-chat/src/embllm.cpp
+++ b/gpt4all-chat/src/embllm.cpp
@@ -84,7 +84,7 @@ bool EmbeddingLLMWorker::loadModel()
 
     QString filePath = embPathFmt.arg(QCoreApplication::applicationDirPath(), LOCAL_EMBEDDING_MODEL);
     if (!QFileInfo::exists(filePath)) {
-        qWarning() << "embllm WARNING: Local embedding model not found";
+        qWarning() << "embllm WARNING: Local embedding model not found: " << filePath;
         return false;
     }
 
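Below is a minimal, self-contained sketch (not the project's code) of the pattern the embllm.cpp hunk adjusts: resolving a local embedding model relative to the application binary and including the resolved path in the warning when the file is missing. The model filename constant and the lookup location are assumptions made for illustration; the real loader builds its path from the embPathFmt format string shown in the hunk.

// Hedged sketch only: mirrors the "warn with the resolved path" change, not the actual loader.
#include <QCoreApplication>
#include <QDebug>
#include <QFileInfo>
#include <QString>

// Hypothetical stand-in for the LOCAL_EMBEDDING_MODEL value defined by the build system.
static const QString kEmbeddingModel = QStringLiteral("nomic-embed-text-v1.5.f16.gguf");

bool localEmbeddingModelExists()
{
    // Assumed layout: the model sits directly next to the executable.
    const QString filePath = QStringLiteral("%1/%2")
        .arg(QCoreApplication::applicationDirPath(), kEmbeddingModel);
    if (!QFileInfo::exists(filePath)) {
        // Logging the resolved path tells the user exactly where the lookup failed.
        qWarning() << "embllm WARNING: Local embedding model not found:" << filePath;
        return false;
    }
    return true;
}

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);
    return localEmbeddingModelExists() ? 0 : 1;
}

Including the path in the warning makes the failure actionable: the CMake change above stops downloading and installing the model with the build, so a user who has not placed a locally downloaded copy where the application expects it can see at a glance where the lookup happened.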