From 0b251f7146ccf6abed6631e1dd349236cdf355eb Mon Sep 17 00:00:00 2001
From: katrina
Date: Mon, 29 May 2023 22:52:48 -0400
Subject: [PATCH 1/6] start support for huggingface

---
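Notes: the handler now picks a model service per request based on the ENABLED_MODEL value
from `.env`. A minimal sketch of the intended call flow — assuming `ChatService` is exported
from the handler module (its export sits outside this hunk) and `.env` has been loaded
(e.g. with a dotenv-style loader) before startup:

```js
// Hypothetical driver script, for illustration only.
import { ChatService } from './server/handlers/chat_handler.js';

const chat = new ChatService();

// selectModelService() maps ENABLED_MODEL to a service instance:
// 'HUGGINGFACEHUB' -> HuggingFaceService, 'OPENAI' -> OpenAiService,
// and undefined for anything else.
const response = await chat.startChat({ body: { userInput: 'Hello there!' } });
console.log(response);
```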
 .SAMPLE_env                     |  6 +++++
 server/handlers/chat_handler.js | 44 +++++++++++++++++----------------
 server/services/model_hf.js     | 26 +++++++++++++++++++
 server/services/model_openai.js | 38 ++++++++++++++++++++++++++++
 4 files changed, 93 insertions(+), 21 deletions(-)
 create mode 100644 server/services/model_hf.js
 create mode 100644 server/services/model_openai.js

diff --git a/.SAMPLE_env b/.SAMPLE_env
index c1967c0..78fd0b6 100644
--- a/.SAMPLE_env
+++ b/.SAMPLE_env
@@ -1,2 +1,8 @@
+// Set which model you want to use
+ENABLED_MODEL="HUGGINGFACEHUB"
+
+// Hugging Face API Key
+HUGGINGFACEHUB_API_KEY=""
+
 //Open API API Key
 OPENAI_API_KEY=""

diff --git a/server/handlers/chat_handler.js b/server/handlers/chat_handler.js
index 58d1165..618b240 100644
--- a/server/handlers/chat_handler.js
+++ b/server/handlers/chat_handler.js
@@ -1,32 +1,34 @@
-import { ConversationChain } from 'langchain/chains';
-import { ChatOpenAI } from 'langchain/chat_models/openai';
-import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts';
-import { ConversationSummaryMemory } from 'langchain/memory';
+import { HuggingFaceService } from '../services/model_hf.js';
+import { OpenAiService } from '../services/model_openai.js';
+
+const { ENABLED_MODEL } = process.env;
 
 class ChatService {
   constructor () {
-    this.chat = new ChatOpenAI({ temperature: 0, verbose: true });
-    this.chatPrompt = ChatPromptTemplate.fromPromptMessages([
-      SystemMessagePromptTemplate.fromTemplate('The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.'),
-      new MessagesPlaceholder('history'),
-      HumanMessagePromptTemplate.fromTemplate('{input}'),
-    ]);
+  }
 
-    this.memory = new ConversationSummaryMemory({ llm: this.chat, returnMessages: true });
+  async selectModelService() {
+    let model;
+    switch (ENABLED_MODEL) {
+      case 'HUGGINGFACEHUB':
+        const huggingFaceService = new HuggingFaceService();
+        model = huggingFaceService;
+        break;
+      case 'OPENAI':
+        const openAiService = new OpenAiService();
+        model = openAiService;
+        break;
+      default:
+        break;
+    }
+    return model;
   }
 
   async startChat(data) {
     const { body: { userInput } } = data;
-
-    const chain = new ConversationChain({
-      memory: this.memory,
-      prompt: this.chatPrompt,
-      llm: this.chat,
-    });
-
-    const response = await chain.call({
-      input: userInput,
-    });
+    const model = await this.selectModelService();
+    console.log('model ', model);
+    const response = await model.call(userInput);
     return response;
   }

diff --git a/server/services/model_hf.js b/server/services/model_hf.js
new file mode 100644
index 0000000..50148f4
--- /dev/null
+++ b/server/services/model_hf.js
@@ -0,0 +1,26 @@
+import { HuggingFaceInference } from 'langchain/llms/hf';
+
+class HuggingFaceService {
+  constructor () {
+    this.model = new HuggingFaceInference({
+      model: 'gpt2',
+      temperature: 0.7,
+      maxTokens: 50,
+    });
+  }
+
+  async getModel() {
+    return this.model;
+  }
+
+  async call(userInput) {
+    console.log('this model ', this.model);
+    const response = await this.model.call(
+      userInput,
+    );
+    return response;
+  }
+}
+
+export { HuggingFaceService }
+

diff --git a/server/services/model_openai.js b/server/services/model_openai.js
new file mode 100644
index 0000000..fae7d27
--- /dev/null
+++ b/server/services/model_openai.js
@@ -0,0 +1,38 @@
+import { ConversationChain } from 'langchain/chains';
+import { ChatOpenAI } from 'langchain/chat_models/openai';
+import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts';
+import { ConversationSummaryMemory } from 'langchain/memory';
+
+class OpenAiService {
+  constructor () {
+    this.model = new ChatOpenAI({ temperature: 0, verbose: true });
+
+    this.chatPrompt = ChatPromptTemplate.fromPromptMessages([
+      SystemMessagePromptTemplate.fromTemplate('The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.'),
+      new MessagesPlaceholder('history'),
+      HumanMessagePromptTemplate.fromTemplate('{input}'),
+    ]);
+
+    this.memory = new ConversationSummaryMemory({ llm: this.model, returnMessages: true });
+  }
+
+  assembleChain () {
+    const chain = new ConversationChain({
+      memory: this.memory,
+      prompt: this.chatPrompt,
+      llm: this.model,
+    });
+    return chain;
+  }
+
+  call = async (userInput) => {
+    const chain = this.assembleChain();
+
+    const response = await chain.call({
+      input: userInput,
+    });
+    return response;
+  }
+}
+
+export { OpenAiService };

From 21c030c421b3156ee451e14dacee4c782ad8b3e2 Mon Sep 17 00:00:00 2001
From: katrina
Date: Tue, 30 May 2023 14:01:45 -0400
Subject: [PATCH 2/6] add config for model provider; support HF conversational
 model

---
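Notes: model selection is now driven by the MODEL_STORES map in
server/config/model_store_constants.js, and the Hugging Face service calls the hosted
conversational endpoint directly. For the TODO about passing past_user_inputs, a sketch of
how history could be threaded through — the `history` argument and its shape are assumptions
for illustration, not part of this patch:

```js
import { HfInference } from '@huggingface/inference';

const hf = new HfInference(process.env.HUGGINGFACEHUB_API_KEY);

// Pass prior turns alongside the new input; the fields mirror the
// inputs accepted by the Hugging Face conversational task.
async function callWithHistory(userInput, history = { past_user_inputs: [], generated_responses: [] }) {
  const response = await hf.conversational({
    model: 'microsoft/DialoGPT-large',
    inputs: {
      text: userInput,
      past_user_inputs: history.past_user_inputs,
      generated_responses: history.generated_responses,
    },
  });
  return { response: response && response.generated_text };
}
```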
 .SAMPLE_env                                    |  4 +--
 package.json                                   |  3 ++-
 server/config/model_store_constants.js         | 12 +++++++++
 server/handlers/chat_handler.js                | 26 +++----------------
 server/services/hf.js                          | 25 ++++++++++++++++++
 server/services/model_hf.js                    | 26 -------------------
 server/services/{model_openai.js => openai.js} |  1 +
 yarn.lock                                      |  5 ++++
 8 files changed, 50 insertions(+), 52 deletions(-)
 create mode 100644 server/config/model_store_constants.js
 create mode 100644 server/services/hf.js
 delete mode 100644 server/services/model_hf.js
 rename server/services/{model_openai.js => openai.js} (97%)

diff --git a/.SAMPLE_env b/.SAMPLE_env
index 78fd0b6..f408e01 100644
--- a/.SAMPLE_env
+++ b/.SAMPLE_env
@@ -1,5 +1,5 @@
-// Set which model you want to use
-ENABLED_MODEL="HUGGINGFACEHUB"
+// Set which model provider you want to use, HUGGING_FACE or OPEN_AI
+ENABLED_MODEL_STORE="HUGGING_FACE"
 
 // Hugging Face API Key
 HUGGINGFACEHUB_API_KEY=""

diff --git a/package.json b/package.json
index 2ae10e8..06aaeaf 100644
--- a/package.json
+++ b/package.json
@@ -8,11 +8,12 @@
     "start": "vite",
     "preview": "vite preview",
     "build": "vite build",
-    "start-server": "node ./server/index.js"
+    "start-server": "nodemon ./server/index.js"
   },
   "dependencies": {
     "@emotion/react": "^11.11.0",
     "@emotion/styled": "^11.11.0",
+    "@huggingface/inference": "^2.5.0",
     "@koa/cors": "^4.0.0",
     "@koa/router": "^12.0.0",
     "@mui/icons-material": "^5.11.16",

diff --git a/server/config/model_store_constants.js b/server/config/model_store_constants.js
new file mode 100644
index 0000000..39d129c
--- /dev/null
+++ b/server/config/model_store_constants.js
@@ -0,0 +1,12 @@
+import { HuggingFaceService } from '../services/hf.js'
+import { OpenAiService } from '../services/openai.js'
+
+export const MODEL_STORES = {
+  'HUGGING_FACE': HuggingFaceService,
+  'OPEN_AI': OpenAiService,
+};
+
+export const { ENABLED_MODEL_STORE } = process.env;
+export const DEFAULT_ENABLED_MODEL_STORE = 'HUGGING_FACE';
+
+export const enabledModel = ENABLED_MODEL_STORE || DEFAULT_ENABLED_MODEL_STORE;
\ No newline at end of file

diff --git a/server/handlers/chat_handler.js b/server/handlers/chat_handler.js
index 618b240..7c33de9 100644
--- a/server/handlers/chat_handler.js
+++ b/server/handlers/chat_handler.js
@@ -1,33 +1,13 @@
-import { HuggingFaceService } from '../services/model_hf.js';
-import { OpenAiService } from '../services/model_openai.js';
-
-const { ENABLED_MODEL } = process.env;
+import { MODEL_STORES, enabledModel } from '../config/model_store_constants.js';
 
 class ChatService {
   constructor () {
-  }
-
-  async selectModelService() {
-    let model;
-    switch (ENABLED_MODEL) {
-      case 'HUGGINGFACEHUB':
-        const huggingFaceService = new HuggingFaceService();
-        model = huggingFaceService;
-        break;
-      case 'OPENAI':
-        const openAiService = new OpenAiService();
-        model = openAiService;
-        break;
-      default:
-        break;
-    }
-    return model;
+    this.model = new MODEL_STORES[enabledModel]();
   }
 
   async startChat(data) {
     const { body: { userInput } } = data;
-    const model = await this.selectModelService();
-    console.log('model ', model);
+    const model = this.model;
     const response = await model.call(userInput);
     return response;

diff --git a/server/services/hf.js b/server/services/hf.js
new file mode 100644
index 0000000..7164aca
--- /dev/null
+++ b/server/services/hf.js
@@ -0,0 +1,25 @@
+import { HfInference } from "@huggingface/inference";
+
+const { HUGGINGFACEHUB_API_KEY } = process.env;
+
+class HuggingFaceService {
+  constructor () {
+    this.modelName = 'microsoft/DialoGPT-large';
+    this.model = new HfInference(HUGGINGFACEHUB_API_KEY);
+  }
+
+  async call(userInput) {
+    // TODO: pass in past_user_inputs for context
+    const response = await this.model.conversational({
+      model: this.modelName,
+      temperature: 0,
+      inputs: {
+        text: userInput,
+      }
+    });
+
+    return { response: response && response.generated_text };
+  }
+}
+
+export { HuggingFaceService }

diff --git a/server/services/model_hf.js b/server/services/model_hf.js
deleted file mode 100644
index 50148f4..0000000
--- a/server/services/model_hf.js
+++ /dev/null
@@ -1,26 +0,0 @@
-import { HuggingFaceInference } from 'langchain/llms/hf';
-
-class HuggingFaceService {
-  constructor () {
-    this.model = new HuggingFaceInference({
-      model: 'gpt2',
-      temperature: 0.7,
-      maxTokens: 50,
-    });
-  }
-
-  async getModel() {
-    return this.model;
-  }
-
-  async call(userInput) {
-    console.log('this model ', this.model);
-    const response = await this.model.call(
-      userInput,
-    );
-    return response;
-  }
-}
-
-export { HuggingFaceService }
-

diff --git a/server/services/model_openai.js b/server/services/openai.js
similarity index 97%
rename from server/services/model_openai.js
rename to server/services/openai.js
index fae7d27..9f76062 100644
--- a/server/services/model_openai.js
+++ b/server/services/openai.js
@@ -31,6 +31,7 @@ class OpenAiService {
     const response = await chain.call({
       input: userInput,
     });
+    console.log('response ', response);
     return response;
   }
 }

diff --git a/yarn.lock b/yarn.lock
index 5bd1f7d..ddcf700 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -520,6 +520,11 @@
   resolved "https://registry.yarnpkg.com/@fortaine/fetch-event-source/-/fetch-event-source-3.0.6.tgz#b8552a2ca2c5202f5699b93a92be0188d422b06e"
   integrity sha512-621GAuLMvKtyZQ3IA6nlDWhV1V/7PGOTNIGLUifxt0KzM+dZIweJ6F3XvQF3QnqeNfS1N7WQ0Kil1Di/lhChEw==
 
+"@huggingface/inference@^2.5.0":
+  version "2.5.0"
+  resolved "https://registry.yarnpkg.com/@huggingface/inference/-/inference-2.5.0.tgz#8e14ee6696e91aecb132c90d3b07be8373e70338"
+  integrity sha512-X3NSdrWAKNTLAsEKabH48Wc+Osys+S7ilRcH1bf9trSDmJlzPVXDseXMRBHCFPCYd5AAAIakhENO4zCqstVg8g==
+
 "@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
   version "0.3.3"
   resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz#7e02e6eb5df901aaedb08514203b096614024098"

From dcf8d954fb13f63898e4d018016d878403de4b30 Mon Sep 17 00:00:00 2001
From: katrina
Date: Tue, 30 May 2023 14:06:44 -0400
Subject: [PATCH 3/6] update readme

---
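Notes: for reference, a filled-in `.env` created from `.SAMPLE_env` might look like the
following — the values are placeholders, not real keys:

```
ENABLED_MODEL_STORE="HUGGING_FACE"
HUGGINGFACEHUB_API_KEY="hf_xxxxxxxxxxxxxxxxxxxx"
OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxx"
```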
 README.md                 | 2 +-
 server/services/openai.js | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 4e8785c..e58bd85 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ These are the available commands:
 
 To get started, follow the below steps:
 
-1. Create an `.env` file by copying the `SAMPLE_env` file and add API keys for the models you are going to use
+1. Create an `.env` file by copying the `.SAMPLE_env` file and add the model store provider you'll be using (e.g., Hugging Face or OpenAI) and the API keys for the models you are going to use
 1. Install packages
 1. Run the backend and frontend servers

diff --git a/server/services/openai.js b/server/services/openai.js
index 9f76062..fae7d27 100644
--- a/server/services/openai.js
+++ b/server/services/openai.js
@@ -31,7 +31,6 @@ class OpenAiService {
     const response = await chain.call({
       input: userInput,
     });
-    console.log('response ', response);
     return response;
   }
 }

From 11d67597e3c3047dc54815c28ac46cdc4fc52ce6 Mon Sep 17 00:00:00 2001
From: katrina
Date: Tue, 30 May 2023 14:13:26 -0400
Subject: [PATCH 4/6] update tutorials and how to contribute

---
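Notes: the production scripts documented below chain together in the usual way:

```bash
yarn build     # writes the production build of the frontend app to dist/
yarn preview   # serves that dist/ build locally on the default port 5173
```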
 README.md | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 3a1b4a4..1b71c91 100644
--- a/README.md
+++ b/README.md
@@ -26,15 +26,19 @@ To get started, follow the below steps:
 1. Create an `.env` file by copying the `.SAMPLE_env` file and add the model store provider you'll be using (e.g., Hugging Face or OpenAI) and the API keys for the models you are going to use
 1. Install packages
 1. Run the backend server that will start with a default port of `3100`
+
    ```bash
    yarn start-server
    ```
+
 1. Run the frontend server that will start with a default port of `5173`.
+
    ```bash
    yarn start
    ```
-  
+
   _Note:_ You can use the `-p` flag to specify a port for the frontend server. To do this, you can either run `yarn start` with an additional flag, like so:
+
   ```bash
   yarn start -- --port 3000
   ```
@@ -44,15 +48,26 @@ To get started, follow the below steps:
   ```bash
   vite --port 3000
   ```
-  
+
 Additional scripts are provided to prepare the app for production
+
 - `yarn build` — This will output a production build of the frontend app in the `dist` directory.
 - `yarn preview` — This will run the production build of the frontend app locally with a default port of `5173` (_note_: this will not work if you haven't generated the production build yet).
 
+### Tutorials
+
 👽 If you're looking for more thorough instructions follow [this tutorial on running an LLM React Node app](https://blog.golivecosmos.com/build-an-llm-app-with-node-react-and-langchain-js/). 📚
 
 -------------
-## Shout out to the ⭐star gazers⭐ supporting the project
+## How to Contribute
+
+Feel free to try out the template and open any issues if there's something you'd like to see added or fixed, or open a pull request to contribute.
+
+### Shout out to the ⭐star gazers⭐ supporting the project
+
 [![Stargazers repo roster for @golivecosmos/llm-react-node-app-template](https://reporoster.com/stars/golivecosmos/llm-react-node-app-template)](https://github.com/golivecosmos/llm-react-node-app-template/stargazers)
+
+### Thanks for the forks🍴
+
+[![Forkers repo roster for @golivecosmos/llm-react-node-app-template](https://reporoster.com/forks/golivecosmos/llm-react-node-app-template)](https://github.com/golivecosmos/llm-react-node-app-template/network/members)
\ No newline at end of file

From e11f3dc15bd6c4536b9596c3c282dfc6c9ec56c9 Mon Sep 17 00:00:00 2001
From: katrina
Date: Tue, 30 May 2023 14:25:17 -0400
Subject: [PATCH 5/6] remove quotes from sample env values

---
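Notes: assuming the server reads `.env` with a dotenv-style loader, surrounding double
quotes are optional and stripped during parsing, so the unquoted values below resolve to
the same strings at runtime:

```
# both lines parse to the same value
OPENAI_API_KEY="sk-xxxx"
OPENAI_API_KEY=sk-xxxx
```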
 .SAMPLE_env | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.SAMPLE_env b/.SAMPLE_env
index f408e01..d4122bd 100644
--- a/.SAMPLE_env
+++ b/.SAMPLE_env
@@ -1,8 +1,8 @@
 // Set which model provider you want to use, HUGGING_FACE or OPEN_AI
-ENABLED_MODEL_STORE="HUGGING_FACE"
+ENABLED_MODEL_STORE=HUGGING_FACE
 
 // Hugging Face API Key
-HUGGINGFACEHUB_API_KEY=""
+HUGGINGFACEHUB_API_KEY=
 
 //Open API API Key
-OPENAI_API_KEY=""
+OPENAI_API_KEY=

From 48ebf8523794b62542d8ab9afb2975bfa8139849 Mon Sep 17 00:00:00 2001
From: katrina
Date: Tue, 30 May 2023 14:26:56 -0400
Subject: [PATCH 6/6] don't store this.model in a variable

---
 server/handlers/chat_handler.js | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/server/handlers/chat_handler.js b/server/handlers/chat_handler.js
index 7c33de9..cc2ac00 100644
--- a/server/handlers/chat_handler.js
+++ b/server/handlers/chat_handler.js
@@ -7,8 +7,7 @@ class ChatService {
 
   async startChat(data) {
     const { body: { userInput } } = data;
-    const model = this.model;
-    const response = await model.call(userInput);
+    const response = await this.model.call(userInput);
     return response;
   }