Feature/update openai version, add reasoning effort param, add o3 mini #3973

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged · 5 commits · Feb 4, 2025
4 changes: 2 additions & 2 deletions package.json
@@ -71,9 +71,9 @@
     },
     "resolutions": {
         "@google/generative-ai": "^0.15.0",
-        "@langchain/core": "0.3.29",
+        "@langchain/core": "0.3.37",
         "@qdrant/openapi-typescript-fetch": "1.2.6",
-        "openai": "4.57.3",
+        "openai": "4.82.0",
         "protobufjs": "7.4.0"
     },
     "eslintIgnore": [
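The `resolutions` bump is what unlocks the rest of this PR: `openai@4.82.0` exposes the `reasoning_effort` chat-completion parameter that the node code below forwards, and `@langchain/core@0.3.37` keeps the workspace on matching peer versions. A minimal sketch against the pinned SDK (model choice and prompt are placeholders):

```ts
import OpenAI from 'openai'

async function main() {
    const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

    // reasoning_effort is accepted for reasoning models such as o1 / o3-mini
    const completion = await client.chat.completions.create({
        model: 'o3-mini',
        reasoning_effort: 'medium', // 'low' | 'medium' | 'high'
        messages: [{ role: 'user', content: 'Summarize this PR in one sentence.' }]
    })
    console.log(completion.choices[0].message.content)
}

main()
```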
12 changes: 12 additions & 0 deletions packages/components/models.json
@@ -230,6 +230,14 @@
     {
         "name": "azureChatOpenAI",
         "models": [
+            {
+                "label": "o3-mini",
+                "name": "o3-mini"
+            },
+            {
+                "label": "o1",
+                "name": "o1"
+            },
             {
                 "label": "o1-preview",
                 "name": "o1-preview"
@@ -397,6 +405,10 @@
     {
         "name": "chatGoogleGenerativeAI",
         "models": [
+            {
+                "label": "gemini-2.0-flash-exp",
+                "name": "gemini-2.0-flash-exp"
+            },
             {
                 "label": "gemini-1.5-flash-latest",
                 "name": "gemini-1.5-flash-latest"
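For context, each entry in `models.json` is a label/name pair that the model loader (`getModels`, imported in the Azure node below) reads to populate the model dropdown in the UI. A hedged sketch of the shape these entries appear to follow; the interface names here are illustrative, not Flowise's actual types:

```ts
// Illustrative types only — the real loader lives in src/modelLoader.
interface ModelEntry {
    label: string // text shown in the UI dropdown
    name: string // value sent to the provider as the model name
}

interface ModelCategory {
    name: string // node name, e.g. 'azureChatOpenAI'
    models: ModelEntry[]
}

const categories: ModelCategory[] = [
    { name: 'azureChatOpenAI', models: [{ label: 'o3-mini', name: 'o3-mini' }] }
]
```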
@@ -23,7 +23,7 @@ class GraphCypherQA_Chain implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Graph Cypher QA Chain'
         this.name = 'graphCypherQAChain'
-        this.version = 1.0
+        this.version = 1.1
         this.type = 'GraphCypherQAChain'
         this.icon = 'graphqa.svg'
         this.category = 'Chains'
@@ -47,7 +47,8 @@
                 name: 'cypherPrompt',
                 optional: true,
                 type: 'BasePromptTemplate',
-                description: 'Prompt template for generating Cypher queries. Must include {schema} and {question} variables'
+                description:
+                    'Prompt template for generating Cypher queries. Must include {schema} and {question} variables. If not provided, default prompt will be used.'
             },
             {
                 label: 'Cypher Generation Model',
@@ -61,7 +62,8 @@
                 name: 'qaPrompt',
                 optional: true,
                 type: 'BasePromptTemplate',
-                description: 'Prompt template for generating answers. Must include {context} and {question} variables'
+                description:
+                    'Prompt template for generating answers. Must include {context} and {question} variables. If not provided, default prompt will be used.'
             },
             {
                 label: 'QA Model',
@@ -111,6 +113,10 @@
         const returnDirect = nodeData.inputs?.returnDirect as boolean
         const output = nodeData.outputs?.output as string

+        if (!model) {
+            throw new Error('Language Model is required')
+        }
+
         // Handle prompt values if they exist
         let cypherPromptTemplate: PromptTemplate | FewShotPromptTemplate | undefined
         let qaPromptTemplate: PromptTemplate | undefined
@@ -147,10 +153,6 @@
             })
         }

-        if ((!cypherModel || !qaModel) && !model) {
-            throw new Error('Language Model is required when Cypher Model or QA Model are not provided')
-        }
-
         // Validate required variables in prompts
         if (
             cypherPromptTemplate &&
@@ -165,13 +167,13 @@
             returnDirect
         }

-        if (cypherModel && cypherPromptTemplate) {
-            fromLLMInput['cypherLLM'] = cypherModel
+        if (cypherPromptTemplate) {
+            fromLLMInput['cypherLLM'] = cypherModel ?? model
             fromLLMInput['cypherPrompt'] = cypherPromptTemplate
         }

-        if (qaModel && qaPromptTemplate) {
-            fromLLMInput['qaLLM'] = qaModel
+        if (qaPromptTemplate) {
+            fromLLMInput['qaLLM'] = qaModel ?? model
             fromLLMInput['qaPrompt'] = qaPromptTemplate
         }

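The behavioral change above: a custom Cypher or QA prompt used to be silently ignored unless its dedicated model was also connected; now the chain-level model steps in via `??`, and the required-model check moves to the top of `init`. A minimal sketch of the revised fallback (the `declare`d values stand in for the node's real inputs):

```ts
import { BaseLanguageModel } from '@langchain/core/language_models/base'
import { PromptTemplate } from '@langchain/core/prompts'

declare const model: BaseLanguageModel // required, validated up front
declare const cypherModel: BaseLanguageModel | undefined // optional override
declare const cypherPromptTemplate: PromptTemplate | undefined

const fromLLMInput: Record<string, unknown> = { llm: model }
if (cypherPromptTemplate) {
    fromLLMInput.cypherLLM = cypherModel ?? model // fall back to the base model
    fromLLMInput.cypherPrompt = cypherPromptTemplate
}
```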
@@ -1,11 +1,9 @@
-import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, ClientOptions, LegacyOpenAIInput } from '@langchain/openai'
+import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
 import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
-import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'

 const serverCredentialsExists =
     !!process.env.AZURE_OPENAI_API_KEY &&
@@ -33,7 +31,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         this.icon = 'Azure.svg'
         this.category = 'Chat Models'
         this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
-        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainAzureChatOpenAI)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',
@@ -155,6 +153,29 @@
                 default: 'low',
                 optional: false,
                 additionalParams: true
+            },
+            {
+                label: 'Reasoning Effort',
+                description: 'Constrains effort on reasoning for reasoning models. Only applicable to o1 and o3 models',
+                name: 'reasoningEffort',
+                type: 'options',
+                options: [
+                    {
+                        label: 'Low',
+                        name: 'low'
+                    },
+                    {
+                        label: 'Medium',
+                        name: 'medium'
+                    },
+                    {
+                        label: 'High',
+                        name: 'high'
+                    }
+                ],
+                default: 'low',
+                optional: false,
+                additionalParams: true
             }
         ]
     }
@@ -178,6 +199,7 @@
         const topP = nodeData.inputs?.topP as string
         const basePath = nodeData.inputs?.basepath as string
         const baseOptions = nodeData.inputs?.baseOptions
+        const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -188,10 +210,7 @@
         const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
         const imageResolution = nodeData.inputs?.imageResolution as string

-        const obj: Partial<AzureOpenAIInput> &
-            BaseLLMParams &
-            Partial<OpenAIChatInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
+        const obj: ChatOpenAIFields & Partial<AzureOpenAIInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,
@@ -218,6 +237,12 @@
                 console.error('Error parsing base options', exception)
             }
         }
+        if (modelName === 'o3-mini') {
+            delete obj.temperature
+        }
+        if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
+            obj.reasoningEffort = reasoningEffort
+        }

         const multiModalOption: IMultiModalOption = {
             image: {
@@ -226,7 +251,7 @@
             }
         }

-        const model = new ChatOpenAI(nodeData.id, obj)
+        const model = new AzureChatOpenAI(nodeData.id, obj)
         model.setMultiModalOption(multiModalOption)
         return model
     }
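Putting the Azure changes together: temperature is stripped for `o3-mini` (the API rejects it), and `reasoningEffort` rides on the same fields object that configures the deployment. A hedged construction sketch using `@langchain/openai` directly — the instance, deployment, and API-version strings are placeholders:

```ts
import { AzureChatOpenAI } from '@langchain/openai'

const model = new AzureChatOpenAI({
    modelName: 'o3-mini',
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // placeholder env var
    azureOpenAIApiInstanceName: 'my-instance', // placeholder
    azureOpenAIApiDeploymentName: 'o3-mini', // placeholder deployment
    azureOpenAIApiVersion: '2024-12-01-preview', // placeholder
    reasoningEffort: 'medium' // forwarded only for o1/o3 models in the node code
    // note: no temperature — o3-mini rejects it, hence `delete obj.temperature` above
})
```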
@@ -0,0 +1,41 @@
+import { AzureChatOpenAI as LangchainAzureChatOpenAI, OpenAIChatInput, AzureOpenAIInput, ClientOptions } from '@langchain/openai'
+import { IMultiModalOption, IVisionChatModal } from '../../../src'
+import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+
+export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVisionChatModal {
+    configuredModel: string
+    configuredMaxToken?: number
+    multiModalOption: IMultiModalOption
+    id: string
+
+    constructor(
+        id: string,
+        fields?: Partial<OpenAIChatInput> &
+            Partial<AzureOpenAIInput> & {
+                openAIApiKey?: string
+                openAIApiVersion?: string
+                openAIBasePath?: string
+                deploymentName?: string
+            } & BaseChatModelParams & {
+                configuration?: ClientOptions
+            }
+    ) {
+        super(fields)
+        this.id = id
+        this.configuredModel = fields?.modelName ?? ''
+        this.configuredMaxToken = fields?.maxTokens
+    }
+
+    revertToOriginalModel(): void {
+        this.modelName = this.configuredModel
+        this.maxTokens = this.configuredMaxToken
+    }
+
+    setMultiModalOption(multiModalOption: IMultiModalOption): void {
+        this.multiModalOption = multiModalOption
+    }
+
+    setVisionModel(): void {
+        // pass
+    }
+}
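This new wrapper exists so Flowise's vision handling can swap models in and out: the constructor snapshots the configured model and token budget, and `revertToOriginalModel` restores them. A hedged usage sketch — the `IMultiModalOption` field names mirror the `allowImageUploads`/`imageResolution` inputs read above, but the exact shape is an assumption, and the credentials are placeholders:

```ts
import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'

const model = new AzureChatOpenAI('node-123', {
    modelName: 'gpt-4o',
    maxTokens: 1024,
    azureOpenAIApiKey: '...', // placeholder credentials
    azureOpenAIApiInstanceName: 'my-instance',
    azureOpenAIApiDeploymentName: 'gpt-4o',
    azureOpenAIApiVersion: '2024-08-01-preview'
})
model.setMultiModalOption({
    image: { allowImageUploads: true, imageResolution: 'low' } // assumed shape
})

model.setVisionModel() // a no-op for Azure, per the class above
// ... invoke the chain ...
model.revertToOriginalModel() // restores modelName/maxTokens captured at construction
```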
18 changes: 11 additions & 7 deletions packages/components/nodes/chatmodels/ChatCerebras/ChatCerebras.ts
@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -135,7 +134,7 @@ class ChatCerebras_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const cerebrasAIApiKey = getCredentialParam('cerebrasApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: cerebrasAIApiKey,
@@ -158,10 +157,15 @@
                 throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
             }
         }
-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }
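This is the recurring migration in this PR (the same edit appears in ChatLocalAI and ChatNvdiaNIM below): with the v4 `openai` SDK underneath `@langchain/openai`, `ChatOpenAI` no longer takes a legacy second argument, so custom endpoints and headers move into the `configuration` field of the first argument. A hedged before/after sketch — the model name, URL, and header are placeholders:

```ts
import { ChatOpenAI } from '@langchain/openai'

// Before: legacy second argument
// const model = new ChatOpenAI(obj, { basePath, baseOptions })

// After: everything travels on the first argument
const model = new ChatOpenAI({
    modelName: 'llama3.1-8b', // placeholder model
    openAIApiKey: process.env.CEREBRAS_API_KEY,
    configuration: {
        baseURL: 'https://api.cerebras.ai/v1', // placeholder; was `basePath`
        defaultHeaders: { 'X-Custom-Header': 'value' } // placeholder; was `baseOptions`
    }
})
```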
@@ -1,6 +1,5 @@
-import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -108,7 +107,7 @@ class ChatLocalAI_ChatModels implements INode {

         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: 'sk-',
@@ -120,8 +119,9 @@
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
         if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+        if (basePath) obj.configuration = { baseURL: basePath }

-        const model = new ChatOpenAI(obj, { basePath })
+        const model = new ChatOpenAI(obj)

         return model
     }
19 changes: 11 additions & 8 deletions packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts
@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

@@ -134,7 +133,7 @@ class ChatNvdiaNIM_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const nvdiaNIMApiKey = getCredentialParam('nvdiaNIMApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { nvdiaNIMApiKey?: string } = {
+        const obj: ChatOpenAIFields & { nvdiaNIMApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: nvdiaNIMApiKey,
@@ -154,14 +153,18 @@
             try {
                 parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
             } catch (exception) {
-                throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
+                throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception)
             }
         }

-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }