Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat(Sentiment Analysis Node): Implement Sentiment Analysis node (#10184)
1 parent 512eb11 · commit 8ef0a0c
Showing 4 changed files with 319 additions and 0 deletions.
257 changes: 257 additions & 0 deletions
packages/@n8n/nodes-langchain/nodes/chains/SentimentAnalysis/SentimentAnalysis.node.ts
@@ -0,0 +1,257 @@
import type {
  IDataObject,
  IExecuteFunctions,
  INodeExecutionData,
  INodeParameters,
  INodeType,
  INodeTypeDescription,
} from 'n8n-workflow';

import { NodeConnectionType, NodeOperationError } from 'n8n-workflow';

import type { BaseLanguageModel } from '@langchain/core/language_models/base';
import { HumanMessage } from '@langchain/core/messages';
import { SystemMessagePromptTemplate, ChatPromptTemplate } from '@langchain/core/prompts';
import { OutputFixingParser, StructuredOutputParser } from 'langchain/output_parsers';
import { z } from 'zod';
import { getTracingConfig } from '../../../utils/tracing';
const DEFAULT_SYSTEM_PROMPT_TEMPLATE =
  'You are a highly intelligent and accurate sentiment analyzer. Analyze the sentiment of the provided text. Categorize it into one of the following: {categories}. Use the provided formatting instructions. Only output the JSON.';

const DEFAULT_CATEGORIES = 'Positive, Neutral, Negative';
const configuredOutputs = (parameters: INodeParameters, defaultCategories: string) => {
  const options = (parameters?.options ?? {}) as IDataObject;
  const categories = (options?.categories as string) ?? defaultCategories;
  const categoriesArray = categories.split(',').map((cat) => cat.trim());

  const ret = categoriesArray.map((cat) => ({ type: NodeConnectionType.Main, displayName: cat }));
  return ret;
};

export class SentimentAnalysis implements INodeType {
  description: INodeTypeDescription = {
    displayName: 'Sentiment Analysis',
    name: 'sentimentAnalysis',
    icon: 'fa:balance-scale-left',
    iconColor: 'black',
    group: ['transform'],
    version: 1,
    description: 'Analyze the sentiment of your text',
    codex: {
      categories: ['AI'],
      subcategories: {
        AI: ['Chains', 'Root Nodes'],
      },
      resources: {
        primaryDocumentation: [
          {
            url: 'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.sentimentanalysis/',
          },
        ],
      },
    },
    defaults: {
      name: 'Sentiment Analysis',
    },
    inputs: [
      { displayName: '', type: NodeConnectionType.Main },
      {
        displayName: 'Model',
        maxConnections: 1,
        type: NodeConnectionType.AiLanguageModel,
        required: true,
      },
    ],
    outputs: `={{(${configuredOutputs})($parameter, "${DEFAULT_CATEGORIES}")}}`,
    properties: [
      {
        displayName: 'Text to Analyze',
        name: 'inputText',
        type: 'string',
        required: true,
        default: '',
        description: 'Use an expression to reference data in previous nodes or enter static text',
        typeOptions: {
          rows: 2,
        },
      },
      {
        displayName:
          'Sentiment scores are LLM-generated estimates, not statistically rigorous measurements. They may be inconsistent across runs and should be used as rough indicators only.',
        name: 'detailedResultsNotice',
        type: 'notice',
        default: '',
        displayOptions: {
          show: {
            '/options.includeDetailedResults': [true],
          },
        },
      },
      {
        displayName: 'Options',
        name: 'options',
        type: 'collection',
        default: {},
        placeholder: 'Add Option',
        options: [
          {
            displayName: 'Sentiment Categories',
            name: 'categories',
            type: 'string',
            default: DEFAULT_CATEGORIES,
            description: 'A comma-separated list of categories to analyze',
            noDataExpression: true,
            typeOptions: {
              rows: 2,
            },
          },
          {
            displayName: 'System Prompt Template',
            name: 'systemPromptTemplate',
            type: 'string',
            default: DEFAULT_SYSTEM_PROMPT_TEMPLATE,
            description: 'String to use directly as the system prompt template',
            typeOptions: {
              rows: 6,
            },
          },
          {
            displayName: 'Include Detailed Results',
            name: 'includeDetailedResults',
            type: 'boolean',
            default: false,
            description:
              'Whether to include sentiment strength and confidence scores in the output',
          },
          {
            displayName: 'Enable Auto-Fixing',
            name: 'enableAutoFixing',
            type: 'boolean',
            default: true,
            description: 'Whether to enable auto-fixing for the output parser',
          },
        ],
      },
    ],
  };

  async execute(this: IExecuteFunctions): Promise<INodeExecutionData[][]> {
    const items = this.getInputData();

    const llm = (await this.getInputConnectionData(
      NodeConnectionType.AiLanguageModel,
      0,
    )) as BaseLanguageModel;

    const returnData: INodeExecutionData[][] = [];

    for (let i = 0; i < items.length; i++) {
      try {
        const sentimentCategories = this.getNodeParameter(
          'options.categories',
          i,
          DEFAULT_CATEGORIES,
        ) as string;

        const categories = sentimentCategories
          .split(',')
          .map((cat) => cat.trim())
          .filter(Boolean);

        if (categories.length === 0) {
          throw new NodeOperationError(this.getNode(), 'No sentiment categories provided', {
            itemIndex: i,
          });
        }

        // Initialize returnData with empty arrays for each category
        if (returnData.length === 0) {
          returnData.push(...Array.from({ length: categories.length }, () => []));
        }

        const options = this.getNodeParameter('options', i, {}) as {
          systemPromptTemplate?: string;
          includeDetailedResults?: boolean;
          enableAutoFixing?: boolean;
        };

        const schema = z.object({
          sentiment: z.enum(categories as [string, ...string[]]),
          strength: z
            .number()
            .min(0)
            .max(1)
            .describe('Strength score for sentiment in relation to the category'),
          confidence: z.number().min(0).max(1),
        });

        const structuredParser = StructuredOutputParser.fromZodSchema(schema);

        const parser = options.enableAutoFixing
          ? OutputFixingParser.fromLLM(llm, structuredParser)
          : structuredParser;

        const systemPromptTemplate = SystemMessagePromptTemplate.fromTemplate(
          `${options.systemPromptTemplate ?? DEFAULT_SYSTEM_PROMPT_TEMPLATE}
{format_instructions}`,
        );

        const input = this.getNodeParameter('inputText', i) as string;
        const inputPrompt = new HumanMessage(input);
        const messages = [
          await systemPromptTemplate.format({
            categories: sentimentCategories,
            format_instructions: parser.getFormatInstructions(),
          }),
          inputPrompt,
        ];

        const prompt = ChatPromptTemplate.fromMessages(messages);
        const chain = prompt.pipe(llm).pipe(parser).withConfig(getTracingConfig(this));

        try {
          const output = await chain.invoke(messages);
          const sentimentIndex = categories.findIndex(
            (s) => s.toLowerCase() === output.sentiment.toLowerCase(),
          );

          if (sentimentIndex !== -1) {
            const resultItem = { ...items[i] };
            const sentimentAnalysis: IDataObject = {
              category: output.sentiment,
            };
            if (options.includeDetailedResults) {
              sentimentAnalysis.strength = output.strength;
              sentimentAnalysis.confidence = output.confidence;
            }
            resultItem.json = {
              ...resultItem.json,
              sentimentAnalysis,
            };
            returnData[sentimentIndex].push(resultItem);
          }
        } catch (error) {
          throw new NodeOperationError(
            this.getNode(),
            'Error during parsing of LLM output, please check your LLM model and configuration',
            {
              itemIndex: i,
            },
          );
        }
      } catch (error) {
        if (this.continueOnFail(error)) {
          const executionErrorData = this.helpers.constructExecutionMetaData(
            this.helpers.returnJsonArray({ error: error.message }),
            { itemData: { item: i } },
          );
          returnData[0].push(...executionErrorData);
          continue;
        }
        throw error;
      }
    }
    return returnData;
  }
}
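Note on the dynamic outputs: rather than a fixed list, `outputs` is an n8n expression that stringifies the `configuredOutputs` helper and evaluates it against the node's current parameters, so the node exposes one Main output per configured sentiment category and `execute()` routes each item to the output whose index matches the category the LLM returned. A minimal sketch of what that evaluation yields, assuming `NodeConnectionType.Main` resolves to the string 'main' and using invented category names purely for illustration:

// Hypothetical illustration only; the category names below are made up for the example.
const exampleParameters = {
  options: { categories: 'Praise, Complaint, Question' },
} as unknown as INodeParameters;

const exampleOutputs = configuredOutputs(exampleParameters, DEFAULT_CATEGORIES);
// => [
//   { type: 'main', displayName: 'Praise' },
//   { type: 'main', displayName: 'Complaint' },
//   { type: 'main', displayName: 'Question' },
// ]
// With no categories option set, the default 'Positive, Neutral, Negative'
// list produces three outputs instead.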
59 changes: 59 additions & 0 deletions
...es/@n8n/nodes-langchain/nodes/vector_store/VectorStoreIndexer/VectorStoreInMemory.node.ts
@@ -0,0 +1,59 @@
import type { INodeProperties } from 'n8n-workflow';
import { createVectorStoreNode } from '../shared/createVectorStoreNode';
import { MemoryVectorStoreManager } from '../shared/MemoryVectorStoreManager';

const insertFields: INodeProperties[] = [
  {
    displayName:
      'The embedded data are stored in the server memory, so they will be lost when the server is restarted. Additionally, if the amount of data is too large, it may cause the server to crash due to insufficient memory.',
    name: 'notice',
    type: 'notice',
    default: '',
  },
  {
    displayName: 'Clear Store',
    name: 'clearStore',
    type: 'boolean',
    default: false,
    description: 'Whether to clear the store before inserting new data',
  },
];

export const VectorStoreInMemory = createVectorStoreNode({
  meta: {
    displayName: 'In-Memory Vector Store',
    name: 'vectorStoreInMemory',
    description: 'Work with your data in In-Memory Vector Store',
    icon: 'fa:database',
    docsUrl:
      'https://docs.n8n.io/integrations/builtin/cluster-nodes/root-nodes/n8n-nodes-langchain.vectorstoreinmemory/',
  },
  sharedFields: [
    {
      displayName: 'Memory Key',
      name: 'memoryKey',
      type: 'string',
      default: 'vector_store_key',
      description:
        'The key to use to store the vector memory in the workflow data. The key will be prefixed with the workflow ID to avoid collisions.',
    },
  ],
  insertFields,
  loadFields: [],
  retrieveFields: [],
  async getVectorStoreClient(context, _filter, embeddings, itemIndex) {
    const workflowId = context.getWorkflow().id;
    const memoryKey = context.getNodeParameter('memoryKey', itemIndex) as string;
    const vectorStoreSingleton = MemoryVectorStoreManager.getInstance(embeddings);

    return await vectorStoreSingleton.getVectorStore(`${workflowId}__${memoryKey}`);
  },
  async populateVectorStore(context, embeddings, documents, itemIndex) {
    const memoryKey = context.getNodeParameter('memoryKey', itemIndex) as string;
    const clearStore = context.getNodeParameter('clearStore', itemIndex) as boolean;
    const workflowId = context.getWorkflow().id;
    const vectorStoreInstance = MemoryVectorStoreManager.getInstance(embeddings);

    void vectorStoreInstance.addDocuments(`${workflowId}__${memoryKey}`, documents, clearStore);
  },
});
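The shared MemoryVectorStoreManager itself is not part of this diff. Purely as a hedged sketch of a process-wide singleton that would satisfy the three calls used above (getInstance, getVectorStore, addDocuments), built on LangChain's MemoryVectorStore, it might look roughly like the following; the class name and internals here are assumptions for illustration, not the repo's actual shared implementation:

// Illustrative sketch only -- NOT the actual shared/MemoryVectorStoreManager from the repo.
import type { Embeddings } from '@langchain/core/embeddings';
import type { Document } from '@langchain/core/documents';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';

class InMemoryStoreManagerSketch {
  private static instance: InMemoryStoreManagerSketch | undefined;
  private readonly stores = new Map<string, MemoryVectorStore>();

  private constructor(private readonly embeddings: Embeddings) {}

  // One manager per process, so stores survive across workflow executions
  // (but not across server restarts, as the node's notice warns).
  static getInstance(embeddings: Embeddings): InMemoryStoreManagerSketch {
    if (!InMemoryStoreManagerSketch.instance) {
      InMemoryStoreManagerSketch.instance = new InMemoryStoreManagerSketch(embeddings);
    }
    return InMemoryStoreManagerSketch.instance;
  }

  // One MemoryVectorStore per `${workflowId}__${memoryKey}` key.
  async getVectorStore(key: string): Promise<MemoryVectorStore> {
    let store = this.stores.get(key);
    if (!store) {
      store = new MemoryVectorStore(this.embeddings);
      this.stores.set(key, store);
    }
    return store;
  }

  // Optionally drop the existing store, then embed and add the new documents.
  async addDocuments(key: string, documents: Document[], clearStore?: boolean): Promise<void> {
    if (clearStore) {
      this.stores.delete(key);
    }
    const store = await this.getVectorStore(key);
    await store.addDocuments(documents);
  }
}

The key is prefixed with the workflow ID, as the Memory Key field notes, so two workflows using the same memory key do not read or overwrite each other's vectors.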