Skip to content

Commit

Permalink
Bugfix/Avoid hardcoded max tokens (#2003)
Browse files Browse the repository at this point in the history
Remove hard-coded max-token defaults (e.g. `?? 256`) from the BedrockChat, ChatAnthropic, and ChatOpenAI wrappers so that a user-configured `maxTokens` value is respected, falling back to a model-appropriate default only when none was configured.
  • Loading branch information
HenryHengZJ authored Mar 25, 2024
1 parent 4ca82ee commit 414b9f1
Show file tree
Hide file tree
Showing 5 changed files with 14 additions and 15 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@ import { IVisionChatModal, IMultiModalOption } from '../../../src'

export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
configuredModel: string
configuredMaxToken: number
configuredMaxToken?: number
multiModalOption: IMultiModalOption
id: string

constructor(id: string, fields: BaseBedrockInput & BaseChatModelParams) {
super(fields)
this.id = id
this.configuredModel = fields?.model || 'anthropic.claude-3-haiku-20240307-v1:0'
this.configuredMaxToken = fields?.maxTokens ?? 256
this.configuredModel = fields?.model || ''
this.configuredMaxToken = fields?.maxTokens
}

revertToOriginalModel(): void {
Expand All @@ -28,7 +28,7 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
setVisionModel(): void {
if (!this.model.startsWith('claude-3')) {
super.model = 'anthropic.claude-3-haiku-20240307-v1:0'
super.maxTokens = 1024
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ class ChatAnthropic_ChatModels implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokensToSample = nodeData.inputs?.maxTokensToSample as string
const maxTokens = nodeData.inputs?.maxTokensToSample as string
const topP = nodeData.inputs?.topP as string
const topK = nodeData.inputs?.topK as string
const streaming = nodeData.inputs?.streaming as boolean
Expand All @@ -143,7 +143,7 @@ class ChatAnthropic_ChatModels implements INode {
streaming: streaming ?? true
}

if (maxTokensToSample) obj.maxTokensToSample = parseInt(maxTokensToSample, 10)
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat
constructor(id: string, fields: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string }) {
super(fields)
this.id = id
this.configuredModel = fields?.modelName || 'claude-3-haiku-20240307'
this.configuredMaxToken = fields?.maxTokens ?? 256
this.configuredModel = fields?.modelName || ''
this.configuredMaxToken = fields?.maxTokens ?? 2048
}

revertToOriginalModel(): void {
Expand All @@ -27,7 +27,7 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat
setVisionModel(): void {
if (!this.modelName.startsWith('claude-3')) {
super.modelName = 'claude-3-haiku-20240307'
super.maxTokens = 1024
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 2048
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import { IMultiModalOption, IVisionChatModal } from '../../../src'

export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
configuredModel: string
configuredMaxToken: number
configuredMaxToken?: number
multiModalOption: IMultiModalOption
id: string

Expand All @@ -19,8 +19,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
) {
super(fields, configuration)
this.id = id
this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
this.configuredMaxToken = fields?.maxTokens ?? 256
this.configuredModel = fields?.modelName ?? ''
this.configuredMaxToken = fields?.maxTokens
}

revertToOriginalModel(): void {
Expand All @@ -34,6 +34,6 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal

setVisionModel(): void {
super.modelName = 'gpt-4-vision-preview'
super.maxTokens = 1024
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
}
}
3 changes: 1 addition & 2 deletions packages/components/src/Interface.ts
Original file line number Diff line number Diff line change
Expand Up @@ -274,9 +274,8 @@ export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory imp
export interface IVisionChatModal {
id: string
configuredModel: string
configuredMaxToken: number
multiModalOption: IMultiModalOption

configuredMaxToken?: number
setVisionModel(): void
revertToOriginalModel(): void
setMultiModalOption(multiModalOption: IMultiModalOption): void
Expand Down

0 comments on commit 414b9f1

Please sign in to comment.