-
Notifications
You must be signed in to change notification settings - Fork 649
Expand file tree
/
Copy pathdoubaoProvider.ts
More file actions
88 lines (84 loc) · 2.21 KB
/
doubaoProvider.ts
File metadata and controls
88 lines (84 loc) · 2.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import { LLM_PROVIDER, LLMResponse, MODEL_META, ChatMessage } from '@shared/presenter'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { ConfigPresenter } from '../../configPresenter'
/**
 * LLM provider for Doubao (ByteDance / Volcano Engine Ark), exposed through the
 * generic OpenAI-compatible transport implemented by the base class.
 *
 * The model catalog is hard-coded rather than fetched — presumably the Doubao
 * endpoint used here does not offer a model-listing API (NOTE(review): confirm
 * against the provider's API docs when models change).
 */
export class DoubaoProvider extends OpenAICompatibleProvider {
  constructor(provider: LLM_PROVIDER, configPresenter: ConfigPresenter) {
    // Initialize the Doubao provider configuration via the OpenAI-compatible base class.
    super(provider, configPresenter)
  }

  /**
   * Returns the static list of models available from this provider.
   *
   * Per-model data (id, display name, context window) is kept minimal; the
   * fields shared by every Doubao entry (group, providerId, isCustom,
   * maxTokens) are applied once below instead of being repeated per literal.
   *
   * @returns The fixed Doubao model catalog.
   */
  protected async fetchOpenAIModels(): Promise<MODEL_META[]> {
    const baseModels: Array<Pick<MODEL_META, 'id' | 'name' | 'contextLength'>> = [
      { id: 'doubao-1-5-pro-32k-250115', name: 'doubao-1.5-pro-32k-250115', contextLength: 32000 },
      { id: 'deepseek-r1-250120', name: 'deepseek-r1', contextLength: 64000 },
      { id: 'deepseek-r1-distill-qwen-32b-250120', name: 'deepseek-r1-distill-qwen-32b', contextLength: 32000 },
      { id: 'deepseek-r1-distill-qwen-7b-250120', name: 'deepseek-r1-distill-qwen-7b', contextLength: 32000 },
      { id: 'deepseek-v3-250324', name: 'deepseek-v3', contextLength: 64000 }
    ]
    // Fill in the fields common to every Doubao model exactly as the previous
    // hand-written literals did, so the returned array is unchanged.
    return baseModels.map((model) => ({
      ...model,
      group: 'doubao',
      providerId: this.provider.id,
      isCustom: false,
      maxTokens: 4096
    }))
  }

  /**
   * Runs a chat completion against the Doubao endpoint.
   *
   * @param messages - Conversation history to send.
   * @param modelId - Target model id (see {@link fetchOpenAIModels}).
   * @param temperature - Optional sampling temperature.
   * @param maxTokens - Optional output-token cap.
   * @returns The provider response, delegated to the OpenAI-compatible base.
   */
  async completions(
    messages: ChatMessage[],
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(messages, modelId, temperature, maxTokens)
  }

  /**
   * Generates text from a single plain prompt by wrapping it as one
   * user-role chat message and delegating to the completion path.
   *
   * @param prompt - Raw prompt text.
   * @param modelId - Target model id.
   * @param temperature - Optional sampling temperature.
   * @param maxTokens - Optional output-token cap.
   * @returns The provider response, delegated to the OpenAI-compatible base.
   */
  async generateText(
    prompt: string,
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(
      [
        {
          role: 'user',
          content: prompt
        }
      ],
      modelId,
      temperature,
      maxTokens
    )
  }
}