o3fanProvider.ts
import {
  LLM_PROVIDER,
  LLMResponse,
  MODEL_META,
  ChatMessage,
  IConfigPresenter
} from '@shared/presenter'
import { ModelType } from '@shared/model'
import {
  resolveModelContextLength,
  resolveModelFunctionCall,
  resolveModelMaxTokens
} from '@shared/modelConfigDefaults'
import { OpenAICompatibleProvider } from './openAICompatibleProvider'
import { providerDbLoader } from '../../configPresenter/providerDbLoader'
import { modelCapabilities } from '../../configPresenter/modelCapabilities'
import type { ProviderMcpRuntimePort } from '../runtimePorts'

export class O3fanProvider extends OpenAICompatibleProvider {
  constructor(
    provider: LLM_PROVIDER,
    configPresenter: IConfigPresenter,
    mcpRuntime?: ProviderMcpRuntimePort
  ) {
    super(provider, configPresenter, mcpRuntime)
  }

  // Builds the model list from the bundled provider database rather than a
  // live /models request, mapping each entry's modalities and limits onto
  // MODEL_META fields.
  protected async fetchOpenAIModels(): Promise<MODEL_META[]> {
    const resolvedId = modelCapabilities.resolveProviderId(this.provider.id) || this.provider.id
    const provider = providerDbLoader.getProvider(resolvedId)
    if (!provider || !Array.isArray(provider.models)) {
      return []
    }
    return provider.models.map((model) => {
      const inputs = model.modalities?.input
      const outputs = model.modalities?.output
      const hasImageInput = Array.isArray(inputs) && inputs.includes('image')
      const hasImageOutput = Array.isArray(outputs) && outputs.includes('image')
      // Models that can emit images are classified as image-generation models;
      // everything else defaults to chat.
      const modelType = hasImageOutput ? ModelType.ImageGeneration : ModelType.Chat
      return {
        id: model.id,
        name: model.display_name || model.name || model.id,
        group: 'o3fan',
        providerId: this.provider.id,
        isCustom: false,
        contextLength: resolveModelContextLength(model.limit?.context),
        maxTokens: resolveModelMaxTokens(model.limit?.output),
        vision: hasImageInput,
        functionCall: resolveModelFunctionCall(model.tool_call),
        reasoning: Boolean(model.reasoning?.supported),
        enableSearch: Boolean(model.search?.supported),
        type: modelType
      }
    })
  }

  // Delegates chat completions to the OpenAI-compatible base implementation.
  async completions(
    messages: ChatMessage[],
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(messages, modelId, temperature, maxTokens)
  }

  // Wraps the given text in a title-generation prompt and runs it as a
  // single-turn completion.
  async summaries(
    text: string,
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(
      [
        {
          role: 'user',
          content: `You need to summarize the user's conversation into a title of no more than 10 words, with the title language matching the user's primary language, without using punctuation or other special symbols:\n${text}`
        }
      ],
      modelId,
      temperature,
      maxTokens
    )
  }

  // Runs a bare prompt as a single user message.
  async generateText(
    prompt: string,
    modelId: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return this.openAICompletion(
      [
        {
          role: 'user',
          content: prompt
        }
      ],
      modelId,
      temperature,
      maxTokens
    )
  }
}
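
// --- Usage sketch (not part of the original file) ---
// A minimal illustration of driving this provider, assuming the host app
// already supplies the LLM_PROVIDER record and IConfigPresenter instance.
// The variable names and model id below are hypothetical placeholders.
//
//   const o3fan = new O3fanProvider(providerConfig, configPresenter)
//   const reply = await o3fan.completions(
//     [{ role: 'user', content: 'Hello' }],
//     'example-model-id',
//     0.7,
//     1024
//   )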