# Copyright (c) Microsoft. All rights reserved.

from enum import Enum
from typing import TYPE_CHECKING

from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError

if TYPE_CHECKING:
    from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
    from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings


class Services(str, Enum):
    """Enum for supported chat completion services.

    For service specific settings, refer to this documentation:
    https://github.com/microsoft/semantic-kernel/blob/main/python/samples/concepts/setup/ALL_SETTINGS.md
    """

    OPENAI = "openai"
    AZURE_OPENAI = "azure_openai"
    AZURE_AI_INFERENCE = "azure_ai_inference"
    ANTHROPIC = "anthropic"
    BEDROCK = "bedrock"
    GOOGLE_AI = "google_ai"
    MISTRAL_AI = "mistral_ai"
    OLLAMA = "ollama"
    ONNX = "onnx"
    VERTEX_AI = "vertex_ai"
    DEEPSEEK = "deepseek"
    NVIDIA = "nvidia"


service_id = "default"


def get_chat_completion_service_and_request_settings(
    service_name: Services,
    instruction_role: str | None = None,
) -> tuple["ChatCompletionClientBase", "PromptExecutionSettings"]:
    """Return service and request settings.

    Args:
        service_name (Services): The service name.
        instruction_role (str | None): The role to use for 'instruction' messages, for example,
            'system' or 'developer'. Defaults to 'system'. Currently only OpenAI reasoning models
            support the 'developer' role.
    """
    # Use lambdas or functions to delay instantiation
    chat_services = {
        Services.OPENAI: lambda: get_openai_chat_completion_service_and_request_settings(
            instruction_role=instruction_role
        ),
        Services.AZURE_OPENAI: lambda: get_azure_openai_chat_completion_service_and_request_settings(
            instruction_role=instruction_role
        ),
        Services.AZURE_AI_INFERENCE: lambda: get_azure_ai_inference_chat_completion_service_and_request_settings(
            instruction_role=instruction_role
        ),
        Services.ANTHROPIC: lambda: get_anthropic_chat_completion_service_and_request_settings(),
        Services.BEDROCK: lambda: get_bedrock_chat_completion_service_and_request_settings(),
        Services.GOOGLE_AI: lambda: get_google_ai_chat_completion_service_and_request_settings(),
        Services.MISTRAL_AI: lambda: get_mistral_ai_chat_completion_service_and_request_settings(),
        Services.OLLAMA: lambda: get_ollama_chat_completion_service_and_request_settings(),
        Services.ONNX: lambda: get_onnx_chat_completion_service_and_request_settings(),
        Services.VERTEX_AI: lambda: get_vertex_ai_chat_completion_service_and_request_settings(),
        Services.DEEPSEEK: lambda: get_deepseek_chat_completion_service_and_request_settings(),
        Services.NVIDIA: lambda: get_nvidia_chat_completion_service_and_request_settings(),
    }

    # Call the appropriate lambda or function based on the service name
    if service_name not in chat_services:
        raise ValueError(f"Unsupported service name: {service_name}")
    return chat_services[service_name]()
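

# Usage sketch (illustrative only; the chosen service and the settings tweak below are
# assumptions, and the selected backend must already have credentials configured):
#
#     chat_service, request_settings = get_chat_completion_service_and_request_settings(Services.OPENAI)
#     request_settings.temperature = 0.2  # override the sample defaults before invoking the service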


def get_openai_chat_completion_service_and_request_settings(
    instruction_role: str | None = None,
) -> tuple["ChatCompletionClientBase", "PromptExecutionSettings"]:
    """Return OpenAI chat completion service and request settings.

    Args:
        instruction_role (str | None): The role to use for 'instruction' messages, for example,
            'developer' or 'system'. (Optional)

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel-python
    """
    from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings

    chat_service = OpenAIChatCompletion(service_id=service_id, instruction_role=instruction_role)
    request_settings = OpenAIChatPromptExecutionSettings(
        service_id=service_id, max_tokens=2000, temperature=0.7, top_p=0.8
    )

    return chat_service, request_settings
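

# A .env sketch for the OpenAI path above. The key names are taken from this repo's
# ALL_SETTINGS.md (and match the DeepSeek notes further down); the values are placeholders:
#
#     OPENAI_API_KEY="sk-..."
#     OPENAI_CHAT_MODEL_ID="gpt-4o"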


def get_azure_openai_chat_completion_service_and_request_settings(
    instruction_role: str | None = None,
) -> tuple["ChatCompletionClientBase", "PromptExecutionSettings"]:
    """Return Azure OpenAI chat completion service and request settings.

    Args:
        instruction_role (str | None): The role to use for 'instruction' messages, for example,
            'developer' or 'system'. (Optional)

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from azure.identity import AzureCliCredential

    from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, AzureChatPromptExecutionSettings

    chat_service = AzureChatCompletion(
        service_id=service_id, instruction_role=instruction_role, credential=AzureCliCredential()
    )
    request_settings = AzureChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings
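

# Note: AzureCliCredential uses the identity currently signed in through the Azure CLI,
# so sign in first when running locally:
#
#     az login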


def get_azure_ai_inference_chat_completion_service_and_request_settings(
    instruction_role: str | None = None,
) -> tuple["ChatCompletionClientBase", "PromptExecutionSettings"]:
    """Return Azure AI Inference chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.azure_ai_inference import (
        AzureAIInferenceChatCompletion,
        AzureAIInferenceChatPromptExecutionSettings,
    )

    # The AI model ID is used as an identifier for developers when they are using serverless endpoints
    # on AI Foundry. It is not actually used to identify the model in the service, as the endpoint points
    # to only one model.
    # When developers are using one endpoint that can route to multiple models, the `ai_model_id` will be
    # used to identify the model. To use the latest routing feature on AI Foundry, please refer to the
    # following documentation:
    # https://learn.microsoft.com/en-us/azure/ai-services/multi-service-resource?%3Fcontext=%2Fazure%2Fai-services%2Fmodel-inference%2Fcontext%2Fcontext&pivots=azportal
    # https://learn.microsoft.com/en-us/azure/ai-foundry/model-inference/how-to/configure-project-connection?pivots=ai-foundry-portal
    # https://learn.microsoft.com/en-us/azure/ai-foundry/model-inference/how-to/inference?tabs=python
    chat_service = AzureAIInferenceChatCompletion(
        service_id=service_id,
        ai_model_id="id",
        instruction_role=instruction_role,
    )
    request_settings = AzureAIInferenceChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings


def get_anthropic_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Anthropic chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.anthropic import AnthropicChatCompletion, AnthropicChatPromptExecutionSettings

    chat_service = AnthropicChatCompletion(service_id=service_id)
    request_settings = AnthropicChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings


def get_bedrock_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Bedrock chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.bedrock import BedrockChatCompletion, BedrockChatPromptExecutionSettings

    chat_service = BedrockChatCompletion(service_id=service_id)
    request_settings = BedrockChatPromptExecutionSettings(
        # For model specific settings, specify them in the extension_data dictionary.
        # For example, for Anthropic Claude (Messages API) specific settings, refer to:
        # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
        service_id=service_id,
        extension_data={
            "temperature": 0.8,
        },
    )

    return chat_service, request_settings
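

# extension_data sketch for an Anthropic Claude model on Bedrock (parameter names assumed
# from the AWS model-parameters page linked above; adjust them per model):
#
#     extension_data={"temperature": 0.8, "top_p": 0.9, "max_tokens": 1024}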


def get_google_ai_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Google AI chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.google import GoogleAIChatCompletion, GoogleAIChatPromptExecutionSettings

    chat_service = GoogleAIChatCompletion(service_id=service_id)
    request_settings = GoogleAIChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings


def get_mistral_ai_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Mistral AI chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion, MistralAIChatPromptExecutionSettings

    chat_service = MistralAIChatCompletion(service_id=service_id)
    request_settings = MistralAIChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings


def get_ollama_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Ollama chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.ollama import OllamaChatCompletion, OllamaChatPromptExecutionSettings

    chat_service = OllamaChatCompletion(service_id=service_id)
    request_settings = OllamaChatPromptExecutionSettings(
        # For model specific settings, specify them in the options dictionary.
        # For more information on the available options, refer to the Ollama API documentation:
        # https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
        service_id=service_id,
        options={
            "temperature": 0.8,
        },
    )

    return chat_service, request_settings


def get_onnx_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Onnx chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.onnx import OnnxGenAIChatCompletion, OnnxGenAIPromptExecutionSettings

    chat_service = OnnxGenAIChatCompletion(template="phi4mm", service_id=service_id)
    request_settings = OnnxGenAIPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings


def get_vertex_ai_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return Vertex AI chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel
    """
    from semantic_kernel.connectors.ai.google import GoogleAIChatCompletion, GoogleAIChatPromptExecutionSettings

    chat_service = GoogleAIChatCompletion(service_id=service_id, use_vertexai=True)
    request_settings = GoogleAIChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings


def get_deepseek_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return DeepSeek chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The DeepSeek endpoint can be accessed via the OpenAI connector since the DeepSeek API is
    compatible with the OpenAI API.
    Set the `OPENAI_API_KEY` environment variable to the DeepSeek API key.
    Set the `OPENAI_CHAT_MODEL_ID` environment variable to the DeepSeek model ID (deepseek-chat or deepseek-reasoner).

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel-python
    """
    from openai import AsyncOpenAI

    from semantic_kernel.connectors.ai.open_ai import (
        OpenAIChatCompletion,
        OpenAIChatPromptExecutionSettings,
        OpenAISettings,
    )

    openai_settings = OpenAISettings()
    if not openai_settings.api_key:
        raise ServiceInitializationError("The DeepSeek API key is required.")
    if not openai_settings.chat_model_id:
        raise ServiceInitializationError("The DeepSeek model ID is required.")

    chat_service = OpenAIChatCompletion(
        ai_model_id=openai_settings.chat_model_id,
        service_id=service_id,
        async_client=AsyncOpenAI(
            api_key=openai_settings.api_key.get_secret_value(),
            base_url="https://api.deepseek.com",
        ),
    )
    request_settings = OpenAIChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings
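

# A .env sketch for the DeepSeek path above, reusing the OpenAI connector settings as the
# docstring describes (the API key value is a placeholder):
#
#     OPENAI_API_KEY="<your DeepSeek API key>"
#     OPENAI_CHAT_MODEL_ID="deepseek-chat"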


def get_nvidia_chat_completion_service_and_request_settings() -> tuple[
    "ChatCompletionClientBase", "PromptExecutionSettings"
]:
    """Return NVIDIA chat completion service and request settings.

    The service credentials can be read in three ways:
    1. Via the constructor
    2. Via the environment variables
    3. Via an environment file

    The request settings control the behavior of the service. The default settings are sufficient to get started.
    However, you can adjust the settings to suit your needs.
    Note: Some of the settings are NOT meant to be set by the user.

    Please refer to the Semantic Kernel Python documentation for more information:
    https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel-python
    """
    from semantic_kernel.connectors.ai.nvidia import NvidiaChatCompletion, NvidiaChatPromptExecutionSettings

    chat_service = NvidiaChatCompletion(service_id=service_id)
    request_settings = NvidiaChatPromptExecutionSettings(service_id=service_id)

    return chat_service, request_settings
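

if __name__ == "__main__":
    # Minimal end-to-end sketch. It assumes OpenAI credentials (OPENAI_API_KEY,
    # OPENAI_CHAT_MODEL_ID) are available via the environment or a .env file; swap the
    # Services member to try another backend.
    import asyncio

    from semantic_kernel.contents import ChatHistory

    async def _demo() -> None:
        chat_service, request_settings = get_chat_completion_service_and_request_settings(Services.OPENAI)
        chat_history = ChatHistory()
        chat_history.add_user_message("Write a one-sentence greeting.")
        response = await chat_service.get_chat_message_content(chat_history=chat_history, settings=request_settings)
        print(response)

    asyncio.run(_demo())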