using Microsoft.SemanticKernel;
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
kernelBuilder.AddAzureOpenAIChatCompletion(
deploymentName: "NAME_OF_YOUR_DEPLOYMENT",
apiKey: "YOUR_API_KEY",
endpoint: "YOUR_AZURE_ENDPOINT",
modelId: "gpt-4", // Optional name of the underlying model if the deployment name doesn't match the model name
serviceId: "YOUR_SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel
httpClient: new HttpClient() // Optional; if not provided, the HttpClient from the kernel will be used
);
Kernel kernel = kernelBuilder.Build();
using Microsoft.SemanticKernel;
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
kernelBuilder.AddOpenAIChatCompletion(
modelId: "gpt-4",
apiKey: "YOUR_API_KEY",
orgId: "YOUR_ORG_ID", // Optional
serviceId: "YOUR_SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel
httpClient: new HttpClient() // Optional; if not provided, the HttpClient from the kernel will be used
);
Kernel kernel = kernelBuilder.Build();
using Microsoft.SemanticKernel;
#pragma warning disable SKEXP0070
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
kernelBuilder.AddOllamaChatCompletion(
modelId: "NAME_OF_MODEL", // E.g. "phi3" if phi3 was downloaded as described above.
endpoint: new Uri("YOUR_ENDPOINT"), // E.g. "http://localhost:11434" if Ollama has been started in docker as described above.
serviceId: "SERVICE_ID" // Optional; for targeting specific services within Semantic Kernel
);
Kernel kernel = kernelBuilder.Build();
using Microsoft.SemanticKernel;
#pragma warning disable SKEXP0070
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
kernelBuilder.AddBedrockChatCompletionService(
modelId: "NAME_OF_MODEL",
bedrockRuntime: amazonBedrockRuntime, // Optional; An instance of IAmazonBedrockRuntime, used to communicate with Amazon Bedrock.
serviceId: "SERVICE_ID" // Optional; for targeting specific services within Semantic Kernel
);
Kernel kernel = kernelBuilder.Build();
using Microsoft.SemanticKernel;
#pragma warning disable SKEXP0070
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
kernelBuilder.AddOnnxRuntimeGenAIChatCompletion(
modelId: "NAME_OF_MODEL", // E.g. phi-3
modelPath: "PATH_ON_DISK", // Path to the model on disk e.g. C:\Repos\huggingface\microsoft\Phi-3-mini-4k-instruct-onnx\cpu_and_mobile\cpu-int4-rtn-block-32
serviceId: "SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel
jsonSerializerOptions: customJsonSerializerOptions // Optional; for providing custom serialization settings for e.g. function argument / result serialization and parsing.
);
Kernel kernel = kernelBuilder.Build();
For other AI service providers that support the OpenAI chat completion API (e.g. LM Studio), you can use the following code to reuse the existing OpenAI chat completion connector.
using Microsoft.SemanticKernel;
#pragma warning disable SKEXP0010
IKernelBuilder kernelBuilder = Kernel.CreateBuilder();
kernelBuilder.AddOpenAIChatCompletion(
modelId: "NAME_OF_MODEL",
apiKey: "API_KEY",
endpoint: new Uri("YOUR_ENDPOINT"), // Used to point to your service
serviceId: "SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel
httpClient: new HttpClient() // Optional; for customizing HTTP client
);
Kernel kernel = kernelBuilder.Build();
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.SemanticKernel;
var builder = Host.CreateApplicationBuilder(args);
builder.Services.AddAzureOpenAIChatCompletion(
deploymentName: "NAME_OF_YOUR_DEPLOYMENT",
apiKey: "YOUR_API_KEY",
endpoint: "YOUR_AZURE_ENDPOINT",
modelId: "gpt-4", // Optional name of the underlying model if the deployment name doesn't match the model name
serviceId: "YOUR_SERVICE_ID" // Optional; for targeting specific services within Semantic Kernel
);
builder.Services.AddTransient((serviceProvider)=> {
return new Kernel(serviceProvider);
});
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.SemanticKernel;
var builder = Host.CreateApplicationBuilder(args);
builder.Services.AddOpenAIChatCompletion(
modelId: "gpt-4",
apiKey: "YOUR_API_KEY",
orgId: "YOUR_ORG_ID", // Optional; for OpenAI deployment
serviceId: "YOUR_SERVICE_ID" // Optional; for targeting specific services within Semantic Kernel
);
builder.Services.AddTransient((serviceProvider)=> {
return new Kernel(serviceProvider);
});
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.SemanticKernel;
var builder = Host.CreateApplicationBuilder(args);
#pragma warning disable SKEXP0070
builder.Services.AddOllamaChatCompletion(
modelId: "NAME_OF_MODEL", // E.g. "phi3" if phi3 was downloaded as described above.
endpoint: new Uri("YOUR_ENDPOINT"), // E.g. "http://localhost:11434" if Ollama has been started in docker as described above.
serviceId: "SERVICE_ID" // Optional; for targeting specific services within Semantic Kernel
);
builder.Services.AddTransient((serviceProvider)=> {
return new Kernel(serviceProvider);
});
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.SemanticKernel;
var builder = Host.CreateApplicationBuilder(args);
#pragma warning disable SKEXP0070
builder.Services.AddBedrockChatCompletionService(
modelId: "NAME_OF_MODEL",
bedrockRuntime: amazonBedrockRuntime, // Optional; An instance of IAmazonBedrockRuntime, used to communicate with Amazon Bedrock.
serviceId: "SERVICE_ID" // Optional; for targeting specific services within Semantic Kernel
);
builder.Services.AddTransient((serviceProvider)=> {
return new Kernel(serviceProvider);
});
using Microsoft.SemanticKernel;
var builder = Host.CreateApplicationBuilder(args);
#pragma warning disable SKEXP0070
builder.Services.AddOnnxRuntimeGenAIChatCompletion(
modelId: "NAME_OF_MODEL", // E.g. phi-3
modelPath: "PATH_ON_DISK", // Path to the model on disk e.g. C:\Repos\huggingface\microsoft\Phi-3-mini-4k-instruct-onnx\cpu_and_mobile\cpu-int4-rtn-block-32
serviceId: "SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel
jsonSerializerOptions: customJsonSerializerOptions // Optional; for providing custom serialization settings for e.g. function argument / result serialization and parsing.
);
builder.Services.AddTransient((serviceProvider)=> {
return new Kernel(serviceProvider);
});
For other AI service providers that support the OpenAI chat completion API (e.g. LM Studio), you can use the following code to reuse the existing OpenAI chat completion connector.
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.SemanticKernel;
var builder = Host.CreateApplicationBuilder(args);
#pragma warning disable SKEXP0010
builder.Services.AddOpenAIChatCompletion(
modelId: "NAME_OF_MODEL",
apiKey: "API_KEY",
endpoint: new Uri("YOUR_ENDPOINT"), // Used to point to your service
serviceId: "SERVICE_ID", // Optional; for targeting specific services within Semantic Kernel
httpClient: new HttpClient() // Optional; for customizing HTTP client
);
builder.Services.AddTransient((serviceProvider)=> {
return new Kernel(serviceProvider);
});
using Microsoft.SemanticKernel.Connectors.AzureOpenAI;
AzureOpenAIChatCompletionService chatCompletionService = new (
deploymentName: "NAME_OF_YOUR_DEPLOYMENT",
apiKey: "YOUR_API_KEY",
endpoint: "YOUR_AZURE_ENDPOINT",
modelId: "gpt-4", // Optional name of the underlying model if the deployment name doesn't match the model name
httpClient: new HttpClient() // Optional; if not provided, the HttpClient from the kernel will be used
);
using Microsoft.SemanticKernel.Connectors.OpenAI;
OpenAIChatCompletionService chatCompletionService = new (
modelId: "gpt-4",
apiKey: "YOUR_API_KEY",
organization: "YOUR_ORG_ID", // Optional
httpClient: new HttpClient() // Optional; if not provided, the HttpClient from the kernel will be used
);
using Microsoft.SemanticKernel.Connectors.MistralAI;
#pragma warning disable SKEXP0070
MistralAIChatCompletionService chatCompletionService = new (
modelId: "NAME_OF_MODEL",
apiKey: "API_KEY",
endpoint: new Uri("YOUR_ENDPOINT"), // Optional
httpClient: new HttpClient() // Optional; for customizing HTTP client
);
Important
The Google chat completion connector is currently experimental. To use it, you will need to add #pragma warning disable SKEXP0070.
using Microsoft.SemanticKernel.Connectors.Google;
#pragma warning disable SKEXP0070
GoogleAIGeminiChatCompletionService chatCompletionService = new (
modelId: "NAME_OF_MODEL",
apiKey: "API_KEY",
apiVersion: GoogleAIVersion.V1, // Optional
httpClient: new HttpClient() // Optional; for customizing HTTP client
);
Important
The Hugging Face chat completion connector is currently experimental. To use it, you will need to add #pragma warning disable SKEXP0070.
using Microsoft.SemanticKernel.Connectors.HuggingFace;
#pragma warning disable SKEXP0070
HuggingFaceChatCompletionService chatCompletionService = new (
model: "NAME_OF_MODEL",
apiKey: "API_KEY",
endpoint: new Uri("YOUR_ENDPOINT") // Optional
);
Important
The Azure AI Inference chat completion connector is currently experimental. To use it, you will need to add #pragma warning disable SKEXP0070.
using Microsoft.SemanticKernel.Connectors.AzureAIInference;
#pragma warning disable SKEXP0070
AzureAIInferenceChatCompletionService chatCompletionService = new (
modelId: "YOUR_MODEL_ID",
apiKey: "YOUR_API_KEY",
endpoint: new Uri("YOUR_ENDPOINT"), // Used to point to your service
httpClient: new HttpClient() // Optional; if not provided, the HttpClient from the kernel will be used
);
using Microsoft.SemanticKernel.ChatCompletion;
using OllamaSharp;
#pragma warning disable SKEXP0070
using var ollamaClient = new OllamaApiClient(
uriString: "YOUR_ENDPOINT" // E.g. "http://localhost:11434" if Ollama has been started in docker as described above.
defaultModel: "NAME_OF_MODEL" // E.g. "phi3" if phi3 was downloaded as described above.
);
IChatCompletionService chatCompletionService = ollamaClient.AsChatCompletionService();
using Microsoft.SemanticKernel.Connectors.Amazon;
#pragma warning disable SKEXP0070
BedrockChatCompletionService chatCompletionService = new BedrockChatCompletionService(
modelId: "NAME_OF_MODEL",
bedrockRuntime: amazonBedrockRuntime // Optional; An instance of IAmazonBedrockRuntime, used to communicate with Amazon Bedrock.
);
using Microsoft.SemanticKernel.Connectors.Onnx;
#pragma warning disable SKEXP0070
OnnxRuntimeGenAIChatCompletionService chatCompletionService = new OnnxRuntimeGenAIChatCompletionService(
modelId: "NAME_OF_MODEL", // E.g. phi-3
modelPath: "PATH_ON_DISK", // Path to the model on disk e.g. C:\Repos\huggingface\microsoft\Phi-3-mini-4k-instruct-onnx\cpu_and_mobile\cpu-int4-rtn-block-32
jsonSerializerOptions: customJsonSerializerOptions // Optional; for providing custom serialization settings for e.g. function argument / result serialization and parsing.
);
For other AI service providers that support the OpenAI chat completion API (e.g. LM Studio), you can use the following code to reuse the existing OpenAI chat completion connector.
using Microsoft.SemanticKernel.Connectors.OpenAI;
#pragma warning disable SKEXP0010
OpenAIChatCompletionService chatCompletionService = new (
modelId: "gpt-4",
apiKey: "YOUR_API_KEY",
organization: "YOUR_ORG_ID", // Optional
endpoint: new Uri("YOUR_ENDPOINT"), // Used to point to your service
httpClient: new HttpClient() // Optional; if not provided, the HttpClient from the kernel will be used
);
To create a chat completion service, you need to install and import the necessary modules (e.g. pip install semantic-kernel) and create an instance of the service. Below are the steps for installing and creating a chat completion service for each AI service provider.
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
chat_completion_service = AzureChatCompletion(
deployment_name="my-deployment",
api_key="my-api-key",
endpoint="my-api-endpoint", # Used to point to your service
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
# You can do the following if you have set the necessary environment variables or created a .env file
chat_completion_service = AzureChatCompletion(service_id="my-service-id")
Note
The service also supports Microsoft Entra authentication. If you don't provide an API key, the service will attempt to authenticate using an Entra token.
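For example, here is a minimal sketch of Entra-based authentication, assuming the connector accepts a preconfigured AsyncAzureOpenAI client through its async_client parameter (the token scope and API version below are illustrative placeholders):

from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
from openai import AsyncAzureOpenAI
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion

# Build a client that authenticates with an Entra token instead of an API key.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)
client = AsyncAzureOpenAI(
    azure_endpoint="my-api-endpoint",
    api_version="2024-06-01",  # Illustrative API version
    azure_ad_token_provider=token_provider,
)
chat_completion_service = AzureChatCompletion(
    deployment_name="my-deployment",
    async_client=client,
)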
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
chat_completion_service = OpenAIChatCompletion(
ai_model_id="my-deployment",
api_key="my-api-key",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
# You can do the following if you have set the necessary environment variables or created a .env file
chat_completion_service = OpenAIChatCompletion(service_id="my-service-id")
from semantic_kernel.connectors.ai.azure_ai_inference import AzureAIInferenceChatCompletion
chat_completion_service = AzureAIInferenceChatCompletion(
ai_model_id="my-deployment",
api_key="my-api-key",
endpoint="my-api-endpoint", # Used to point to your service
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
# You can do the following if you have set the necessary environment variables or created a .env file
chat_completion_service = AzureAIInferenceChatCompletion(ai_model_id="my-deployment", service_id="my-service-id")
# You can also use an Azure OpenAI deployment with the Azure AI Inference service
from azure.ai.inference.aio import ChatCompletionsClient
from azure.identity.aio import DefaultAzureCredential
chat_completion_service = AzureAIInferenceChatCompletion(
ai_model_id="my-deployment",
client=ChatCompletionsClient(
endpoint=f"{str(endpoint).strip('/')}/openai/deployments/{deployment_name}",
credential=DefaultAzureCredential(),
credential_scopes=["https://cognitiveservices.azure.com/.default"],
),
)
Note
The service also supports Microsoft Entra authentication, as the snippet above shows with DefaultAzureCredential. If you don't provide an API key, the service will attempt to authenticate using an Entra token.
from semantic_kernel.connectors.ai.anthropic import AnthropicChatCompletion
chat_completion_service = AnthropicChatCompletion(
chat_model_id="model-id",
api_key="my-api-key",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
from semantic_kernel.connectors.ai.bedrock import BedrockChatCompletion
chat_completion_service = BedrockChatCompletion(
model_id="model-id",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
Note
Amazon Bedrock does not accept an API key. Follow this guide to configure your environment.
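The Bedrock connector relies on the standard AWS credential chain, so one option is to supply credentials through the usual AWS environment variables before creating the service. A minimal sketch with placeholder values; in practice credentials may equally come from a shared credentials file or an attached IAM role:

import os

os.environ["AWS_ACCESS_KEY_ID"] = "YOUR_ACCESS_KEY_ID"          # Placeholder
os.environ["AWS_SECRET_ACCESS_KEY"] = "YOUR_SECRET_ACCESS_KEY"  # Placeholder
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # A region where the model is available

from semantic_kernel.connectors.ai.bedrock import BedrockChatCompletion

chat_completion_service = BedrockChatCompletion(model_id="model-id")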
from semantic_kernel.connectors.ai.google.google_ai import GoogleAIChatCompletion
chat_completion_service = GoogleAIChatCompletion(
gemini_model_id="model-id",
api_key="my-api-key",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
Tip
Users can access Google's Gemini models through either the Google AI Studio or the Google Vertex platform. Follow this guide to configure your environment.
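As an alternative to passing the model ID and API key explicitly, the connector can also be configured from the environment. A minimal sketch; the variable names below are assumptions based on the connector's settings and should be verified against the guide:

import os

# Assumed environment variable names for the Google AI connector.
os.environ["GOOGLE_AI_GEMINI_MODEL_ID"] = "model-id"
os.environ["GOOGLE_AI_API_KEY"] = "my-api-key"

from semantic_kernel.connectors.ai.google.google_ai import GoogleAIChatCompletion

chat_completion_service = GoogleAIChatCompletion()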
from semantic_kernel.connectors.ai.google.vertex_ai import VertexAIChatCompletion
chat_completion_service = VertexAIChatCompletion(
project_id="my-project-id",
gemini_model_id="model-id",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
Tip
Users can access Google's Gemini models through either the Google AI Studio or the Google Vertex platform. Follow this guide to configure your environment.
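Vertex AI authenticates through Google application default credentials (for example, after running gcloud auth application-default login) rather than an API key, with the project and model supplied as configuration. A minimal sketch; the environment variable names are assumptions to verify against the guide:

import os

# Assumed environment variable names for the Vertex AI connector.
os.environ["VERTEX_AI_PROJECT_ID"] = "my-project-id"
os.environ["VERTEX_AI_GEMINI_MODEL_ID"] = "model-id"

from semantic_kernel.connectors.ai.google.vertex_ai import VertexAIChatCompletion

chat_completion_service = VertexAIChatCompletion()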
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion
chat_completion_service = MistralAIChatCompletion(
ai_model_id="model-id",
api_key="my-api-key",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
from semantic_kernel.connectors.ai.ollama import OllamaChatCompletion
chat_completion_service = OllamaChatCompletion(
ai_model_id="model-id",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
from semantic_kernel.connectors.ai.onnx import OnnxGenAIChatCompletion
chat_completion_service = OnnxGenAIChatCompletion(
template="phi3v",
ai_model_path="model-path",
service_id="my-service-id", # Optional; for targeting specific services within Semantic Kernel
)
from semantic_kernel import Kernel
# Initialize the kernel
kernel = Kernel()
# Add the chat completion service created above to the kernel
kernel.add_service(chat_completion_service)
var chatCompletionService = kernel.GetRequiredService<IChatCompletionService>();
from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
# Retrieve the chat completion service by type
chat_completion_service = kernel.get_service(type=ChatCompletionClientBase)
# Retrieve the chat completion service by id
chat_completion_service = kernel.get_service(service_id="my-service-id")
# Retrieve the default inference settings
execution_settings = kernel.get_prompt_execution_settings_from_service_id("my-service-id")
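Once retrieved, the settings can be tuned before making a request. For example (a sketch; the available attributes vary by connector, though max_tokens and temperature are common to the OpenAI-style settings classes):

execution_settings.max_tokens = 100   # Cap the response length
execution_settings.temperature = 0.7  # Adjust sampling randomness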
ChatHistory history = [];
history.AddUserMessage("Hello, how are you?");
var response = await chatCompletionService.GetChatMessageContentAsync(
history,
kernel: kernel
);
chat_history = ChatHistory()
chat_history.add_user_message("Hello, how are you?")
response = await chat_completion_service.get_chat_message_content(
chat_history=chat_history,
settings=execution_settings,
)
ChatHistory history = new ChatHistory();
history.addUserMessage("Hello, how are you?");
InvocationContext optionalInvocationContext = null;
List<ChatMessageContent<?>> response = chatCompletionService.getChatMessageContentsAsync(
history,
kernel,
optionalInvocationContext
).block(); // Block on the reactive (Mono) result to get the messages
ChatHistory history = [];
history.AddUserMessage("Hello, how are you?");
var response = chatCompletionService.GetStreamingChatMessageContentsAsync(
chatHistory: history,
kernel: kernel
);
await foreach (var chunk in response)
{
Console.Write(chunk);
}
chat_history = ChatHistory()
chat_history.add_user_message("Hello, how are you?")
response = chat_completion_service.get_streaming_chat_message_content(
chat_history=chat_history,
settings=execution_settings,
)
async for chunk in response:
print(chunk, end="")