- Notifications
You must be signed in to change notification settings - Fork 3k
/
Copy pathsample_get_model_info.py
52 lines (40 loc) · 1.76 KB
/
sample_get_model_info.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
DESCRIPTION:
This sample demonstrates how to get information about the AI model, using the
synchronous chat completions client. Similarly can be done with the other
clients.
The get_model_info() method on the client only works with Serverless API or
Managed Compute endpoints.
USAGE:
python sample_get_model_info.py
Set these two environment variables before running the sample:
1) AZURE_AI_CHAT_ENDPOINT - Your endpoint URL, in the form
https://<your-deployment-name>.<your-azure-region>.models.ai.azure.com
where `your-deployment-name` is your unique AI Model deployment name, and
`your-azure-region` is the Azure region where your model is deployed.
2) AZURE_AI_CHAT_KEY - Your model key. Keep it secret.
"""
def sample_get_model_info():
    """Fetch and print information about the deployed AI model.

    Reads the endpoint URL and API key from the environment variables
    ``AZURE_AI_CHAT_ENDPOINT`` and ``AZURE_AI_CHAT_KEY``, creates a
    synchronous ``ChatCompletionsClient``, and prints the model's name,
    provider name, and type as returned by ``get_model_info()``.

    Exits the process with a message if either environment variable is
    missing. Note: ``get_model_info()`` only works with Serverless API or
    Managed Compute endpoints (see module docstring).
    """
    import os

    # Fail fast with a friendly message instead of a KeyError traceback
    # when the sample's required configuration is absent.
    try:
        endpoint = os.environ["AZURE_AI_CHAT_ENDPOINT"]
        key = os.environ["AZURE_AI_CHAT_KEY"]
    except KeyError:
        print("Missing environment variable 'AZURE_AI_CHAT_ENDPOINT' or 'AZURE_AI_CHAT_KEY'")
        print("Set them before running this sample.")
        exit()

    # Imports are local so the missing-variable message above is reachable
    # even if the azure packages are not installed system-wide.
    from azure.ai.inference import ChatCompletionsClient
    from azure.core.credentials import AzureKeyCredential

    client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

    # [START get_model_info]
    model_info = client.get_model_info()

    print(f"Model name: {model_info.model_name}")
    print(f"Model provider name: {model_info.model_provider_name}")
    print(f"Model type: {model_info.model_type}")
    # [END get_model_info]
# Run the sample only when executed as a script, not when imported.
if __name__ == "__main__":
    sample_get_model_info()