Python Notes 1 - introduction with the OpenAI API Development
Official document:https://platform.openai.com/docs/api-reference/chat/create
1. Use APIfox to call APIs
2.Use PyCharm to call APIs
2.1-1 Windows: Configure the environment variables
#HK代理环境,不需要科学上网(价格便宜、有安全风险,适合个人开发调试)
setx OPENAI_BASE_URL "https://api.openai-hk.com/v1"
setx OPENAI_API_KEY "hk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
#官方环境,需要科学上网(价格高、安全可靠,适合个人企业生产部署)
setx OPENAI_BASE_URL "https://api.openai.com/v1"
setx OPENAI_API_KEY "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
Temporarily use the API through the HK proxy.
Check if environment variable is configured successfully.
# Verify that the environment variable is visible to this Python process.
import os

print(os.environ.get('OPENAI_BASE_URL'))
2.1-2 macOS: Configure the environment variables
#HK代理环境,不需要科学上网
export OPENAI_BASE_URL='https://api.openai-hk.com/v1'
export OPENAI_API_KEY='hk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
#官方环境,需要科学上网
export OPENAI_BASE_URL='https://api.openai.com/v1'
export OPENAI_API_KEY='sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
If macOS is unable to read the configured environment variables, you need to modify the .zshrc file.
Refer to the URL below.
MacOS环境变量source生效但重启后又失效 - 程序员小艺 - 博客园
2.2 Call an API using PyCharm
2.2.1 As in APIfox (HTTP client): make a POST request to call the API, similar to how it is done in APIfox.
import requests
import json
import os

# Resolve endpoint and credentials from the environment; fail fast with a
# clear message instead of a TypeError ("can only concatenate str ...")
# when either variable is missing.
base_url = os.getenv('OPENAI_BASE_URL')
api_key = os.getenv('OPENAI_API_KEY')
if not base_url or not api_key:
    raise RuntimeError("OPENAI_BASE_URL and OPENAI_API_KEY must be set")

url = base_url + "/chat/completions"

# Chat-completions request body — same shape that APIfox sends.
payload = json.dumps({
    "model": "gpt-4o",
    "messages": [
        {"role": "system", "content": "assistant"},
        {"role": "user", "content": "Hello"}
    ]
})
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + api_key,
}
# requests.post is the idiomatic form of requests.request("POST", ...);
# the timeout keeps the script from hanging if the endpoint is unreachable.
response = requests.post(url, headers=headers, data=payload, timeout=30)
print(response.text)
2.2.2 Use the official SDK to call the API
First, download the OpenAI package using the console and import it.
# pip install openai==1.40.3
Call the API using the SDK
from openai import OpenAI
# pip install openai==1.40.3
import os

# Confirm the base URL environment variable is visible to this process.
print(os.getenv('OPENAI_BASE_URL'))

# Initialize the OpenAI client; it picks up OPENAI_API_KEY and
# OPENAI_BASE_URL from the environment automatically.
client = OpenAI()

chat_messages = [
    {"role": "system", "content": "assistant"},
    {"role": "user", "content": "Hello"},
]
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=chat_messages,
)
print(completion.choices[0].message.content)
There was a problem when running the code
Traceback (most recent call last):
File "D:\BaiduNetdiskDownload\AIapp\sub\1_AI及LLM基础\day02_OpenAI 开发\day2-demo\sdk_call.py", line 7, in <module>
clien = OpenAI()
File "C:\Users\Administrator\PycharmProjects\PythonProject\.venv_learn\Lib\site-packages\openai\_client.py", line 123, in __init__
super().__init__(
~~~~~~~~~~~~~~~~^
version=__version__,
^^^^^^^^^^^^^^^^^^^^
...<6 lines>...
_strict_response_validation=_strict_response_validation,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "C:\Users\Administrator\PycharmProjects\PythonProject\.venv_learn\Lib\site-packages\openai\_base_client.py", line 843, in __init__
self._client = http_client or SyncHttpxClientWrapper(
~~~~~~~~~~~~~~~~~~~~~~^
base_url=base_url,
^^^^^^^^^^^^^^^^^^
...<5 lines>...
follow_redirects=True,
^^^^^^^^^^^^^^^^^^^^^^
)
^
File "C:\Users\Administrator\PycharmProjects\PythonProject\.venv_learn\Lib\site-packages\openai\_base_client.py", line 741, in __init__
super().__init__(**kwargs)
~~~~~~~~~~~~~~~~^^^^^^^^^^
TypeError: Client.__init__() got an unexpected keyword argument 'proxies'
The error could be caused by an outdated SDK version; trying to update the SDK version:
pip install --upgrade openai
After updating the SDK, the code now runs successfully.
SDKs provide a more convenient interface; under the hood, they rely on the requests library for making HTTP requests.
2.3 Call the embedding(嵌入 向量数据库) API in Python
embedding transforms prompt words into vector representations(向量表示)
2.3.1 request(HTTP)
One advantage of using the requests library is that it provides access to the full response, including headers, status code, and body.
import requests
import json
import os

# Embeddings endpoint derived from the configured base URL.
url = os.getenv('OPENAI_BASE_URL') + "/embeddings"

# Request body: embed the single word "cat" with the ada-002 model.
body = json.dumps({
    "model": "text-embedding-ada-002",
    "input": "cat"
})
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + os.getenv('OPENAI_API_KEY'),
}
response = requests.post(url, headers=headers, data=body)
print(response.text)
2.3.2 SDK
Meanwhile, SDKs typically provide only pre-processed or pre-packaged objects, which may limit access to the raw response data(原始数据). Tip: press Ctrl+B to jump to the method.
from openai import OpenAI

# Initialize the OpenAI client (credentials come from the environment).
client = OpenAI()


def get_embedding(text, model="text-embedding-ada-002"):
    """Return the embedding vector for *text* using the given model."""
    result = client.embeddings.create(input=text, model=model)
    return result.data[0].embedding


# Example text
text = "Hello, world!"
# Fetch and display its embedding vector.
embedding = get_embedding(text)
print("Embedding vector:", embedding)
2.4 Call the vision-enabled GPT-4o using a local/online image
2.4.1 request local image
use # to comment code
import os
import base64 #use base64 transform image
import requests
def encode_image(image_path):
    """Read the file at *image_path* and return its bytes as a base64 string."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode('utf-8')
# Path to your image
image_path = "../images/cat.jpeg"
# Encode the image so it can be embedded in the request as a data URL.
base64_image = encode_image(image_path)

headers = {
    "Content-Type": "application/json",
    # Fix: the original wrote f"Bearer " + ... — an f-string with nothing
    # interpolated; use a single f-string interpolation instead.
    "Authorization": f"Bearer {os.getenv('OPENAI_API_KEY')}",
}
payload = {
    "model": "gpt-4o",
    "messages": [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "这张照片里有什么?"
                },
                {
                    "type": "image_url",
                    # Pass the local image inline as a base64 data URL.
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                }
            ]
        }
    ],
    "max_tokens": 300
}
# The timeout prevents the script from hanging if the endpoint is unreachable.
response = requests.post(
    os.getenv('OPENAI_BASE_URL') + "/chat/completions",
    headers=headers,
    json=payload,
    timeout=60,
)
print(response.json())
2.4.2 SDK online
from openai import OpenAI

client = OpenAI()

# Ask GPT-4o to describe an image referenced by a public URL.
text_part = {"type": "text", "text": "这张照片里有什么?"}
image_part = {
    "type": "image_url",
    "image_url": {
        "url": "https://p7.itc.cn/q_70/images03/20220805/7a369d8407144b11bfd598091095c959.jpeg",
    },
}
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": [text_part, image_part]}],
    max_tokens=50,
)
print(response.choices[0])
2.5 Respond with the specified JSON schema
JSON is well-suited for converting data into objects
from openai import OpenAI

client = OpenAI()
# response_format forces the model to return a valid JSON object.
response = client.chat.completions.create(
    model="gpt-4o",
    response_format={"type": "json_object"},
    messages=[
        {"role": "system", "content": "你是一个助手,请用中文输出JSON"},
        {"role": "user", "content": "帮我写一个冒泡算法?"},
    ],
)
print(response.choices[0].message.content)
Before using the JSON schema (plain text output, more suitable for page display or user reading).
after using JSON schema
2.6 Seed for reproducible output(重现输出)
As the seed value changes, the differences between outputs become more pronounced (obvious, 明显).
from openai import OpenAI

client = OpenAI()
# Issue the same request three times with an identical seed. With all other
# parameters held constant, a fixed seed lets the API produce more consistent
# (reproducible) results across the runs.
for i in range(3):
    response = client.chat.completions.create(
        model="gpt-4o",
        # Fix: the original comment claimed the seed was 42, but the code
        # uses 2 — the comment now matches the code.
        seed=2,
        temperature=0.7,
        max_tokens=50,
        messages=[
            {"role": "system", "content": "你是一个生成故事机器人"},
            {"role": "user", "content": "告诉我一个关于宇宙是如何开始的故事?"}
        ]
    )
    print(f'故事版本{i + 1}:' + response.choices[0].message.content)
    # Removed the redundant `del response` — the name is simply rebound on
    # the next iteration, so deleting it has no effect.
seed = 2
seed = 40
2.7 Count tokens
Count tokens in the development console for looped chat.
Need to install the tiktoken library.
pip install --upgrade tiktoken
from openai import OpenAI
# pip install --upgrade tiktoken
#tiktoken 用来统计token使用
import tiktoken#package for count token
# Initialize the OpenAI client (reads OPENAI_API_KEY / OPENAI_BASE_URL from the environment).
client = OpenAI()
# Initialize the tiktoken encoder that matches the gpt-4 model's tokenization.
encoder = tiktoken.encoding_for_model("gpt-4")
def count_tokens(text):
    """Return the number of tokens *text* encodes to under the module-level encoder.

    Fix: the original called encoder.encode(text) twice and discarded the
    first result — the text is now encoded exactly once.
    """
    tokens = encoder.encode(text)
    return len(tokens)
def main():
    """Run a console chat loop with GPT-4 that reports token usage per turn."""
    # Seed the conversation with a system message; the full history is
    # resent on every request so the model keeps context across turns.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    print("开始聊天吧!输入 'exit' 退出。")
    # Running total of user + assistant tokens. NOTE(review): only the
    # message text is counted — per-message API overhead tokens are not.
    total_tokens = 0
    while True:
        # Read the next user message; 'exit' (case-insensitive) quits.
        user_input = input("用户: ")
        if user_input.lower() == 'exit':
            break
        # Append the user message to the chat history.
        messages.append({"role": "user", "content": user_input})
        # Count and accumulate the tokens in the user input.
        user_tokens = count_tokens(user_input)
        total_tokens += user_tokens
        # Call the GPT-4 model with the entire conversation so far.
        response = client.chat.completions.create(
            model="gpt-4",
            messages=messages,
            max_tokens=150,
            temperature=0.7,
            top_p=1,
            n=1
        )
        # Extract the assistant's reply text.
        assistant_message = response.choices[0].message.content.strip()
        # Append the assistant's reply to the chat history.
        messages.append({"role": "assistant", "content": assistant_message})
        # Count and accumulate the tokens in the assistant reply.
        assistant_tokens = count_tokens(assistant_message)
        total_tokens += assistant_tokens
        # Show the reply, then the per-turn and cumulative token usage.
        print(f"助手: {assistant_message}")
        print(f"用户tokens数: {user_tokens},助手tokens数: {assistant_tokens},总token数: {total_tokens}")

if __name__ == "__main__":  # only run the loop when executed directly, not on import
    main()
2.8 Console chat loop with session length management based on a maximum token limit
from openai import OpenAI
# pip install tiktoken
import tiktoken
client = OpenAI()
# Total token budget for request + response combined. For a real GPT-4
# deployment this is the model's context limit (e.g. 4096/8192); a tiny
# value is used here so the limit check is easy to trigger in a demo.
MAX_TOKENS = 8 # maximum total token budget
# Tokens reserved for the model's response; subtracted from MAX_TOKENS when
# checking how large the prompt may be, so the reply always has room.
MAX_RESPONSE_TOKENS = 6 # tokens reserved for the response
# Encoder matching gpt-4's tokenization, used for all token counting below.
encoder = tiktoken.encoding_for_model("gpt-4")
def count_tokens(text):
    """Return the number of tokens *text* encodes to under the module-level encoder.

    Fix: the original called encoder.encode(text) twice and discarded the
    first result — the text is now encoded exactly once.
    """
    tokens = encoder.encode(text)
    return len(tokens)
# 假设 MAX_TOKENS 是 4096,而 MAX_RESPONSE_TOKENS 是 500,那么:
# 我们希望对话历史的 token 数量不要超过 3596 (4096 - 500)。
# 这样,当我们发送对话历史给 API 时,仍然有 500 个 token 的空间用于模型生成的响应。
def manage_token_limit(text):
    """Return True if *text* fits within the prompt token budget.

    Prints a warning and returns False when the text exceeds
    MAX_TOKENS - MAX_RESPONSE_TOKENS (headroom is reserved for the reply).

    Fix: the parameter was misleadingly named `messages`, but the caller
    passes a plain string (and count_tokens requires a string); it is
    renamed `text` to match actual usage. The caller passes it positionally,
    so this rename is backward-compatible.
    """
    current_tokens = count_tokens(text)
    if current_tokens > (MAX_TOKENS - MAX_RESPONSE_TOKENS):
        print(f"当前会话 token 数量: {current_tokens}, 超过最大 token 数量: {MAX_TOKENS - MAX_RESPONSE_TOKENS}")
        return False
    return True
def get_gpt_response(messages):
    """Send *messages* to GPT-4 and return the stripped reply text."""
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=messages
    )
    reply = completion.choices[0].message.content
    return reply.strip()
def main():
    """Run a console chat loop that rejects user inputs exceeding the token budget."""
    print("Chat with GPT-4. Type 'exit' to end the conversation.")
    while True:
        # NOTE(review): messages is rebuilt every iteration, so no
        # conversation history is carried across turns — each request is
        # effectively single-turn despite the "chat" framing.
        messages = []
        user_input = input("用户: ")
        if user_input.lower() == 'exit':
            break
        messages.append({"role": "user", "content": user_input})
        # Ensure the user input alone stays within the token budget.
        # NOTE(review): the check runs after the message was appended, but
        # since messages is reset next iteration this has no lasting effect.
        if not manage_token_limit(user_input):
            continue
        response = get_gpt_response(messages)
        # The f-string interpolates the model's reply into the printed line.
        print(f"GPT: {response}")
        messages.append({"role": "assistant", "content": response})

if __name__ == "__main__":
    main()