Nova - Converse API

Like the Claude models, Nova also supports the Converse API.

Single-turn conversation

import boto3

client = boto3.client(service_name='bedrock-runtime', region_name="us-east-1")

PRO_MODEL_ID = "us.amazon.nova-pro-v1:0"
LITE_MODEL_ID = "us.amazon.nova-lite-v1:0"
MICRO_MODEL_ID = "us.amazon.nova-micro-v1:0"

model_response = client.converse(
    modelId=LITE_MODEL_ID,
    messages=[
        {"role": "user", "content": [{"text": "a child graduating from high school"}]},
    ],
    system=[
        {
            "text": "You are an experienced publisher. For each user topic, respond with 3 potential book titles"
        }
    ],
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)


print("\n[Response Content Text]")
print(model_response["output"]["message"]["content"][0]["text"])

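Besides the generated text, the converse response also includes token usage and a stop reason; they can be read from the same model_response:

usage = model_response.get("usage", {})
print("Input tokens: ", usage.get("inputTokens"))
print("Output tokens:", usage.get("outputTokens"))
print("Stop reason:  ", model_response.get("stopReason"))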

Multi-turn conversation

import json
import boto3

client = boto3.client(service_name='bedrock-runtime', region_name="us-east-1")

PRO_MODEL_ID = "us.amazon.nova-pro-v1:0"
LITE_MODEL_ID = "us.amazon.nova-lite-v1:0"
MICRO_MODEL_ID = "us.amazon.nova-micro-v1:0"

model_response = client.converse(
    modelId=LITE_MODEL_ID,
    messages=[
        {"role": "user", "content": [{"text": "How many days are in a week?"}]},
        {"role": "assistant", "content": [{"text": "There are seven days in a week"}]},
        {"role": "user", "content": [{"text": "Which day is the first?"}]},
    ],
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)

print("\n[Full Response]")
print(json.dumps(model_response, indent=2))

print(model_response["output"]["message"]["content"][0]["text"])

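Rather than hard-coding the assistant turn, the usual pattern is to append the model's reply to the same messages list and call converse again. A minimal sketch, reusing the client and LITE_MODEL_ID defined above:

messages = [
    {"role": "user", "content": [{"text": "How many days are in a week?"}]},
]
first = client.converse(
    modelId=LITE_MODEL_ID,
    messages=messages,
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)
# The returned message already carries role "assistant", so it can be appended as-is
messages.append(first["output"]["message"])
messages.append({"role": "user", "content": [{"text": "Which day is the first?"}]})

second = client.converse(
    modelId=LITE_MODEL_ID,
    messages=messages,
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)
print(second["output"]["message"]["content"][0]["text"])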

Streaming API

import boto3

client = boto3.client(service_name='bedrock-runtime', region_name="us-east-1")

PRO_MODEL_ID = "us.amazon.nova-pro-v1:0"
LITE_MODEL_ID = "us.amazon.nova-lite-v1:0"
MICRO_MODEL_ID = "us.amazon.nova-micro-v1:0"

model_response = client.converse_stream(
    modelId=LITE_MODEL_ID,
    messages=[
        {"role": "user", "content": [{"text": "A camping trip"}]},
    ],
    system=[
        {
            "text": "Act as a creative writing assistant. When the user provides you with a topic, provide a list of 3 potential titles for a short story based on that topic."
        }
    ],
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)

stream = model_response.get("stream")
if stream:
    for event in stream:
        if "contentBlockDelta" in event:
            print(event["contentBlockDelta"]["delta"]["text"], end="")

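contentBlockDelta is only one of the event types in the stream. A variant of the loop above (use it in place of the original, since the stream can only be consumed once) that also surfaces the stop reason and the closing metadata event with token usage:

stream = model_response.get("stream")
if stream:
    for event in stream:
        if "contentBlockDelta" in event:
            print(event["contentBlockDelta"]["delta"]["text"], end="")
        elif "messageStop" in event:
            print("\n[Stop reason]", event["messageStop"].get("stopReason"))
        elif "metadata" in event:
            # The final event carries token usage and latency metrics
            print("[Usage]", event["metadata"].get("usage"))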

Image understanding

import json
import boto3

client = boto3.client(service_name='bedrock-runtime', region_name="us-east-1")

PRO_MODEL_ID = "us.amazon.nova-pro-v1:0"
LITE_MODEL_ID = "us.amazon.nova-lite-v1:0"
MICRO_MODEL_ID = "us.amazon.nova-micro-v1:0"

with open("sunset.png", "rb") as f:
    image = f.read()

model_response = client.converse(
    modelId=LITE_MODEL_ID,
    messages=[
        {
            "role": "user",
            "content": [
                {"image": {"format": "png", "source": {"bytes": image}}},
                {"text": "Describe the following image"},
            ],
        }
    ],
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)

print("\n[Full Response]")
print(json.dumps(model_response, indent=2))

print(model_response["output"]["message"]["content"][0]["text"])

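The format field must match the file's actual encoding (the Converse API accepts png, jpeg, gif, and webp for images). A small hypothetical helper that derives it from the file extension when building the content block:

import os

def image_block(path):
    # Hypothetical helper: map the file extension to the Converse "format" value
    ext = os.path.splitext(path)[1].lstrip(".").lower()
    fmt = "jpeg" if ext == "jpg" else ext   # the API expects "jpeg", not "jpg"
    with open(path, "rb") as f:
        return {"image": {"format": fmt, "source": {"bytes": f.read()}}}

# Usage: build the content list from a local image plus the instruction
content = [image_block("sunset.png"), {"text": "Describe the following image"}]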

Video understanding

import json
import boto3

client = boto3.client(service_name='bedrock-runtime', region_name="us-east-1")

PRO_MODEL_ID = "us.amazon.nova-pro-v1:0"
LITE_MODEL_ID = "us.amazon.nova-lite-v1:0"
MICRO_MODEL_ID = "us.amazon.nova-micro-v1:0"

with open("the-sea.mp4", "rb") as file:
    media_bytes = file.read()
    media_base64 = base64.b64encode(media_bytes)

model_response = client.converse(
    modelId=LITE_MODEL_ID,
    messages=[
        {
            "role": "user",
            "content": [
                {"video": {"format": "mp4", "source": {"bytes": media_bytes}}},
                {"text": "Describe the following video"},
            ],
        }
    ],
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)

print("\n[Full Response]")
print(json.dumps(model_response, indent=2))

print("\n[Response Content Text]")
print(model_response["output"]["message"]["content"][0]["text"])

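For larger files, the video block can also reference an object in S3 instead of sending inline bytes. A sketch, assuming the video has already been uploaded to a bucket that Bedrock can read (the URI below is a placeholder):

model_response = client.converse(
    modelId=LITE_MODEL_ID,
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "video": {
                        "format": "mp4",
                        # Placeholder URI; replace with your own bucket and key
                        "source": {"s3Location": {"uri": "s3://my-bucket/the-sea.mp4"}},
                    }
                },
                {"text": "Describe the following video"},
            ],
        }
    ],
    inferenceConfig={"maxTokens": 300, "topP": 0.1, "temperature": 0.3}
)
print(model_response["output"]["message"]["content"][0]["text"])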