# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# snippet-start:[python.example_code.bedrock-runtime.InvokeModel_TitanText]
# Use the native inference API to send a text message to Amazon Titan Text.

import json
import sys

import boto3
from botocore.exceptions import ClientError

# Create a Bedrock Runtime client in the AWS Region of your choice.
client = boto3.client("bedrock-runtime", region_name="us-east-1")

# Set the model ID, e.g., Titan Text Premier.
model_id = "amazon.titan-text-premier-v1:0"

# Define the prompt for the model.
prompt = "Describe the purpose of a 'hello world' program in one line."

# Format the request payload using the model's native structure.
native_request = {
    "inputText": prompt,
    "textGenerationConfig": {
        "maxTokenCount": 512,
        "temperature": 0.5,
    },
}

# Convert the native request to JSON.
request = json.dumps(native_request)

try:
    # Invoke the model with the request.
    response = client.invoke_model(modelId=model_id, body=request)
except ClientError as e:
    # AWS service-side failure: bad credentials, throttling, missing model
    # access, malformed request, etc.
    print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
    sys.exit(1)
except Exception as e:
    # Fallback for failures outside botocore's ClientError hierarchy
    # (e.g. connection errors). Kept so this demo still exits cleanly
    # with a readable message, matching the original broad catch.
    print(f"ERROR: Can't invoke '{model_id}'. Reason: {e}")
    sys.exit(1)

# Decode the streaming response body into the model's native JSON structure.
model_response = json.loads(response["body"].read())

# Extract and print the generated text from the first result.
response_text = model_response["results"][0]["outputText"]
print(response_text)

# snippet-end:[python.example_code.bedrock-runtime.InvokeModel_TitanText]