forked from awsdocs/aws-doc-sdk-examples
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscenario_invoke_models.go
More file actions
150 lines (123 loc) · 4.92 KB
/
scenario_invoke_models.go
File metadata and controls
150 lines (123 loc) · 4.92 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package scenarios
// snippet-start:[gov2.bedrock-runtime.Scenario_InvokeModels]
import (
"context"
"encoding/base64"
"fmt"
"log"
"math/rand"
"os"
"path/filepath"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
"github.com/awsdocs/aws-doc-sdk-examples/gov2/bedrock-runtime/actions"
"github.com/awsdocs/aws-doc-sdk-examples/gov2/demotools"
)
// InvokeModelsScenario demonstrates how to use the Amazon Bedrock Runtime client
// to invoke various foundation models for text and image generation
//
// 1. Generate text with Anthropic Claude 2
// 2. Generate text with Meta Llama 2 Chat
// 3. Generate text and asynchronously process the response stream with Anthropic Claude 2
// 4. Generate an image with the Amazon Titan image generation model
//
// NOTE(review): step 2 (Meta Llama 2 Chat) is listed above, but the visible
// Run method never invokes a Llama model — confirm whether this list or the
// scenario body is stale.
type InvokeModelsScenario struct {
	// sdkConfig is the AWS configuration the scenario was constructed with.
	sdkConfig aws.Config
	// invokeModelWrapper wraps synchronous InvokeModel calls (Claude, Titan image).
	invokeModelWrapper actions.InvokeModelWrapper
	// responseStreamWrapper wraps streaming InvokeModelWithResponseStream calls.
	responseStreamWrapper actions.InvokeModelWithResponseStreamWrapper
	// questioner supplies interactive user input (not used in the code visible here).
	questioner demotools.IQuestioner
}
// NewInvokeModelsScenario constructs an InvokeModelsScenario instance from a configuration.
// It uses the specified config to get a Bedrock Runtime client and create wrappers for the
// actions used in the scenario.
func NewInvokeModelsScenario(sdkConfig aws.Config, questioner demotools.IQuestioner) InvokeModelsScenario {
	// Both action wrappers share a single Bedrock Runtime client.
	runtimeClient := bedrockruntime.NewFromConfig(sdkConfig)
	scenario := InvokeModelsScenario{
		sdkConfig:  sdkConfig,
		questioner: questioner,
	}
	scenario.invokeModelWrapper = actions.InvokeModelWrapper{BedrockRuntimeClient: runtimeClient}
	scenario.responseStreamWrapper = actions.InvokeModelWithResponseStreamWrapper{BedrockRuntimeClient: runtimeClient}
	return scenario
}
// Run executes the interactive scenario: it invokes Claude synchronously,
// invokes Claude again while streaming the response, and finally generates an
// image with the Amazon Titan image generation model. A deferred recover
// keeps a panic in any step from crashing the demo.
func (scenario InvokeModelsScenario) Run(ctx context.Context) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("Something went wrong with the demo: %v\n", r)
		}
	}()

	// Separator lines reused between sections of the demo output.
	banner := strings.Repeat("=", 77)
	rule := strings.Repeat("-", 77)

	log.Println(banner)
	log.Println("Welcome to the Amazon Bedrock Runtime model invocation demo.")
	log.Println(banner)

	log.Printf("First, let's invoke a few large-language models using the synchronous client:\n\n")
	text2textPrompt := "In one paragraph, who are you?"
	log.Println(rule)
	log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt)
	scenario.InvokeClaude(ctx, text2textPrompt)
	log.Println(banner)

	log.Printf("Now, let's invoke Claude with the asynchronous client and process the response stream:\n\n")
	log.Println(rule)
	log.Printf("Invoking Claude with prompt: %v\n", text2textPrompt)
	scenario.InvokeWithResponseStream(ctx, text2textPrompt)
	log.Println(banner)

	log.Printf("Now, let's create an image with the Amazon Titan image generation model:\n\n")
	text2ImagePrompt := "stylized picture of a cute old steampunk robot"
	// Titan accepts a seed in [0, 2147483647]; pick one at random.
	seed := rand.Int63n(2147483648)
	log.Println(rule)
	log.Printf("Invoking Amazon Titan with prompt: %v\n", text2ImagePrompt)
	scenario.InvokeTitanImage(ctx, text2ImagePrompt, seed)
	log.Println(banner)

	log.Println("Thanks for watching!")
	log.Println(banner)
}
// InvokeClaude sends the prompt to Anthropic Claude through the synchronous
// wrapper and logs the whitespace-trimmed completion. It panics on error so
// the deferred recover in Run reports the failure.
func (scenario InvokeModelsScenario) InvokeClaude(ctx context.Context, prompt string) {
	answer, invokeErr := scenario.invokeModelWrapper.InvokeClaude(ctx, prompt)
	if invokeErr != nil {
		panic(invokeErr)
	}
	trimmed := strings.TrimSpace(answer)
	log.Printf("\nClaude : %v\n", trimmed)
}
// InvokeWithResponseStream sends the prompt to Claude via the streaming
// wrapper, which processes and prints the response chunks as they arrive.
// It panics on error so the deferred recover in Run reports the failure.
func (scenario InvokeModelsScenario) InvokeWithResponseStream(ctx context.Context, prompt string) {
	log.Println("\nClaude with response stream:")
	if _, streamErr := scenario.responseStreamWrapper.InvokeModelWithResponseStream(ctx, prompt); streamErr != nil {
		panic(streamErr)
	}
	log.Println()
}
// InvokeTitanImage generates an image from the prompt with the Amazon Titan
// image generation model (using the given random seed), saves the returned
// base64 image data to a local file, and logs the saved file's path. It
// panics on error so the deferred recover in Run reports the failure.
func (scenario InvokeModelsScenario) InvokeTitanImage(ctx context.Context, prompt string, seed int64) {
	base64ImageData, err := scenario.invokeModelWrapper.InvokeTitanImage(ctx, prompt, seed)
	if err != nil {
		panic(err)
	}
	imagePath := saveImage(base64ImageData, "amazon.titan-image-generator-v2")
	// Use log instead of fmt for consistency with every other message this
	// scenario emits (all other output goes through the log package).
	log.Printf("The generated image has been saved to %s\n", imagePath)
}
// snippet-end:[gov2.bedrock-runtime.Scenario_InvokeModels]
func saveImage(base64ImageData string, modelId string) string {
outputDir := "output"
if _, err := os.Stat(outputDir); os.IsNotExist(err) {
err = os.MkdirAll(outputDir, 0755)
if err != nil {
log.Panicln("Couldn't create output folder: ", err)
}
}
i := 1
for {
if _, err := os.Stat(filepath.Join(outputDir, fmt.Sprintf("%s_%d.png", modelId, i))); os.IsNotExist(err) {
break
}
i++
}
imageData, _ := base64.StdEncoding.DecodeString(base64ImageData)
filePath := filepath.Join(outputDir, fmt.Sprintf("%s_%d.png", modelId, i))
f, _ := os.Create(filePath)
_, err := f.Write(imageData)
if err != nil {
log.Printf("Couldn't write image to file %v: %v\n", filePath, err)
}
_ = f.Close()
return filePath
}