forked from Skyvern-AI/skyvern
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
209 lines (204 loc) · 10.1 KB
/
docker-compose.yml
File metadata and controls
209 lines (204 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
services:
  postgres:
    image: postgres:14-alpine
    restart: always
    # comment out if you want to externally connect DB
    # ports:
    #   - "5432:5432"
    volumes:
      - ./postgres-data:/var/lib/postgresql/data
    environment:
      - PGDATA=/var/lib/postgresql/data/pgdata
      - POSTGRES_USER=skyvern
      - POSTGRES_PASSWORD=skyvern
      - POSTGRES_DB=skyvern
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U skyvern"]
      interval: 5s
      timeout: 5s
      retries: 5

  skyvern:
    image: public.ecr.aws/skyvern/skyvern:latest
    restart: on-failure
    env_file:
      - .env
    # comment out if you want to externally call skyvern API
    # Port mappings are quoted: unquoted host:container pairs can be
    # misread as base-60 integers by YAML 1.1 parsers.
    ports:
      - "8000:8000"
      - "9222:9222"  # for cdp browser forwarding
    volumes:
      - ./artifacts:/data/artifacts
      - ./videos:/data/videos
      - ./har:/data/har
      - ./log:/data/log
      - ./.streamlit:/app/.streamlit
      # Uncomment the following two lines if you want to connect to any local changes
      # - ./skyvern:/app/skyvern
      # - ./alembic:/app/alembic
    environment:
      - DATABASE_STRING=postgresql+psycopg://skyvern:skyvern@postgres:5432/skyvern
      - BROWSER_TYPE=chromium-headful
      - ENABLE_CODE_BLOCK=true
      # - BROWSER_TYPE=cdp-connect
      # Use this command to start Chrome with remote debugging:
      # "C:\Program Files\Google\Chrome\Application\chrome.exe" --remote-debugging-port=9222 --user-data-dir="C:\chrome-cdp-profile" --no-first-run --no-default-browser-check
      # /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --remote-debugging-port=9222 --user-data-dir="/Users/yourusername/chrome-cdp-profile" --no-first-run --no-default-browser-check
      # - BROWSER_REMOTE_DEBUGGING_URL=http://host.docker.internal:9222/
      # =========================
      # LLM Settings - Recommended to use skyvern CLI, `skyvern init llm` to setup your LLM's
      # =========================
      # OpenAI Support:
      # If you want to use OpenAI as your LLM provider, uncomment the following lines and fill in your OpenAI API key.
      # - ENABLE_OPENAI=true
      # - LLM_KEY=OPENAI_GPT4O
      # - OPENAI_API_KEY=<your_openai_key>
      # Gemini Support:
      # Gemini is a new LLM provider that is currently in beta. You can use it by uncommenting the following lines and filling in your Gemini API key.
      # - LLM_KEY=GEMINI
      # - ENABLE_GEMINI=true
      # - GEMINI_API_KEY=YOUR_GEMINI_KEY
      # - LLM_KEY=GEMINI_2.5_PRO_PREVIEW_03_25
      # If you want to use other LLM provider, like azure and anthropic:
      # - ENABLE_ANTHROPIC=true
      # - LLM_KEY=ANTHROPIC_CLAUDE3.5_SONNET
      # - ANTHROPIC_API_KEY=<your_anthropic_key>
      # Microsoft Azure OpenAI support:
      # If you'd like to use Microsoft Azure OpenAI as your managed LLM service integration with Skyvern, use the environment variables below.
      # In your Microsoft Azure subscription, you will need to provision the OpenAI service and deploy a model, in order to utilize it.
      # 1. Login to the Azure Portal
      # 2. Create an Azure Resource Group
      # 3. Create an OpenAI resource in the Resource Group (choose a region and pricing tier)
      # 4. From the OpenAI resource's Overview page, open the "Azure AI Foundry" portal (click the "Explore Azure AI Foundry Portal" button)
      # 5. In Azure AI Foundry, click "Shared Resources" --> "Deployments"
      # 6. Click "Deploy Model" --> "Deploy Base Model" --> select a model (specify this model "Deployment Name" value for the AZURE_DEPLOYMENT variable below)
      # - ENABLE_AZURE=true
      # - LLM_KEY=AZURE_OPENAI # Leave this value static, don't change it
      # - AZURE_DEPLOYMENT=<your_azure_deployment> # Use the OpenAI model "Deployment Name" that you deployed, using the steps above
      # - AZURE_API_KEY=<your_azure_api_key> # Copy and paste Key1 or Key2 from the OpenAI resource in Azure Portal
      # - AZURE_API_BASE=<your_azure_api_base> # Copy and paste the "Endpoint" from the OpenAI resource in Azure Portal (eg. https://xyzxyzxyz.openai.azure.com/)
      # - AZURE_API_VERSION=<your_azure_api_version> # Specify a valid Azure OpenAI data-plane API version (eg. 2024-08-01-preview) Docs: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference
      # Amazon Bedrock Support:
      # Amazon Bedrock is a managed service that enables you to invoke LLMs and bill them through your AWS account.
      # To use Amazon Bedrock as the LLM provider for Skyvern, specify the following environment variables.
      # 1. In the AWS IAM console, create a new AWS IAM User (name it whatever you want)
      # 2. Assign the "AmazonBedrockFullAccess" policy to the user
      # 3. Generate an IAM Access Key under the IAM User's Security Credentials tab
      # 4. In the Amazon Bedrock console, go to "Model Access"
      # 5. Click Modify Model Access button
      # 6. Enable "Claude 3.5 Sonnet v2" and save changes
      # - ENABLE_BEDROCK=true
      # - LLM_KEY=BEDROCK_ANTHROPIC_CLAUDE3.5_SONNET # This is the Claude 3.5 Sonnet "V2" model. Change to BEDROCK_ANTHROPIC_CLAUDE3.5_SONNET_V1 for the non-v2 version.
      # - AWS_REGION=us-west-2 # Replace this with a different AWS region, if you desire
      # - AWS_ACCESS_KEY_ID=FILL_ME_IN_PLEASE
      # - AWS_SECRET_ACCESS_KEY=FILL_ME_IN_PLEASE
      # Ollama Support:
      # Ollama is a local LLM provider that can be used to run models locally on your machine.
      # - LLM_KEY=OLLAMA
      # - ENABLE_OLLAMA=true
      # - OLLAMA_MODEL=qwen2.5:7b-instruct
      # - OLLAMA_SERVER_URL=http://host.docker.internal:11434
      # Open Router Support:
      # - ENABLE_OPENROUTER=true
      # - LLM_KEY=OPENROUTER
      # - OPENROUTER_API_KEY=<your_openrouter_api_key>
      # - OPENROUTER_MODEL=mistralai/mistral-small-3.1-24b-instruct
      # Groq Support:
      # - ENABLE_GROQ=true
      # - LLM_KEY=GROQ
      # - GROQ_API_KEY=<your_groq_api_key>
      # - GROQ_MODEL=llama-3.1-8b-instant
      # Maximum tokens to use: (only set for OpenRouter and Ollama)
      # - LLM_CONFIG_MAX_TOKENS=128000
      # Bitwarden Settings
      # If you are looking to integrate Skyvern with a password manager (eg Bitwarden), you can use the following environment variables.
      # - BITWARDEN_SERVER=http://localhost # OPTIONAL IF YOU ARE SELF HOSTING BITWARDEN
      # - BITWARDEN_SERVER_PORT=8002 # IF YOU ARE SELF HOSTING BITWARDEN AND USE THIS COMPOSE FILE, PORT IS 8002 UNLESS CHANGED
      # - SKYVERN_AUTH_BITWARDEN_ORGANIZATION_ID=your-org-id-here
      # - SKYVERN_AUTH_BITWARDEN_CLIENT_ID=user.your-client-id-here
      # - SKYVERN_AUTH_BITWARDEN_CLIENT_SECRET=your-client-secret-here
      # - SKYVERN_AUTH_BITWARDEN_MASTER_PASSWORD=your-master-password-here
      # 1Password Integration
      # If you are looking to integrate Skyvern with 1Password, you can use the following environment variables.
      # - OP_SERVICE_ACCOUNT_TOKEN=""
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "test", "-f", "/app/.streamlit/secrets.toml"]
      interval: 5s
      timeout: 5s
      retries: 5

  skyvern-ui:
    image: public.ecr.aws/skyvern/skyvern-ui:latest
    restart: on-failure
    ports:
      - "8080:8080"
      - "9090:9090"
    volumes:
      - ./artifacts:/data/artifacts
      - ./videos:/data/videos
      - ./har:/data/har
      - ./.streamlit:/app/.streamlit
    env_file:
      - skyvern-frontend/.env
    environment: {}
      # - VITE_ENABLE_CODE_BLOCK=true
      # if you want to run skyvern on a remote server,
      # you need to change the host in VITE_WSS_BASE_URL and VITE_API_BASE_URL to match your server ip
      # If you're self-hosting this behind a dns, you'll want to set:
      # A route for the API: api.yourdomain.com -> localhost:8000
      # A route for the UI: yourdomain.com -> localhost:8080
      # A route for the artifact API: artifact.yourdomain.com -> localhost:9090 (maybe not needed)
      # - VITE_WSS_BASE_URL=ws://localhost:8000/api/v1
      # - VITE_ARTIFACT_API_BASE_URL=http://localhost:9090
      # - VITE_API_BASE_URL=http://localhost:8000/api/v1
      # - VITE_SKYVERN_API_KEY=<get this from "settings" in the Skyvern UI>
    depends_on:
      skyvern:
        condition: service_healthy

  # uncomment for local usage of `vaultwarden` & bitwarden-cli - see more at: https://github.com/dani-garcia/vaultwarden
  # First this container needs to be started and configured to sign up, create master password and organization
  # Once created, under SETTINGS/SECURITY/KEYS/API you should be able to get client id and secret for CLI & Skyvern integrations
  # vaultwarden:
  #   image: vaultwarden/server:latest-alpine
  #   container_name: vaultwarden
  #   restart: unless-stopped
  #   environment:
  #     # DOMAIN: "https://vaultwarden.example.com" # required when using a reverse proxy; your domain; vaultwarden needs to know it's https to work properly with attachments
  #     SIGNUPS_ALLOWED: "true" # Deactivate this with "false" after you have created your account so that no strangers can register
  #   volumes:
  #     - ~/vw-data/:/data/ # the path before the : can be changed
  #   ports:
  #     - 127.0.0.1:11002:80 # you can replace the 11002 with your preferred port

  # Bitwarden CLI Server (provides REST API endpoints for Skyvern)
  # Once you have master password and api credentials, you can set them below and this CLI should start providing secure access for Skyvern to Vaultwarden
  # bitwarden-cli:
  #   build:
  #     context: ./bitwarden-cli-server
  #     dockerfile: Dockerfile
  #   environment:
  #     # Vaultwarden server URL
  #     BW_HOST: "http://vaultwarden"
  #     # API credentials for vaultwarden
  #     BW_CLIENTID: "user.your-client-id-here"
  #     BW_CLIENTSECRET: "your-client-secret-here"
  #     # Master password for unlocking vault
  #     BW_PASSWORD: "your-master-password-here"
  #   ports:
  #     # Bind to localhost only for security
  #     - "127.0.0.1:8002:8087"
  #   restart: unless-stopped
  #   healthcheck:
  #     test: [ "CMD", "curl", "-f", "http://localhost:8087/status" ]
  #     interval: 30s
  #     timeout: 10s
  #     retries: 5
  #     start_period: 30s
  #   depends_on:
  #     vaultwarden:
  #       condition: service_healthy
  #   volumes:
  #     # Optional: persist Bitwarden CLI config
  #     - ~/bitwarden-cli-config:/app/.config
  #   labels:
  #     - "traefik.enable=false" # Don't expose via reverse proxy