Skip to content

Commit 8d80e0e

Browse files
committed
docs-rs: provision watcher ecs task
1 parent a44c18a commit 8d80e0e

File tree

3 files changed

+152
-2
lines changed

3 files changed

+152
-2
lines changed

terragrunt/modules/docs-rs/rds.tf

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ resource "aws_security_group" "db" {
2222
from_port = 5432
2323
to_port = 5432
2424
protocol = "tcp"
25-
description = "Connections from web servers on ECS, bastion, and builder"
26-
security_groups = [aws_security_group.web.id, var.bastion_security_group_id, aws_security_group.builder.id]
25+
description = "Connections from web servers on ECS, watcher, bastion, and builder"
26+
security_groups = [aws_security_group.web.id, aws_security_group.watcher.id, var.bastion_security_group_id, aws_security_group.builder.id]
2727
}
2828
}
2929

Lines changed: 146 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,146 @@
1+
# The watcher is a long-running background daemon (docs_rs_watcher binary) that:
2+
# - polls the crates.io registry index for new crate publishes and queues builds
3+
# - periodically updates GitHub/GitLab repository stats
4+
# - queues automatic rebuilds of old releases
5+
6+
# Security group dedicated to the watcher task; the RDS security group
# references it to grant the watcher database access.
resource "aws_security_group" "watcher" {
  name   = "docs-rs-watcher"
  vpc_id = var.cluster_config.vpc_id
}
11+
12+
# The watcher exposes no HTTP endpoint, so instead of the ecs-app module
# (which requires expose_http and always provisions a load balancer target
# group) we combine the ecs-task module with a raw aws_ecs_service below.
module "watcher_task" {
  source = "../ecs-task"

  name           = "docs-rs-watcher"
  container_name = "app"
  cpu            = 256
  memory         = 512

  environment_variables = {
    DOCSRS_PREFIX       = "/tmp"
    REGISTRY_INDEX_PATH = "/tmp/crates.io-index"

    DOCSRS_STORAGE_BACKEND = "s3"
    S3_REGION              = "us-east-1"
    DOCSRS_S3_BUCKET       = aws_s3_bucket.storage.id

    DOCSRS_LOG     = "docs_rs=debug,rustwide=info"
    RUST_BACKTRACE = "1"

    # Seconds between polls of the registry index for newly published crates.
    DOCSRS_DELAY_BETWEEN_REGISTRY_FETCHES = "60"
    # Upper bound on old releases queued for rebuild in each hourly cycle.
    DOCSRS_MAX_QUEUED_REBUILDS = "10"
  }

  secrets = {
    # Consumed by docs_rs_repository_stats when it refreshes repository
    # stars/forks/etc. through the GitHub API.
    DOCSRS_GITHUB_ACCESSTOKEN = "/docs-rs/github-access-token"
  }

  computed_secrets = {
    # Direct database access: the watcher writes to the build queue, updates
    # repository stats, and runs consistency checks.
    DOCSRS_DATABASE_URL = aws_ssm_parameter.connection_url.arn
  }

  # Background daemon: nothing listens, so no ports and no docker labels.
  port_mappings = []
  docker_labels = {}
}
56+
57+
resource "aws_ecs_service" "watcher" {
  name            = "docs-rs-watcher"
  cluster         = var.cluster_config.cluster_id
  task_definition = module.watcher_task.task_arn

  # The registry watcher must run as a singleton: with more than one replica,
  # each newly published crate would be enqueued for building multiple times.
  desired_count    = 1
  launch_type      = "FARGATE"
  platform_version = "1.4.0"

  enable_ecs_managed_tags = true

  # Intentionally no load_balancer block — the watcher serves no HTTP.
  network_configuration {
    subnets = var.cluster_config.subnet_ids
    security_groups = [
      var.cluster_config.service_security_group_id,
      aws_security_group.watcher.id,
    ]
    # A public IP lets the task reach external services (crates.io index,
    # GitHub API, ECR, SSM) without a NAT gateway — the same approach the
    # web service uses.
    assign_public_ip = true
  }
}
81+
82+
# S3 permissions: the watcher reads/writes documentation through the storage
# abstraction layer and deletes objects when crates or versions are removed
# (delete_crate / delete_version).
resource "aws_iam_role_policy" "watcher_s3" {
  # NOTE(review): this attaches the app's runtime S3 permissions to the
  # *execution* role; ordinarily they belong on the task role. Confirm the
  # ecs-task module uses a single role for both purposes.
  role = module.watcher_task.task_execution_role_id
  name = "inline"

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Action = [
          "s3:GetObject",
          "s3:PutObject",
          "s3:DeleteObject",
          "s3:GetObjectTagging",
          "s3:PutObjectTagging",
          "s3:PutObjectAcl",
          "s3:ListBucket",
        ]
        Resource = [
          aws_s3_bucket.storage.arn,
          "${aws_s3_bucket.storage.arn}/*",
        ]
      }
    ]
  })
}
111+
112+
# CI: reuse the web module's OIDC role rather than creating a new one.
# The gha-oidc-role module derives the IAM role name from org/repo/environment,
# so a second module instance for the same rust-lang/docs.rs repo would
# conflict. Instead we attach the watcher's ECR and ECS permissions to the
# existing role.
resource "aws_iam_role_policy" "watcher_oidc_update_service" {
  name = "update-ecs-service-watcher"
  role = module.web.oidc_role_id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid    = "AllowUpdate"
        Effect = "Allow"
        Action = [
          "ecs:UpdateService",
          "ecs:DescribeServices",
        ]
        Resource = aws_ecs_service.watcher.id
      },
      {
        # ecr:GetAuthorizationToken does not support resource-level
        # permissions; scoping it to the ECS service ARN (as before) makes
        # the grant ineffective, so it must be allowed on "*".
        Sid      = "AllowEcrLogin"
        Effect   = "Allow"
        Action   = "ecr:GetAuthorizationToken"
        Resource = "*"
      },
    ]
  })
}
137+
138+
# Let CI (via the shared OIDC role) pull the watcher's container image.
resource "aws_iam_role_policy_attachment" "watcher_oidc_ci_pull" {
  role       = module.web.oidc_role_id
  policy_arn = module.watcher_task.policy_pull_arn
}
142+
143+
# Let CI (via the shared OIDC role) push the watcher's container image.
resource "aws_iam_role_policy_attachment" "watcher_oidc_ci_push" {
  role       = module.web.oidc_role_id
  policy_arn = module.watcher_task.policy_push_arn
}
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
11
# ID of the inner ecs-task module's task execution role, so callers can
# attach additional IAM policies to it.
output "role_id" {
  value = module.ecs_task.task_execution_role_id
}
4+
5+
# ID of the GitHub Actions OIDC role, exposed so siblings (e.g. the watcher)
# can attach their CI permissions to the same role instead of creating a
# conflicting duplicate.
output "oidc_role_id" {
  value = local.oidc_role_id
}

0 commit comments

Comments
 (0)