Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ terraform.tfvars
*.tfstate
*.tfstate.backup
*.tfvars
.terraform.lock.hcl

# Terraform directory
.terraform/
Expand All @@ -15,6 +16,8 @@ logs/

# Go vendor directory
vendor/
go.mod
go.sum

# Files generated by terratest
.test-data/
Expand Down
48 changes: 48 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [5.0.0] - 2022-05-13

### Added
- `node_taints` to default node pool
- Maintenance window support
- Key Vault secrets provider
- Verbose outputs
- More tests
- Attachment to a container registry via `azurerm_role_assignment`

### Changed
- Upgraded to Terraform 1.1.9 from 0.13.0
- Upgraded to azurerm provider 3.x from 2.x
- Split `prefix` and `dns_prefix`, as they might not be the same
- Separated Log Analytics Workspace and Log Analytics Solutions, for users who do not want both resources
- Renamed most variables to be more consistent with resources
- Required azurerm provider to >= 3.0.0 from ~> 2.46
- Split tests into Kubenet and Azure CNI AKS clusters
- Reduced test outputs to bare necessities

### Fixed
- Fixed typos and mistakes in variables descriptions
- `role_based_access_control` has been deprecated in favor of `azure_active_directory_role_based_access_control`
- `addon_profile` block has been deprecated in azurerm 3.0
- Duplicate `default_node_pool` was removed and replaced with in-block ternary
- Renamed CHANGLOG.md to CHANGELOG.md
- `max_node`, `min_node` and `node_count` are not mutually exclusive

### Security
- Deleted in-repo ssh module. This generated a local file on the agent with a private key (highly unsafe!) and was unused. Replaced by a `tls_private_key` block without a file output

### Removed
- `agent_` particle in variables.tf. AKS uses node pools and nodes in their terminology

## [4.15.0] - 2022-05-06
### Added
- Added output for `kube_admin_config_raw` ([#146](https://github.com/Azure/terraform-azurerm-aks/pull/146))
- Include `node_resource_group` as variable ([#136](https://github.com/Azure/terraform-azurerm-aks/pull/136))


[5.0.0]: https://github.com/Azure/terraform-azurerm-aks/releases/tag/5.0.0
[4.15.0]: https://github.com/Azure/terraform-azurerm-aks/releases/tag/4.15.0
8 changes: 0 additions & 8 deletions CHANGLOG.md

This file was deleted.

221 changes: 144 additions & 77 deletions README.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion azure-pipelines.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ pool:

variables:
image_name: terraform-azurerm-aks:$(build.buildId)
terraform_version: 0.13.0
terraform_version: 1.1.9

steps:
- script: docker build --build-arg BUILD_TERRAFORM_VERSION=$(terraform_version) --build-arg BUILD_ARM_SUBSCRIPTION_ID=$(ARM_SUBSCRIPTION_ID) --build-arg BUILD_ARM_CLIENT_ID=$(ARM_CLIENT_ID) --build-arg BUILD_ARM_CLIENT_SECRET=$(ARM_CLIENT_SECRET) --build-arg BUILD_ARM_TENANT_ID=$(ARM_TENANT_ID) -t $(image_name) .
Expand Down
195 changes: 92 additions & 103 deletions main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -2,158 +2,140 @@ data "azurerm_resource_group" "main" {
name = var.resource_group_name
}

module "ssh-key" {
source = "./modules/ssh-key"
public_ssh_key = var.public_ssh_key == "" ? "" : var.public_ssh_key
resource "tls_private_key" "main" {
algorithm = "RSA"
rsa_bits = 4096
}

resource "azurerm_kubernetes_cluster" "main" {
name = var.cluster_name == null ? "${var.prefix}-aks" : var.cluster_name
name = var.cluster_name != null ? var.cluster_name : var.prefix
kubernetes_version = var.kubernetes_version
location = data.azurerm_resource_group.main.location
resource_group_name = data.azurerm_resource_group.main.name
node_resource_group = var.node_resource_group
dns_prefix = var.prefix
dns_prefix = coalesce(var.dns_prefix, var.prefix, var.cluster_name)
sku_tier = var.sku_tier
private_cluster_enabled = var.private_cluster_enabled
private_dns_zone_id = var.private_dns_zone_id

linux_profile {
admin_username = var.admin_username

ssh_key {
# remove any new lines using the replace interpolation function
key_data = replace(var.public_ssh_key == "" ? module.ssh-key.public_ssh_key : var.public_ssh_key, "\n", "")
key_data = var.public_ssh_key != null ? var.public_ssh_key : tls_private_key.main.public_key_openssh
}
}

dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"]
content {
orchestrator_version = var.orchestrator_version
name = var.agents_pool_name
node_count = var.agents_count
vm_size = var.agents_size
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = var.vnet_subnet_id
enable_auto_scaling = var.enable_auto_scaling
max_count = null
min_count = null
enable_node_public_ip = var.enable_node_public_ip
availability_zones = var.agents_availability_zones
node_labels = var.agents_labels
type = var.agents_type
tags = merge(var.tags, var.agents_tags)
max_pods = var.agents_max_pods
enable_host_encryption = var.enable_host_encryption
}
}

dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : []
content {
orchestrator_version = var.orchestrator_version
name = var.agents_pool_name
vm_size = var.agents_size
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = var.vnet_subnet_id
enable_auto_scaling = var.enable_auto_scaling
max_count = var.agents_max_count
min_count = var.agents_min_count
enable_node_public_ip = var.enable_node_public_ip
availability_zones = var.agents_availability_zones
node_labels = var.agents_labels
type = var.agents_type
tags = merge(var.tags, var.agents_tags)
max_pods = var.agents_max_pods
enable_host_encryption = var.enable_host_encryption
}
default_node_pool {
orchestrator_version = var.orchestrator_version
name = var.default_node_pool_name
node_count = var.node_count
vm_size = var.vm_size
os_sku = var.os_sku
os_disk_type = var.os_disk_type
os_disk_size_gb = var.os_disk_size_gb
vnet_subnet_id = var.vnet_subnet_id
enable_auto_scaling = var.enable_auto_scaling
max_count = var.enable_auto_scaling == true ? var.max_count : null
min_count = var.enable_auto_scaling == true ? var.min_count : null
enable_node_public_ip = var.enable_node_public_ip
zones = var.zones
node_labels = var.node_labels
node_taints = var.node_taints
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

According to terraform-azurerm-provider pr #8982, since the v2.35.0, the node_taints argument is no longer be configured.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I will change this field.

type = var.type
tags = merge(var.tags, var.node_tags)
max_pods = var.max_pods
enable_host_encryption = var.enable_host_encryption
}

dynamic "service_principal" {
for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : []
for_each = var.service_principal_enabled ? { "loop" = "once" } : {}
content {
client_id = var.client_id
client_secret = var.client_secret
}
}

dynamic "identity" {
for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : []
identity {
type = var.identity_type
identity_ids = var.identity_ids
}

azure_policy_enabled = var.azure_policy_enabled
http_application_routing_enabled = var.http_application_routing_enabled

dynamic "oms_agent" {
for_each = try(azurerm_log_analytics_workspace.main[0].id, null) != null ? { "loop" = "once" } : {}

content {
type = var.identity_type
user_assigned_identity_id = var.user_assigned_identity_id
log_analytics_workspace_id = azurerm_log_analytics_workspace.main[0].id
}
}

addon_profile {
http_application_routing {
enabled = var.enable_http_application_routing
dynamic "ingress_application_gateway" {
for_each = var.ingress_application_gateway_enabled ? { "loop" = "once" } : {}
content {
gateway_id = var.gateway_id
gateway_name = var.gateway_name
subnet_cidr = var.subnet_cidr
subnet_id = var.subnet_id
}
}

kube_dashboard {
enabled = var.enable_kube_dashboard
}
azure_active_directory_role_based_access_control {
managed = var.rbac_aad_managed
admin_group_object_ids = var.rbac_aad_managed ? var.rbac_aad_admin_group_object_ids : null
azure_rbac_enabled = var.rbac_aad_managed ? var.enable_role_based_access_control : null
client_app_id = !var.rbac_aad_managed ? var.rbac_aad_client_app_id : null
server_app_id = !var.rbac_aad_managed ? var.rbac_aad_server_app_id : null
server_app_secret = !var.rbac_aad_managed ? var.rbac_aad_server_app_secret : null
}

azure_policy {
enabled = var.enable_azure_policy
}
network_profile {
network_plugin = var.network_plugin
network_policy = var.network_policy
dns_service_ip = var.dns_service_ip
docker_bridge_cidr = var.docker_bridge_cidr
outbound_type = var.outbound_type
pod_cidr = var.network_plugin != "azure" ? var.pod_cidr : null
service_cidr = var.service_cidr
}

oms_agent {
enabled = var.enable_log_analytics_workspace
log_analytics_workspace_id = var.enable_log_analytics_workspace ? azurerm_log_analytics_workspace.main[0].id : null
}
dynamic "key_vault_secrets_provider" {
for_each = var.key_vault_secrets_provider_enabled ? var.key_vault_secrets_provider : []

dynamic "ingress_application_gateway" {
for_each = var.enable_ingress_application_gateway == null ? [] : ["ingress_application_gateway"]
content {
enabled = var.enable_ingress_application_gateway
gateway_id = var.ingress_application_gateway_id
gateway_name = var.ingress_application_gateway_name
subnet_cidr = var.ingress_application_gateway_subnet_cidr
subnet_id = var.ingress_application_gateway_subnet_id
}
content {
secret_rotation_enabled = key_vault_secrets_provider.value.secret_rotation_enabled
secret_rotation_interval = key_vault_secrets_provider.value.secret_rotation_interval
}
}

role_based_access_control {
enabled = var.enable_role_based_access_control
maintenance_window {
dynamic "allowed" {
for_each = var.allowed_maintenance_windows

dynamic "azure_active_directory" {
for_each = var.enable_role_based_access_control && var.rbac_aad_managed ? ["rbac"] : []
content {
managed = true
admin_group_object_ids = var.rbac_aad_admin_group_object_ids
day = allowed.value.day
hours = allowed.value.hours
}
}

dynamic "azure_active_directory" {
for_each = var.enable_role_based_access_control && !var.rbac_aad_managed ? ["rbac"] : []
dynamic "not_allowed" {
for_each = var.not_allowed_maintenance_windows

content {
managed = false
client_app_id = var.rbac_aad_client_app_id
server_app_id = var.rbac_aad_server_app_id
server_app_secret = var.rbac_aad_server_app_secret
start = not_allowed.value.start
end = not_allowed.value.end
}
}
}

network_profile {
network_plugin = var.network_plugin
network_policy = var.network_policy
dns_service_ip = var.net_profile_dns_service_ip
docker_bridge_cidr = var.net_profile_docker_bridge_cidr
outbound_type = var.net_profile_outbound_type
pod_cidr = var.net_profile_pod_cidr
service_cidr = var.net_profile_service_cidr
}

tags = var.tags
}


# Log Analytics
resource "azurerm_log_analytics_workspace" "main" {
count = var.enable_log_analytics_workspace ? 1 : 0
name = var.cluster_log_analytics_workspace_name == null ? "${var.prefix}-workspace" : var.cluster_log_analytics_workspace_name
count = var.log_analytics_workspace_enabled ? 1 : 0
name = var.log_analytics_workspace_name == null ? "${coalesce(var.cluster_name, var.prefix)}-workspace" : var.log_analytics_workspace_name
location = data.azurerm_resource_group.main.location
resource_group_name = var.resource_group_name
sku = var.log_analytics_workspace_sku
Expand All @@ -163,7 +145,7 @@ resource "azurerm_log_analytics_workspace" "main" {
}

resource "azurerm_log_analytics_solution" "main" {
count = var.enable_log_analytics_workspace ? 1 : 0
count = var.log_analytics_solution_enabled && var.log_analytics_workspace_enabled ? 1 : 0
solution_name = "ContainerInsights"
location = data.azurerm_resource_group.main.location
resource_group_name = var.resource_group_name
Expand All @@ -178,4 +160,11 @@ resource "azurerm_log_analytics_solution" "main" {
tags = var.tags
}


# Permissions on provided Azure Container Registry
resource "azurerm_role_assignment" "azure_container_registry" {
count = var.azure_container_registry_enabled ? 1 : 0
principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = var.azure_container_registry_id
skip_service_principal_aad_check = true
}
21 changes: 0 additions & 21 deletions modules/ssh-key/main.tf

This file was deleted.

Loading