Skip to content

Commit 7f80ff1

Browse files
Merge pull request #546 from element-hq/gaelg/skip-completed-pods
matrix-tools syn2mas: filter completed pods
2 parents 687c133 + 7d161c0 commit 7f80ff1

4 files changed

Lines changed: 25 additions & 18 deletions

File tree

charts/matrix-stack/source/common/sub_schema_values.yaml.j2

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ SPDX-License-Identifier: AGPL-3.0-only
1313

1414
## The matrix-tools image, used in multiple components
1515
matrixTools:
16-
{{ image(registry="ghcr.io", repository="element-hq/ess-helm/matrix-tools", tag="0.5.2") | indent(2) }}
16+
{{ image(registry="ghcr.io", repository="element-hq/ess-helm/matrix-tools", tag="0.5.3") | indent(2) }}
1717

1818
## CertManager Issuer to configure by default automatically on all ingresses
1919
## If configured, the chart will automatically generate the tlsSecret name for all ingresses

charts/matrix-stack/values.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ matrixTools:
2020

2121
## The tag of the container image to use.
2222
## Defaults to the Chart's appVersion if not set
23-
tag: "0.5.2"
23+
tag: "0.5.3"
2424

2525
## Container digest to use. Used to pull the image instead of the image tag / Chart appVersion if set
2626
# digest:

matrix-tools/internal/pkg/syn2mas/syn2mas.go

Lines changed: 22 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import (
1313
"strconv"
1414
"time"
1515

16+
corev1 "k8s.io/api/core/v1"
1617
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1718
"k8s.io/client-go/kubernetes"
1819
)
@@ -40,7 +41,7 @@ func scaleDownSynapse(client kubernetes.Interface, namespace string) map[string]
4041
}
4142

4243
for {
43-
fmt.Println("Waiting for replicas to be 0 on all synapse replicas" )
44+
fmt.Println("Waiting for replicas to be 0 on all synapse replicas")
4445
sts, err := stsClient.List(ctx, metav1.ListOptions{
4546
LabelSelector: "app.kubernetes.io/component=matrix-server",
4647
})
@@ -56,7 +57,7 @@ func scaleDownSynapse(client kubernetes.Interface, namespace string) map[string]
5657
allStsDown = false
5758
}
5859
}
59-
if (allStsDown) {
60+
if allStsDown {
6061
break
6162
}
6263
}
@@ -71,19 +72,24 @@ func scaleDownSynapse(client kubernetes.Interface, namespace string) map[string]
7172
}
7273
}
7374
for {
74-
fmt.Println("Waiting for all synapse pods to be gone..." )
75+
fmt.Println("Waiting for all synapse pods to be gone...")
7576
podsClient := client.CoreV1().Pods(namespace)
7677
pods, err := podsClient.List(ctx, metav1.ListOptions{
77-
LabelSelector: "app.kubernetes.io/component=matrix-server,app.kubernetes.io/name!=synapse-check-config",
78+
LabelSelector: "app.kubernetes.io/component=matrix-server",
7879
})
7980
if err != nil {
8081
fmt.Println(err)
8182
os.Exit(1)
8283
}
83-
84-
if len(pods.Items) != 0 {
85-
fmt.Printf("%d pods remaining. Waiting %d seconds...\n", len(pods.Items), (remainingRetries))
86-
if (remainingRetries <= 0) {
84+
podsFound := 0
85+
for _, pod := range pods.Items {
86+
if pod.Status.Phase != corev1.PodSucceeded {
87+
podsFound = podsFound + 1
88+
}
89+
}
90+
if podsFound != 0 {
91+
fmt.Printf("Pods remaining. Waiting %d seconds...\n", (remainingRetries))
92+
if remainingRetries <= 0 {
8793
break
8894
}
8995
time.Sleep(time.Second)
@@ -94,7 +100,7 @@ func scaleDownSynapse(client kubernetes.Interface, namespace string) map[string]
94100
remainingRetries = remainingRetries - 1
95101
}
96102

97-
if (!allPodsDown) {
103+
if !allPodsDown {
98104
fmt.Println("StatefulSet are down, but pods matching matrix-server component are remaining. Something wrong is happening.")
99105
os.Exit(1)
100106
}
@@ -105,7 +111,7 @@ func scaleBack(client kubernetes.Interface, namespace string, scaledSts map[stri
105111
ctx := context.Background()
106112
fmt.Println("Scaling back synapse")
107113
stsClient := client.AppsV1().StatefulSets(namespace)
108-
for stsName, replicas := range scaledSts{
114+
for stsName, replicas := range scaledSts {
109115
fmt.Printf("Scaling back to %d replicas on %s\n", replicas, stsName)
110116
sts, err := stsClient.Get(ctx, stsName, metav1.GetOptions{})
111117
if err != nil {
@@ -132,12 +138,12 @@ func RunSyn2MAS(client kubernetes.Interface, namespace string, synapseConfigPath
132138
var exitError *exec.ExitError
133139
var ok bool
134140
if err != nil {
135-
// Detailed error handling
136-
if exitError, ok = err.(*exec.ExitError); ok {
137-
fmt.Printf("Command failed with status: %v\n", exitError.ExitCode())
138-
} else {
139-
fmt.Println(err)
140-
}
141+
// Detailed error handling
142+
if exitError, ok = err.(*exec.ExitError); ok {
143+
fmt.Printf("Command failed with status: %v\n", exitError.ExitCode())
144+
} else {
145+
fmt.Println(err)
146+
}
141147
}
142148
scaleBack(client, namespace, originStsReplicas)
143149
if exitError != nil {

newsfragments/546.fixed.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
matrix-tools: Skip any completed pods when scaling down synapse pods in syn2mas migration.

0 commit comments

Comments (0)