|
| 1 | +//go:build e2e |
| 2 | + |
| 3 | +package test |
| 4 | + |
| 5 | +import ( |
| 6 | + "context" |
| 7 | + "fmt" |
| 8 | + "reflect" |
| 9 | + "testing" |
| 10 | + "time" |
| 11 | + |
| 12 | + "codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v3" |
| 13 | + "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys" |
| 14 | + "github.com/openshift-pipelines/pipelines-as-code/pkg/formatting" |
| 15 | + "github.com/openshift-pipelines/pipelines-as-code/pkg/kubeinteraction" |
| 16 | + "github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype" |
| 17 | + tgitea "github.com/openshift-pipelines/pipelines-as-code/test/pkg/gitea" |
| 18 | + twait "github.com/openshift-pipelines/pipelines-as-code/test/pkg/wait" |
| 19 | + "gotest.tools/v3/assert" |
| 20 | + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" |
| 21 | +) |
| 22 | + |
| 23 | +// TestGiteaRetestAfterPipelineRunPruning verifies that /retest only re-runs |
| 24 | +// failed pipelines when PipelineRun objects have been pruned from the cluster. |
| 25 | +// |
| 26 | +// This relies on GetCommitStatuses returning Forgejo commit statuses so that |
| 27 | +// the annotation matcher can detect previously successful runs. |
| 28 | +// |
| 29 | +// Flow: |
| 30 | +// 1. Create PR with 2 pipelines: one that succeeds, one that fails |
| 31 | +// 2. Wait for both to complete |
| 32 | +// 3. Delete all PipelineRun objects (simulating pruning) |
| 33 | +// 4. Issue /retest |
| 34 | +// 5. Assert that only the failed pipeline is re-run. |
| 35 | +func TestGiteaRetestAfterPipelineRunPruning(t *testing.T) { |
| 36 | + topts := &tgitea.TestOpts{ |
| 37 | + TargetEvent: triggertype.PullRequest.String(), |
| 38 | + SkipEventsCheck: true, |
| 39 | + YAMLFiles: map[string]string{ |
| 40 | + ".tekton/always-good-pipelinerun.yaml": "testdata/always-good-pipelinerun.yaml", |
| 41 | + ".tekton/pipelinerun-exit-1.yaml": "testdata/failures/pipelinerun-exit-1.yaml", |
| 42 | + }, |
| 43 | + } |
| 44 | + ctx, cleanup := tgitea.TestPR(t, topts) |
| 45 | + defer cleanup() |
| 46 | + |
| 47 | + sha := topts.SHA |
| 48 | + labelSelector := fmt.Sprintf("%s=%s", keys.SHA, formatting.CleanValueKubernetes(sha)) |
| 49 | + |
| 50 | + // Wait for both PipelineRuns to appear |
| 51 | + topts.ParamsRun.Clients.Log.Infof("Waiting for 2 PipelineRuns to appear") |
| 52 | + err := twait.UntilMinPRAppeared(ctx, topts.ParamsRun.Clients, twait.Opts{ |
| 53 | + RepoName: topts.TargetNS, |
| 54 | + Namespace: topts.TargetNS, |
| 55 | + PollTimeout: twait.DefaultTimeout, |
| 56 | + TargetSHA: formatting.CleanValueKubernetes(sha), |
| 57 | + }, 2) |
| 58 | + assert.NilError(t, err) |
| 59 | + |
| 60 | + // Wait for repository to have at least 2 status entries |
| 61 | + topts.ParamsRun.Clients.Log.Infof("Waiting for Repository status to have 2 entries") |
| 62 | + _, err = twait.UntilRepositoryUpdated(ctx, topts.ParamsRun.Clients, twait.Opts{ |
| 63 | + RepoName: topts.TargetNS, |
| 64 | + Namespace: topts.TargetNS, |
| 65 | + MinNumberStatus: 2, |
| 66 | + PollTimeout: twait.DefaultTimeout, |
| 67 | + TargetSHA: sha, |
| 68 | + FailOnRepoCondition: "no-match", |
| 69 | + }) |
| 70 | + assert.NilError(t, err) |
| 71 | + |
| 72 | + // Verify we have exactly 2 PipelineRuns |
| 73 | + pruns, err := topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).List(ctx, metav1.ListOptions{ |
| 74 | + LabelSelector: labelSelector, |
| 75 | + }) |
| 76 | + assert.NilError(t, err) |
| 77 | + assert.Equal(t, len(pruns.Items), 2, "expected 2 initial PipelineRuns") |
| 78 | + |
| 79 | + // Record initial PipelineRun names |
| 80 | + initialPRNames := map[string]bool{} |
| 81 | + for _, pr := range pruns.Items { |
| 82 | + initialPRNames[pr.Name] = true |
| 83 | + } |
| 84 | + |
| 85 | + // Verify Forgejo commit statuses: exactly 1 successful template + 1 failed template |
| 86 | + statuses, _, err := topts.GiteaCNX.Client().ListStatuses( |
| 87 | + topts.Opts.Organization, topts.Opts.Repo, sha, |
| 88 | + forgejo.ListStatusesOption{}, |
| 89 | + ) |
| 90 | + assert.NilError(t, err) |
| 91 | + initialSummary := summarizeTerminalStatuses(statuses) |
| 92 | + successContexts, failureContexts := splitTerminalStatusContexts(initialSummary) |
| 93 | + assert.Equal(t, len(successContexts), 1, "expected exactly 1 successful pipeline context") |
| 94 | + assert.Equal(t, len(failureContexts), 1, "expected exactly 1 failed pipeline context") |
| 95 | + |
| 96 | + successContext := successContexts[0] |
| 97 | + failureContext := failureContexts[0] |
| 98 | + |
| 99 | + // Simulate pruning: delete all PipelineRun objects |
| 100 | + topts.ParamsRun.Clients.Log.Infof("Deleting all PipelineRuns to simulate pruning") |
| 101 | + err = topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).DeleteCollection(ctx, |
| 102 | + metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labelSelector}) |
| 103 | + assert.NilError(t, err) |
| 104 | + |
| 105 | + // Wait for pruning to complete |
| 106 | + topts.ParamsRun.Clients.Log.Infof("Waiting for PipelineRuns to be deleted") |
| 107 | + pollErr := kubeinteraction.PollImmediateWithContext(ctx, twait.DefaultTimeout, func() (bool, error) { |
| 108 | + pruns, err = topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).List(ctx, metav1.ListOptions{ |
| 109 | + LabelSelector: labelSelector, |
| 110 | + }) |
| 111 | + if err != nil { |
| 112 | + return false, err |
| 113 | + } |
| 114 | + topts.ParamsRun.Clients.Log.Infof("Waiting for PipelineRuns to be deleted: %d remaining", len(pruns.Items)) |
| 115 | + return len(pruns.Items) == 0, nil |
| 116 | + }) |
| 117 | + if pollErr != nil { |
| 118 | + topts.ParamsRun.Clients.Log.Infof("Warning: PipelineRuns not fully deleted after polling: %v (proceeding anyway)", pollErr) |
| 119 | + } |
| 120 | + |
| 121 | + // Issue /retest comment on the PR |
| 122 | + topts.ParamsRun.Clients.Log.Infof("Posting /retest comment on PR %d", topts.PullRequest.Index) |
| 123 | + tgitea.PostCommentOnPullRequest(t, topts, "/retest") |
| 124 | + |
| 125 | + // Wait until the terminal provider statuses stop changing. This avoids |
| 126 | + // false-passing if a second, incorrect rerun is created slightly later. |
| 127 | + topts.ParamsRun.Clients.Log.Infof("Waiting for stable retest status set") |
| 128 | + finalSummary, err := waitForStableGiteaTerminalStatuses(ctx, topts, sha, 3) |
| 129 | + assert.NilError(t, err) |
| 130 | + |
| 131 | + assert.Equal(t, finalSummary[successContext].Success, initialSummary[successContext].Success, |
| 132 | + "expected successful pipeline context %q to not rerun", successContext) |
| 133 | + assert.Equal(t, finalSummary[successContext].Failure, initialSummary[successContext].Failure, |
| 134 | + "expected successful pipeline context %q to not gain failing statuses", successContext) |
| 135 | + assert.Equal(t, finalSummary[failureContext].Success, initialSummary[failureContext].Success, |
| 136 | + "expected failed pipeline context %q to remain unsuccessful", failureContext) |
| 137 | + assert.Equal(t, finalSummary[failureContext].Failure, initialSummary[failureContext].Failure+1, |
| 138 | + "expected failed pipeline context %q to rerun exactly once", failureContext) |
| 139 | + |
| 140 | + // Assert: only the failed pipeline should have been re-run. |
| 141 | + prunsAfterRetest, err := topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).List(ctx, metav1.ListOptions{ |
| 142 | + LabelSelector: labelSelector, |
| 143 | + }) |
| 144 | + assert.NilError(t, err) |
| 145 | + |
| 146 | + newCount := 0 |
| 147 | + for _, pr := range prunsAfterRetest.Items { |
| 148 | + if !initialPRNames[pr.Name] { |
| 149 | + newCount++ |
| 150 | + } |
| 151 | + } |
| 152 | + assert.Equal(t, newCount, 1, |
| 153 | + "expected only 1 new PipelineRun after /retest (only the failed pipeline should re-run), but got %d", |
| 154 | + newCount) |
| 155 | +} |
| 156 | + |
// terminalStatusSummary counts the terminal commit statuses observed for a
// single commit-status context.
type terminalStatusSummary struct {
	// Success is the number of statuses in the success state.
	Success int
	// Failure is the combined number of statuses in the failure or error state.
	Failure int
}
| 161 | + |
| 162 | +func summarizeTerminalStatuses(statuses []*forgejo.Status) map[string]terminalStatusSummary { |
| 163 | + summary := map[string]terminalStatusSummary{} |
| 164 | + for _, status := range statuses { |
| 165 | + if status == nil { |
| 166 | + continue |
| 167 | + } |
| 168 | + contextSummary := summary[status.Context] |
| 169 | + switch status.State { |
| 170 | + case forgejo.StatusSuccess: |
| 171 | + contextSummary.Success++ |
| 172 | + case forgejo.StatusFailure, forgejo.StatusError: |
| 173 | + contextSummary.Failure++ |
| 174 | + default: |
| 175 | + continue |
| 176 | + } |
| 177 | + summary[status.Context] = contextSummary |
| 178 | + } |
| 179 | + return summary |
| 180 | +} |
| 181 | + |
| 182 | +func splitTerminalStatusContexts(summary map[string]terminalStatusSummary) ([]string, []string) { |
| 183 | + successContexts := []string{} |
| 184 | + failureContexts := []string{} |
| 185 | + for contextName, counts := range summary { |
| 186 | + switch { |
| 187 | + case counts.Success > 0 && counts.Failure == 0: |
| 188 | + successContexts = append(successContexts, contextName) |
| 189 | + case counts.Failure > 0 && counts.Success == 0: |
| 190 | + failureContexts = append(failureContexts, contextName) |
| 191 | + } |
| 192 | + } |
| 193 | + return successContexts, failureContexts |
| 194 | +} |
| 195 | + |
| 196 | +func waitForStableGiteaTerminalStatuses(ctx context.Context, topts *tgitea.TestOpts, sha string, minTerminalStatuses int) (map[string]terminalStatusSummary, error) { |
| 197 | + const stableWindow = 5 * time.Second |
| 198 | + |
| 199 | + var ( |
| 200 | + lastSummary map[string]terminalStatusSummary |
| 201 | + stableSummary map[string]terminalStatusSummary |
| 202 | + stableSince time.Time |
| 203 | + ) |
| 204 | + |
| 205 | + err := kubeinteraction.PollImmediateWithContext(ctx, twait.DefaultTimeout, func() (bool, error) { |
| 206 | + statuses, _, err := topts.GiteaCNX.Client().ListStatuses( |
| 207 | + topts.Opts.Organization, topts.Opts.Repo, sha, |
| 208 | + forgejo.ListStatusesOption{}, |
| 209 | + ) |
| 210 | + if err != nil { |
| 211 | + return false, err |
| 212 | + } |
| 213 | + |
| 214 | + summary := summarizeTerminalStatuses(statuses) |
| 215 | + terminalCount := 0 |
| 216 | + for _, counts := range summary { |
| 217 | + terminalCount += counts.Success + counts.Failure |
| 218 | + } |
| 219 | + if terminalCount < minTerminalStatuses { |
| 220 | + return false, nil |
| 221 | + } |
| 222 | + |
| 223 | + if !reflect.DeepEqual(summary, lastSummary) { |
| 224 | + lastSummary = summary |
| 225 | + stableSummary = summary |
| 226 | + stableSince = time.Now() |
| 227 | + return false, nil |
| 228 | + } |
| 229 | + |
| 230 | + return time.Since(stableSince) >= stableWindow, nil |
| 231 | + }) |
| 232 | + if err != nil { |
| 233 | + return nil, err |
| 234 | + } |
| 235 | + return stableSummary, nil |
| 236 | +} |
0 commit comments