Skip to content

Commit 9c32589

Browse files
chmouel and claude
committed
feat: implement GetCommitStatuses on forgejo
Previously the Gitea/Forgejo provider returned nil for GetCommitStatuses, which meant the annotation matcher couldn't detect pruned-but-successful pipeline runs, causing /retest to re-run every pipeline instead of just the failed ones. This commit replaces the stub with a real implementation that calls ListStatuses from the Forgejo SDK, maps Context→Name and State→Status, and deduplicates using a seen map. Unit tests cover happy path, deduplication, empty response, nil client, and API errors. An e2e test (TestGiteaRetestAfterPipelineRunPruning) verifies the full flow: run two pipelines, prune the PipelineRun objects, issue /retest, and assert only the failed pipeline is re-run. Jira: https://redhat.atlassian.net/browse/SRVKP-11529 Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> Signed-off-by: Chmouel Boudjnah <chmouel@redhat.com>
1 parent 4c8cf58 commit 9c32589

File tree

3 files changed

+375
-2
lines changed

3 files changed

+375
-2
lines changed

pkg/provider/gitea/gitea.go

Lines changed: 30 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -369,8 +369,36 @@ func (v *Provider) createStatusCommit(ctx context.Context, event *info.Event, pa
369369
return nil
370370
}
371371

372-
func (v *Provider) GetCommitStatuses(_ context.Context, _ *info.Event) ([]provider.CommitStatusInfo, error) {
373-
return nil, nil
372+
func (v *Provider) GetCommitStatuses(_ context.Context, event *info.Event) ([]provider.CommitStatusInfo, error) {
373+
if v.giteaClient == nil {
374+
return nil, fmt.Errorf("no gitea client has been initialized")
375+
}
376+
377+
statuses, _, err := v.Client().ListStatuses(
378+
event.Organization, event.Repository, event.SHA,
379+
forgejo.ListStatusesOption{},
380+
)
381+
if err != nil {
382+
return nil, err
383+
}
384+
385+
var (
386+
result []provider.CommitStatusInfo
387+
seen = map[string]struct{}{}
388+
)
389+
for _, s := range statuses {
390+
key := fmt.Sprintf("%s\x00%s", s.Context, string(s.State))
391+
if _, ok := seen[key]; ok {
392+
continue
393+
}
394+
seen[key] = struct{}{}
395+
result = append(result, provider.CommitStatusInfo{
396+
Name: s.Context,
397+
Status: string(s.State),
398+
})
399+
}
400+
401+
return result, nil
374402
}
375403

376404
func (v *Provider) GetTektonDir(_ context.Context, event *info.Event, path, provenance string) (string, error) {

pkg/provider/gitea/gitea_test.go

Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1219,3 +1219,112 @@ func TestGetCommitInfoPRLookupPopulatesURLs(t *testing.T) {
12191219
assert.Equal(t, "https://gitea.com/fork-owner/repo", event.HeadURL, "HeadURL should be populated from PR lookup")
12201220
assert.Equal(t, "https://gitea.com/owner/repo", event.BaseURL, "BaseURL should be populated from PR lookup")
12211221
}
1222+
1223+
// TestGetCommitStatuses exercises Provider.GetCommitStatuses against a mocked
// Gitea/Forgejo statuses endpoint. Cases cover the happy path, deduplication
// of identical (context, state) pairs, an empty status list, the nil-client
// guard, and API-level errors.
func TestGetCommitStatuses(t *testing.T) {
	tests := []struct {
		name        string
		event       *info.Event
		nilClient   bool // when true, skip the mock server and use a zero Provider
		mockHandler func(http.ResponseWriter, *http.Request)
		want        []provider.CommitStatusInfo
		wantErr     string // substring expected in the returned error; empty means success
	}{
		{
			name: "happy path with multiple statuses",
			event: &info.Event{
				Organization: "org",
				Repository:   "repo",
				SHA:          "abc123",
			},
			mockHandler: func(rw http.ResponseWriter, _ *http.Request) {
				// Gitea's API serializes the status state under the "status" key.
				fmt.Fprint(rw, `[
				{"context":"Pipelines as Code CI / pr-one","status":"success"},
				{"context":"Pipelines as Code CI / pr-two","status":"failure"}
				]`)
			},
			want: []provider.CommitStatusInfo{
				{Name: "Pipelines as Code CI / pr-one", Status: "success"},
				{Name: "Pipelines as Code CI / pr-two", Status: "failure"},
			},
		},
		{
			name: "deduplicates identical statuses",
			event: &info.Event{
				Organization: "org",
				Repository:   "repo",
				SHA:          "abc123",
			},
			mockHandler: func(rw http.ResponseWriter, _ *http.Request) {
				// Two identical success entries collapse to one; the failure for
				// the same context is a distinct (context, state) pair and stays.
				fmt.Fprint(rw, `[
				{"context":"CI / build","status":"success"},
				{"context":"CI / build","status":"success"},
				{"context":"CI / build","status":"failure"}
				]`)
			},
			want: []provider.CommitStatusInfo{
				{Name: "CI / build", Status: "success"},
				{Name: "CI / build", Status: "failure"},
			},
		},
		{
			name: "empty response",
			event: &info.Event{
				Organization: "org",
				Repository:   "repo",
				SHA:          "abc123",
			},
			mockHandler: func(rw http.ResponseWriter, _ *http.Request) {
				fmt.Fprint(rw, `[]`)
			},
			// want stays nil: an empty status list yields a nil slice, no error.
		},
		{
			name:      "nil client returns error",
			nilClient: true,
			event: &info.Event{
				Organization: "org",
				Repository:   "repo",
				SHA:          "abc123",
			},
			wantErr: "no gitea client has been initialized",
		},
		{
			name: "API error",
			event: &info.Event{
				Organization: "org",
				Repository:   "repo",
				SHA:          "abc123",
			},
			mockHandler: func(rw http.ResponseWriter, _ *http.Request) {
				rw.WriteHeader(http.StatusInternalServerError)
			},
			wantErr: "500",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var p *Provider
			if tt.nilClient {
				p = &Provider{}
			} else {
				fakeclient, mux, teardown := tgitea.Setup(t)
				defer teardown()

				// Route matches the Gitea commit-statuses endpoint for the event.
				mux.HandleFunc(
					fmt.Sprintf("/repos/%s/%s/commits/%s/statuses",
						tt.event.Organization, tt.event.Repository, tt.event.SHA),
					tt.mockHandler,
				)
				p = &Provider{giteaClient: fakeclient}
			}

			got, err := p.GetCommitStatuses(context.Background(), tt.event)
			if tt.wantErr != "" {
				assert.ErrorContains(t, err, tt.wantErr)
				return
			}
			assert.NilError(t, err)
			assert.DeepEqual(t, got, tt.want)
		})
	}
}

test/gitea_retest_pruned_test.go

Lines changed: 236 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,236 @@
1+
//go:build e2e
2+
3+
package test
4+
5+
import (
6+
"context"
7+
"fmt"
8+
"reflect"
9+
"testing"
10+
"time"
11+
12+
"codeberg.org/mvdkleijn/forgejo-sdk/forgejo/v3"
13+
"github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys"
14+
"github.com/openshift-pipelines/pipelines-as-code/pkg/formatting"
15+
"github.com/openshift-pipelines/pipelines-as-code/pkg/kubeinteraction"
16+
"github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype"
17+
tgitea "github.com/openshift-pipelines/pipelines-as-code/test/pkg/gitea"
18+
twait "github.com/openshift-pipelines/pipelines-as-code/test/pkg/wait"
19+
"gotest.tools/v3/assert"
20+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
21+
)
22+
23+
// TestGiteaRetestAfterPipelineRunPruning verifies that /retest only re-runs
// failed pipelines when PipelineRun objects have been pruned from the cluster.
//
// This relies on GetCommitStatuses returning Forgejo commit statuses so that
// the annotation matcher can detect previously successful runs.
//
// Flow:
// 1. Create PR with 2 pipelines: one that succeeds, one that fails
// 2. Wait for both to complete
// 3. Delete all PipelineRun objects (simulating pruning)
// 4. Issue /retest
// 5. Assert that only the failed pipeline is re-run.
func TestGiteaRetestAfterPipelineRunPruning(t *testing.T) {
	topts := &tgitea.TestOpts{
		TargetEvent:     triggertype.PullRequest.String(),
		SkipEventsCheck: true,
		YAMLFiles: map[string]string{
			".tekton/always-good-pipelinerun.yaml": "testdata/always-good-pipelinerun.yaml",
			".tekton/pipelinerun-exit-1.yaml":      "testdata/failures/pipelinerun-exit-1.yaml",
		},
	}
	ctx, cleanup := tgitea.TestPR(t, topts)
	defer cleanup()

	sha := topts.SHA
	// PipelineRuns created for this commit carry the (sanitized) SHA as a label.
	labelSelector := fmt.Sprintf("%s=%s", keys.SHA, formatting.CleanValueKubernetes(sha))

	// Wait for both PipelineRuns to appear
	topts.ParamsRun.Clients.Log.Infof("Waiting for 2 PipelineRuns to appear")
	err := twait.UntilMinPRAppeared(ctx, topts.ParamsRun.Clients, twait.Opts{
		RepoName:    topts.TargetNS,
		Namespace:   topts.TargetNS,
		PollTimeout: twait.DefaultTimeout,
		TargetSHA:   formatting.CleanValueKubernetes(sha),
	}, 2)
	assert.NilError(t, err)

	// Wait for repository to have at least 2 status entries
	topts.ParamsRun.Clients.Log.Infof("Waiting for Repository status to have 2 entries")
	_, err = twait.UntilRepositoryUpdated(ctx, topts.ParamsRun.Clients, twait.Opts{
		RepoName:            topts.TargetNS,
		Namespace:           topts.TargetNS,
		MinNumberStatus:     2,
		PollTimeout:         twait.DefaultTimeout,
		TargetSHA:           sha,
		FailOnRepoCondition: "no-match",
	})
	assert.NilError(t, err)

	// Verify we have exactly 2 PipelineRuns
	pruns, err := topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	assert.NilError(t, err)
	assert.Equal(t, len(pruns.Items), 2, "expected 2 initial PipelineRuns")

	// Record initial PipelineRun names so new runs after /retest can be counted.
	initialPRNames := map[string]bool{}
	for _, pr := range pruns.Items {
		initialPRNames[pr.Name] = true
	}

	// Verify Forgejo commit statuses: exactly 1 successful template + 1 failed template
	statuses, _, err := topts.GiteaCNX.Client().ListStatuses(
		topts.Opts.Organization, topts.Opts.Repo, sha,
		forgejo.ListStatusesOption{},
	)
	assert.NilError(t, err)
	initialSummary := summarizeTerminalStatuses(statuses)
	successContexts, failureContexts := splitTerminalStatusContexts(initialSummary)
	assert.Equal(t, len(successContexts), 1, "expected exactly 1 successful pipeline context")
	assert.Equal(t, len(failureContexts), 1, "expected exactly 1 failed pipeline context")

	successContext := successContexts[0]
	failureContext := failureContexts[0]

	// Simulate pruning: delete all PipelineRun objects
	topts.ParamsRun.Clients.Log.Infof("Deleting all PipelineRuns to simulate pruning")
	err = topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).DeleteCollection(ctx,
		metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labelSelector})
	assert.NilError(t, err)

	// Wait for pruning to complete
	topts.ParamsRun.Clients.Log.Infof("Waiting for PipelineRuns to be deleted")
	pollErr := kubeinteraction.PollImmediateWithContext(ctx, twait.DefaultTimeout, func() (bool, error) {
		pruns, err = topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).List(ctx, metav1.ListOptions{
			LabelSelector: labelSelector,
		})
		if err != nil {
			return false, err
		}
		topts.ParamsRun.Clients.Log.Infof("Waiting for PipelineRuns to be deleted: %d remaining", len(pruns.Items))
		return len(pruns.Items) == 0, nil
	})
	if pollErr != nil {
		// Deliberately non-fatal: stragglers in Terminating state should not
		// invalidate the retest assertions below.
		topts.ParamsRun.Clients.Log.Infof("Warning: PipelineRuns not fully deleted after polling: %v (proceeding anyway)", pollErr)
	}

	// Issue /retest comment on the PR
	topts.ParamsRun.Clients.Log.Infof("Posting /retest comment on PR %d", topts.PullRequest.Index)
	tgitea.PostCommentOnPullRequest(t, topts, "/retest")

	// Wait until the terminal provider statuses stop changing. This avoids
	// false-passing if a second, incorrect rerun is created slightly later.
	topts.ParamsRun.Clients.Log.Infof("Waiting for stable retest status set")
	finalSummary, err := waitForStableGiteaTerminalStatuses(ctx, topts, sha, 3)
	assert.NilError(t, err)

	// The successful context must keep its exact status counts (no rerun),
	// while the failed context must gain exactly one more failure.
	assert.Equal(t, finalSummary[successContext].Success, initialSummary[successContext].Success,
		"expected successful pipeline context %q to not rerun", successContext)
	assert.Equal(t, finalSummary[successContext].Failure, initialSummary[successContext].Failure,
		"expected successful pipeline context %q to not gain failing statuses", successContext)
	assert.Equal(t, finalSummary[failureContext].Success, initialSummary[failureContext].Success,
		"expected failed pipeline context %q to remain unsuccessful", failureContext)
	assert.Equal(t, finalSummary[failureContext].Failure, initialSummary[failureContext].Failure+1,
		"expected failed pipeline context %q to rerun exactly once", failureContext)

	// Assert: only the failed pipeline should have been re-run.
	prunsAfterRetest, err := topts.ParamsRun.Clients.Tekton.TektonV1().PipelineRuns(topts.TargetNS).List(ctx, metav1.ListOptions{
		LabelSelector: labelSelector,
	})
	assert.NilError(t, err)

	newCount := 0
	for _, pr := range prunsAfterRetest.Items {
		if !initialPRNames[pr.Name] {
			newCount++
		}
	}
	assert.Equal(t, newCount, 1,
		"expected only 1 new PipelineRun after /retest (only the failed pipeline should re-run), but got %d",
		newCount)
}
156+
157+
// terminalStatusSummary tallies, for a single commit-status context, how many
// terminal statuses landed in each bucket: Success counts StatusSuccess
// entries, Failure counts StatusFailure and StatusError entries.
type terminalStatusSummary struct {
	Success int
	Failure int
}
161+
162+
func summarizeTerminalStatuses(statuses []*forgejo.Status) map[string]terminalStatusSummary {
163+
summary := map[string]terminalStatusSummary{}
164+
for _, status := range statuses {
165+
if status == nil {
166+
continue
167+
}
168+
contextSummary := summary[status.Context]
169+
switch status.State {
170+
case forgejo.StatusSuccess:
171+
contextSummary.Success++
172+
case forgejo.StatusFailure, forgejo.StatusError:
173+
contextSummary.Failure++
174+
default:
175+
continue
176+
}
177+
summary[status.Context] = contextSummary
178+
}
179+
return summary
180+
}
181+
182+
func splitTerminalStatusContexts(summary map[string]terminalStatusSummary) ([]string, []string) {
183+
successContexts := []string{}
184+
failureContexts := []string{}
185+
for contextName, counts := range summary {
186+
switch {
187+
case counts.Success > 0 && counts.Failure == 0:
188+
successContexts = append(successContexts, contextName)
189+
case counts.Failure > 0 && counts.Success == 0:
190+
failureContexts = append(failureContexts, contextName)
191+
}
192+
}
193+
return successContexts, failureContexts
194+
}
195+
196+
// waitForStableGiteaTerminalStatuses polls the Forgejo commit statuses for sha
// until the per-context terminal summary (a) contains at least
// minTerminalStatuses terminal entries and (b) has not changed for a full
// stableWindow. Returns the stable summary, or an error if listing fails or
// the overall poll times out before stability is reached.
func waitForStableGiteaTerminalStatuses(ctx context.Context, topts *tgitea.TestOpts, sha string, minTerminalStatuses int) (map[string]terminalStatusSummary, error) {
	// How long the summary must stay unchanged before we consider it final.
	// NOTE(review): assumes the poll interval used by PollImmediateWithContext
	// is shorter than this window — confirm against its implementation.
	const stableWindow = 5 * time.Second

	var (
		lastSummary   map[string]terminalStatusSummary // summary seen on the previous poll
		stableSummary map[string]terminalStatusSummary // candidate returned once stable
		stableSince   time.Time                        // when the current candidate was first seen
	)

	err := kubeinteraction.PollImmediateWithContext(ctx, twait.DefaultTimeout, func() (bool, error) {
		statuses, _, err := topts.GiteaCNX.Client().ListStatuses(
			topts.Opts.Organization, topts.Opts.Repo, sha,
			forgejo.ListStatusesOption{},
		)
		if err != nil {
			// Surfacing the error aborts the poll immediately.
			return false, err
		}

		summary := summarizeTerminalStatuses(statuses)
		terminalCount := 0
		for _, counts := range summary {
			terminalCount += counts.Success + counts.Failure
		}
		if terminalCount < minTerminalStatuses {
			// Not enough terminal statuses yet; keep polling.
			return false, nil
		}

		if !reflect.DeepEqual(summary, lastSummary) {
			// Summary changed: restart the stability clock on this snapshot.
			lastSummary = summary
			stableSummary = summary
			stableSince = time.Now()
			return false, nil
		}

		// Unchanged since last poll: done once it has held for stableWindow.
		return time.Since(stableSince) >= stableWindow, nil
	})
	if err != nil {
		return nil, err
	}
	return stableSummary, nil
}

0 commit comments

Comments
 (0)