Skip to content

Commit 9718d21

Browse files
committed
refactor(llm): simplify and restructure the LLM package
Remove factory, orchestrator, and output_handler files and replace them with a cleaner registry-based approach. Each provider now registers itself and receives a single config struct instead of scattered arguments. Move shared types out of the ltypes subpackage into the main llm package. All existing functionality is preserved.

Jira: https://redhat.atlassian.net/browse/SRVKP-11539
Signed-off-by: Chmouel Boudjnah <chmouel@redhat.com>
1 parent 658e9d4 commit 9718d21

21 files changed

+1322
-1957
lines changed

.golangci.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,8 @@ linters:
9090
path: _test\.go
9191
- path: pkg/resolve/resolve.go
9292
text: don't use `init` function
93+
- path: pkg/llm/providers/
94+
text: don't use `init` function
9395
- linters:
9496
- revive
9597
path: pkg/errors/

pkg/llm/analyzer.go

Lines changed: 125 additions & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ import (
99
"github.com/openshift-pipelines/pipelines-as-code/pkg/cel"
1010
"github.com/openshift-pipelines/pipelines-as-code/pkg/kubeinteraction"
1111
llmcontext "github.com/openshift-pipelines/pipelines-as-code/pkg/llm/context"
12-
"github.com/openshift-pipelines/pipelines-as-code/pkg/llm/ltypes"
1312
"github.com/openshift-pipelines/pipelines-as-code/pkg/params"
1413
"github.com/openshift-pipelines/pipelines-as-code/pkg/params/info"
1514
"github.com/openshift-pipelines/pipelines-as-code/pkg/provider"
@@ -22,97 +21,126 @@ import (
2221
// AnalysisResult represents the result of an LLM analysis.
2322
type AnalysisResult struct {
2423
Role string
25-
Response *ltypes.AnalysisResponse
24+
Response *AnalysisResponse
2625
Error error
2726
}
2827

29-
// Analyzer coordinates the LLM analysis process.
30-
type Analyzer struct {
31-
run *params.Run
32-
kinteract kubeinteraction.Interface
33-
factory *Factory
34-
assembler *llmcontext.Assembler
35-
logger *zap.SugaredLogger
36-
}
37-
38-
// NewAnalyzer creates a new LLM analyzer.
39-
func NewAnalyzer(run *params.Run, kinteract kubeinteraction.Interface, logger *zap.SugaredLogger) *Analyzer {
40-
return &Analyzer{
41-
run: run,
42-
kinteract: kinteract,
43-
factory: NewFactory(run, kinteract),
44-
assembler: llmcontext.NewAssembler(run, kinteract, logger),
45-
logger: logger,
28+
// ExecuteAnalysis performs the complete LLM analysis workflow.
29+
// This is the single entry point called by the reconciler.
30+
func ExecuteAnalysis(
31+
ctx context.Context,
32+
run *params.Run,
33+
kinteract kubeinteraction.Interface,
34+
logger *zap.SugaredLogger,
35+
repo *v1alpha1.Repository,
36+
pr *tektonv1.PipelineRun,
37+
event *info.Event,
38+
prov provider.Interface,
39+
) error {
40+
if repo.Spec.Settings == nil || repo.Spec.Settings.AIAnalysis == nil || !repo.Spec.Settings.AIAnalysis.Enabled {
41+
logger.Debug("AI analysis not configured or disabled, skipping")
42+
return nil
4643
}
47-
}
4844

49-
// AnalyzeRequest represents a request for LLM analysis.
50-
type AnalyzeRequest struct {
51-
PipelineRun *tektonv1.PipelineRun
52-
Event *info.Event
53-
Repository *v1alpha1.Repository
54-
Provider provider.Interface
55-
}
45+
logger.Infof("Starting LLM analysis for pipeline %s/%s", pr.Namespace, pr.Name)
5646

57-
// Analyze performs LLM analysis based on the repository configuration.
58-
func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]AnalysisResult, error) {
59-
if request == nil {
60-
return nil, fmt.Errorf("analysis request is required")
47+
results, err := analyze(ctx, run, kinteract, logger, repo, pr, event, prov)
48+
if err != nil {
49+
return fmt.Errorf("LLM analysis failed: %w", err)
6150
}
62-
if request.Repository == nil {
63-
return nil, nil
51+
52+
if len(results) == 0 {
53+
logger.Debug("No analysis results generated")
54+
return nil
6455
}
6556

66-
if request.Repository.Spec.Settings == nil || request.Repository.Spec.Settings.AIAnalysis == nil {
67-
a.logger.With(
68-
"repository", request.Repository.Name,
69-
"namespace", request.Repository.Namespace,
70-
).Debug("No AI analysis configuration found, skipping analysis")
57+
for _, result := range results {
58+
if result.Error != nil {
59+
logger.Warnf("Analysis failed for role %s: %v", result.Role, result.Error)
60+
continue
61+
}
62+
if result.Response == nil {
63+
logger.Warnf("No response for role %s", result.Role)
64+
continue
65+
}
66+
67+
logger.Infof("Processing LLM analysis result for role %s, tokens used: %d", result.Role, result.Response.TokensUsed)
68+
69+
// Find the role config and validate output destination
70+
var roleConfig *v1alpha1.AnalysisRole
71+
for i := range repo.Spec.Settings.AIAnalysis.Roles {
72+
if repo.Spec.Settings.AIAnalysis.Roles[i].Name == result.Role {
73+
roleConfig = &repo.Spec.Settings.AIAnalysis.Roles[i]
74+
break
75+
}
76+
}
77+
if roleConfig != nil {
78+
output := roleConfig.GetOutput()
79+
if output != "pr-comment" {
80+
logger.Warnf("Unsupported output destination %q for role %s, skipping (only 'pr-comment' is supported)", output, result.Role)
81+
continue
82+
}
83+
}
84+
85+
if err := postPRComment(ctx, result, event, prov, logger); err != nil {
86+
logger.Warnf("Failed to handle output for role %s: %v", result.Role, err)
87+
}
88+
}
89+
90+
return nil
91+
}
92+
93+
// analyze performs LLM analysis based on the repository configuration.
94+
func analyze(
95+
ctx context.Context,
96+
run *params.Run,
97+
kinteract kubeinteraction.Interface,
98+
logger *zap.SugaredLogger,
99+
repo *v1alpha1.Repository,
100+
pr *tektonv1.PipelineRun,
101+
event *info.Event,
102+
prov provider.Interface,
103+
) ([]AnalysisResult, error) {
104+
if repo == nil || repo.Spec.Settings == nil || repo.Spec.Settings.AIAnalysis == nil {
71105
return nil, nil
72106
}
73107

74-
config := request.Repository.Spec.Settings.AIAnalysis
108+
config := repo.Spec.Settings.AIAnalysis
75109
if !config.Enabled {
76-
a.logger.With(
77-
"repository", request.Repository.Name,
78-
"namespace", request.Repository.Namespace,
79-
).Debug("AI analysis is disabled, skipping analysis")
80110
return nil, nil
81111
}
82112

83-
analysisLogger := a.logger.With(
113+
analysisLogger := logger.With(
84114
"provider", config.Provider,
85-
"pipeline_run", request.PipelineRun.Name,
86-
"namespace", request.PipelineRun.Namespace,
87-
"repository", request.Repository.Name,
115+
"pipeline_run", pr.Name,
116+
"namespace", pr.Namespace,
117+
"repository", repo.Name,
88118
"roles_count", len(config.Roles),
89119
)
90120

91121
analysisLogger.Info("Starting LLM analysis")
92122

93-
if err := a.validateConfig(config); err != nil {
123+
if err := validateAnalysisConfig(config); err != nil {
94124
analysisLogger.With("error", err).Error("Invalid AI analysis configuration")
95125
return nil, fmt.Errorf("invalid AI analysis configuration: %w", err)
96126
}
97127

98-
// Secret must be in the same namespace as the Repository CR
99-
namespace := request.Repository.Namespace
128+
namespace := repo.Namespace
129+
assembler := llmcontext.NewAssembler(run, kinteract, logger)
100130

101-
// Build CEL context for role filtering
102-
celContext, err := a.assembler.BuildCELContext(request.PipelineRun, request.Event, request.Repository)
131+
celContext, err := assembler.BuildCELContext(pr, event, repo)
103132
if err != nil {
104133
analysisLogger.With("error", err).Error("Failed to build CEL context")
105134
return nil, fmt.Errorf("failed to build CEL context: %w", err)
106135
}
107136

108-
// Process each role
109137
results := []AnalysisResult{}
110138
contextCache := make(map[string]map[string]any)
111139

112140
for _, role := range config.Roles {
113141
roleLogger := analysisLogger.With("role", role.Name)
114142

115-
shouldTrigger, err := a.shouldTriggerRole(role, celContext, request.PipelineRun)
143+
shouldTrigger, err := shouldTriggerRole(role, celContext, pr)
116144
if err != nil {
117145
roleLogger.With("error", err, "cel_expression", role.OnCEL).Warn("Failed to evaluate CEL expression")
118146
results = append(results, AnalysisResult{
@@ -133,13 +161,7 @@ func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]Anal
133161
var roleContext map[string]any
134162
var cached bool
135163
if roleContext, cached = contextCache[contextKey]; !cached {
136-
roleContext, err = a.assembler.BuildContext(
137-
ctx,
138-
request.PipelineRun,
139-
request.Event,
140-
role.ContextItems,
141-
request.Provider,
142-
)
164+
roleContext, err = assembler.BuildContext(ctx, pr, event, role.ContextItems, prov)
143165
if err != nil {
144166
roleLogger.With("error", err).Warn("Failed to build context for role")
145167
results = append(results, AnalysisResult{
@@ -151,8 +173,17 @@ func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]Anal
151173
contextCache[contextKey] = roleContext
152174
}
153175

154-
// Create LLM client for this role
155-
client, err := a.createClient(ctx, config, namespace, &role)
176+
client, err := NewClient(
177+
ctx,
178+
AIProvider(config.Provider),
179+
config.TokenSecretRef,
180+
namespace,
181+
kinteract,
182+
config.GetAPIURL(),
183+
role.GetModel(),
184+
config.TimeoutSeconds,
185+
config.MaxTokens,
186+
)
156187
if err != nil {
157188
roleLogger.With("error", err).Warn("Failed to create LLM client for role")
158189
results = append(results, AnalysisResult{
@@ -162,20 +193,18 @@ func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]Anal
162193
continue
163194
}
164195

165-
// Create analysis request
166-
analysisRequest := &ltypes.AnalysisRequest{
196+
analysisRequest := &AnalysisRequest{
167197
Prompt: role.Prompt,
168198
Context: roleContext,
169199
MaxTokens: config.MaxTokens,
170200
TimeoutSeconds: config.TimeoutSeconds,
171201
}
172202

173-
// Apply defaults
174203
if analysisRequest.MaxTokens == 0 {
175-
analysisRequest.MaxTokens = ltypes.DefaultConfig.MaxTokens
204+
analysisRequest.MaxTokens = DefaultMaxTokens
176205
}
177206
if analysisRequest.TimeoutSeconds == 0 {
178-
analysisRequest.TimeoutSeconds = ltypes.DefaultConfig.TimeoutSeconds
207+
analysisRequest.TimeoutSeconds = DefaultTimeoutSeconds
179208
}
180209

181210
roleLogger.With(
@@ -184,8 +213,7 @@ func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]Anal
184213
"context_items", len(roleContext),
185214
).Debug("Sending analysis request to LLM")
186215

187-
// Perform analysis
188-
var response *ltypes.AnalysisResponse
216+
var response *AnalysisResponse
189217
var analysisErr error
190218
analysisStart := time.Now()
191219

@@ -195,7 +223,7 @@ func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]Anal
195223
for attempt := 1; attempt <= maxRetries; attempt++ {
196224
response, analysisErr = client.Analyze(ctx, analysisRequest)
197225
if analysisErr == nil {
198-
break // Success
226+
break
199227
}
200228

201229
roleLogger.With(
@@ -256,6 +284,26 @@ func (a *Analyzer) Analyze(ctx context.Context, request *AnalyzeRequest) ([]Anal
256284
return results, nil
257285
}
258286

287+
// postPRComment posts LLM analysis as a PR comment.
288+
func postPRComment(ctx context.Context, result AnalysisResult, event *info.Event, prov provider.Interface, logger *zap.SugaredLogger) error {
289+
if event.PullRequestNumber == 0 {
290+
logger.Debug("No pull request associated with this event, skipping PR comment")
291+
return nil
292+
}
293+
294+
comment := fmt.Sprintf("## 🤖 AI Analysis - %s\n\n%s\n\n---\n*Generated by Pipelines-as-Code LLM Analysis*",
295+
result.Role, result.Response.Content)
296+
297+
updateMarker := fmt.Sprintf("llm-analysis-%s", result.Role)
298+
299+
if err := prov.CreateComment(ctx, event, comment, updateMarker); err != nil {
300+
return fmt.Errorf("failed to create PR comment: %w", err)
301+
}
302+
303+
logger.Infof("Posted LLM analysis as PR comment for role %s", result.Role)
304+
return nil
305+
}
306+
259307
// getContextCacheKey generates a unique key for a context configuration.
260308
func getContextCacheKey(config *v1alpha1.ContextConfig) string {
261309
if config == nil {
@@ -275,7 +323,6 @@ func getContextCacheKey(config *v1alpha1.ContextConfig) string {
275323
)
276324
}
277325

278-
// countSuccessfulResults counts the number of successful analysis results.
279326
func countSuccessfulResults(results []AnalysisResult) int {
280327
count := 0
281328
for _, result := range results {
@@ -286,7 +333,6 @@ func countSuccessfulResults(results []AnalysisResult) int {
286333
return count
287334
}
288335

289-
// countFailedResults counts the number of failed analysis results.
290336
func countFailedResults(results []AnalysisResult) int {
291337
count := 0
292338
for _, result := range results {
@@ -299,17 +345,16 @@ func countFailedResults(results []AnalysisResult) int {
299345

300346
// shouldTriggerRole evaluates the CEL expression to determine if a role should be triggered.
301347
// If no on_cel is provided, defaults to triggering only for failed PipelineRuns.
302-
func (a *Analyzer) shouldTriggerRole(role v1alpha1.AnalysisRole, celContext map[string]any, pr *tektonv1.PipelineRun) (bool, error) {
348+
func shouldTriggerRole(role v1alpha1.AnalysisRole, celContext map[string]any, pr *tektonv1.PipelineRun) (bool, error) {
303349
if role.OnCEL == "" {
304350
succeededCondition := pr.Status.GetCondition(apis.ConditionSucceeded)
305-
306351
return succeededCondition != nil && succeededCondition.Status == corev1.ConditionFalse, nil
307352
}
308353

309354
result, err := cel.Value(role.OnCEL, celContext["body"],
310-
make(map[string]string), // headers - empty for pipeline context
311-
make(map[string]string), // pac params - empty for now
312-
make(map[string]any)) // files - empty for pipeline context
355+
make(map[string]string),
356+
make(map[string]string),
357+
make(map[string]any))
313358
if err != nil {
314359
return false, fmt.Errorf("failed to evaluate CEL expression '%s': %w", role.OnCEL, err)
315360
}
@@ -321,8 +366,8 @@ func (a *Analyzer) shouldTriggerRole(role v1alpha1.AnalysisRole, celContext map[
321366
return false, fmt.Errorf("CEL expression '%s' did not return boolean value", role.OnCEL)
322367
}
323368

324-
// validateConfig validates the AI analysis configuration.
325-
func (a *Analyzer) validateConfig(config *v1alpha1.AIAnalysisConfig) error {
369+
// validateAnalysisConfig validates the AI analysis configuration.
370+
func validateAnalysisConfig(config *v1alpha1.AIAnalysisConfig) error {
326371
if config.Provider == "" {
327372
return fmt.Errorf("provider is required")
328373
}
@@ -352,26 +397,3 @@ func (a *Analyzer) validateConfig(config *v1alpha1.AIAnalysisConfig) error {
352397

353398
return nil
354399
}
355-
356-
// createClient creates an LLM client based on the configuration and role.
357-
func (a *Analyzer) createClient(ctx context.Context, config *v1alpha1.AIAnalysisConfig, namespace string, role *v1alpha1.AnalysisRole) (ltypes.Client, error) {
358-
clientConfig := &ClientConfig{
359-
Provider: ltypes.AIProvider(config.Provider),
360-
APIURL: config.GetAPIURL(),
361-
Model: role.GetModel(),
362-
TokenSecretRef: config.TokenSecretRef,
363-
TimeoutSeconds: config.TimeoutSeconds,
364-
MaxTokens: config.MaxTokens,
365-
}
366-
367-
if err := a.factory.ValidateConfig(clientConfig); err != nil {
368-
return nil, fmt.Errorf("invalid client configuration: %w", err)
369-
}
370-
371-
return a.factory.CreateClient(ctx, clientConfig, namespace)
372-
}
373-
374-
// GetSupportedProviders returns the list of supported LLM providers.
375-
func (a *Analyzer) GetSupportedProviders() []ltypes.AIProvider {
376-
return a.factory.GetSupportedProviders()
377-
}

0 commit comments

Comments (0)