-
Notifications
You must be signed in to change notification settings - Fork 61
Expand file tree
/
Copy pathperf_emulation_handle_results.yml
More file actions
170 lines (151 loc) · 6.14 KB
/
perf_emulation_handle_results.yml
File metadata and controls
170 lines (151 loc) · 6.14 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
# Companion workflow to "Emulated Performance Test": runs after that workflow
# completes and post-processes its artifacts. It runs with write permissions
# in the base repository, which is why the benchmark itself runs separately
# (fork PRs cannot be trusted with these permissions).
name: Emulated Performance Test Results Handler
on:
  workflow_run:
    workflows: [Emulated Performance Test]
    types:
      - completed
# Allow only one in-flight run per (trigger event, branch) for this workflow;
# a newer run cancels the older one.
concurrency:
  group: ${{ github.event.workflow_run.event }}-${{ github.event.workflow_run.head_branch }}-${{ github.workflow }}
  cancel-in-progress: true
# contents: write      -> publish/update the benchmark pre-release
# pull-requests: write -> create/update the PR results comment
permissions:
  contents: write
  pull-requests: write
jobs:
  # Sanity-check the helper scripts' own test suites before the downstream
  # job trusts their output. Each matrix entry is one self-test script.
  check_scripts:
    # Only react to successful runs of the upstream benchmark workflow.
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    runs-on: ubuntu-24.04
    name: ARM Emulated Benchmark - Script Check
    strategy:
      # Let each script's check finish independently so one failure does not
      # hide results from the other.
      fail-fast: false
      matrix:
        test_script:
          - scripts/perf/tests/benchmark_results_comment/test.sh
          - scripts/perf/tests/serialize_results/test.sh
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # Full history; the test scripts may inspect git metadata.
          fetch-depth: 0
      - name: Check Script
        run: |
          pip3 install msgpack==1.1.0
          ./${{ matrix.test_script }}
handle_results:
needs: check_scripts
if: ${{ github.event.workflow_run.conclusion == 'success' }}
runs-on: ubuntu-24.04
name: ARM Emulated Benchmark - Handle Results
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Download Results from master
if: ${{ github.event.workflow_run.event == 'push' && github.event.workflow_run.head_branch == 'master' }}
uses: dawidd6/action-download-artifact@v12
with:
workflow: perf_emulation.yml
path: artifacts
allow_forks: false
- name: Download Results from PR
if: ${{ github.event.workflow_run.event == 'pull_request' }}
uses: dawidd6/action-download-artifact@v12
with:
workflow: perf_emulation.yml
path: artifacts
# The artifact needs to be downloaded from a PR run that comes from a forked repository
allow_forks: true
- name: Move JSON files to a single folder
run: |
mkdir input
find artifacts -name "*.json" -exec mv {} input/ \;
- name: Collect 'master' Results
uses: robinraju/release-downloader@v1
continue-on-error: true # The release may not exist yet
with:
preRelease: true
tag: emulated-benchmark-latest
fileName: results*.mpk
- name: Move PR data files to current folder
if: ${{ github.event.workflow_run.event == 'pull_request' }}
run: |
mv artifacts/pr_number/pr_number .
- name: Prepare Comment
if: ${{ github.event.workflow_run.event == 'pull_request' }}
run: |
pip3 install msgpack==1.1.0
if ls results*.mpk 1> /dev/null 2>&1; then
python3 scripts/perf/benchmark_results_comment.py \
--previous results*.mpk \
--new input/results*.json \
--output comment.md
else
echo "No previous results found, generating comment without comparison."
python3 scripts/perf/benchmark_results_comment.py \
--new input/results*.json \
--output comment.md
fi
- name: Comment PR
if: ${{ github.event.workflow_run.event == 'pull_request' }}
uses: actions/github-script@v8
with:
script: |
const fs = require('fs');
const commentPath = 'comment.md';
const prPath = 'pr_number';
if (!fs.existsSync(commentPath)) {
throw new Error('Error: comment.md not found! Exiting.');
}
if (!fs.existsSync(prPath)) {
throw new Error('Error: pr_number not found! Exiting.');
}
const commentBody = fs.readFileSync(commentPath, 'utf8').trim();
const prNumber = Number(fs.readFileSync(prPath, 'utf8').trim());
// Try to find if a comment already exists so we avoid spamming the PR with comments
const { data: comments } = await github.rest.issues.listComments({
issue_number: prNumber,
owner: context.repo.owner,
repo: context.repo.repo,
});
const existingComment = comments.find(comment =>
comment.body.includes(':robot: This comment was automatically generated by a bot.')
);
try {
// Now we either edit the already existing comment or we generate a new one
if (existingComment) {
await github.rest.issues.updateComment({
comment_id: existingComment.id,
owner: context.repo.owner,
repo: context.repo.repo,
body: commentBody
});
console.log(`Updated existing comment (ID: ${existingComment.id})`);
} else {
await github.rest.issues.createComment({
issue_number: prNumber,
owner: context.repo.owner,
repo: context.repo.repo,
body: commentBody
});
console.log('Created new comment');
}
} catch (error) {
console.error("Error:", error.message);
}
- name: Serialize Results
if: ${{ github.event.workflow_run.event == 'push' && github.event.workflow_run.head_branch == 'master' }}
run: |
# Here the input folder already exists from a previous step
pip3 install msgpack==1.1.0
mkdir output
find . -maxdepth 1 \( -name "results*.mpk" \) -exec mv -t input {} +
python scripts/perf/serialize_results.py --input input --output output --commit-hash ${{ github.sha }}
- name: Store Results in Benchmark Release
if: ${{ github.event.workflow_run.event == 'push' && github.event.workflow_run.head_branch == 'master' }}
uses: softprops/action-gh-release@v2
with:
name: Emulated Benchmark Latest
files: output/results*.mpk
tag_name: emulated-benchmark-latest
prerelease: true
body: This pre-release is automatically generated and serves as a repository for benchmark results.