name: Performance Regression Tests
on:
push:
branches: [ main, develop, '012-python-bindings-complete' ]
pull_request:
branches: [ main, develop ]
schedule:
# Run daily at 2 AM UTC
- cron: '0 2 * * *'
env:
CARGO_TERM_COLOR: always
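# pull-requests and issues write access is required by the PR comment and regression-issue steps below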
permissions:
contents: read
pull-requests: write
issues: write
jobs:
baseline-benchmarks:
name: Baseline Performance Benchmarks
runs-on: ubuntu-latest
if: github.event_name == 'push' || github.event_name == 'schedule'
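# Baselines are captured on pushes and the nightly run, then published as the baseline-results artifact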
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo registry
uses: actions/cache@v4
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo index
uses: actions/cache@v4
with:
path: ~/.cargo/git
key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
- name: Cache cargo build
uses: actions/cache@v4
with:
path: target
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install maturin pytest hypothesis
- name: Build release binary
run: cargo build --release
- name: Run criterion benchmarks
run: |
# Check if python_cli_comparison benchmark exists
if cargo bench --bench python_cli_comparison -- --list 2>/dev/null | grep -q "python_cli_comparison"; then
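# --save-baseline stores these results under target/criterion so later runs can compare against them with --baseline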
cargo bench --bench python_cli_comparison \
-- --output-format html \
--save-baseline baseline
else
echo "Warning: python_cli_comparison benchmark not found, skipping baseline benchmarks"
# Create empty baseline results for other jobs
mkdir -p target/criterion
echo "{}" > target/criterion/baseline_placeholder.json
fi
- name: Upload baseline results
uses: actions/upload-artifact@v4
with:
name: baseline-results
path: |
target/criterion/
**/benchmark_report.html
performance-comparison:
name: Performance Comparison
runs-on: ubuntu-latest
needs: baseline-benchmarks
if: github.event_name == 'pull_request'
steps:
- uses: actions/checkout@v4
with:
# Fetch the base branch for comparison
fetch-depth: 0
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo registry
uses: actions/cache@v4
with:
path: ~/.cargo/registry
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install maturin pytest hypothesis
- name: Build release binary
run: cargo build --release
- name: Download baseline results
uses: actions/download-artifact@v4
with:
name: baseline-results
path: target/criterion/
- name: Run benchmarks with baseline comparison
run: |
# Check if python_cli_comparison benchmark exists
if cargo bench --bench python_cli_comparison -- --list 2>/dev/null | grep -q "python_cli_comparison"; then
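# --baseline compares this run against the saved "baseline" results and reports the relative change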
cargo bench --bench python_cli_comparison \
-- --baseline baseline \
--output-format html
else
echo "Warning: python_cli_comparison benchmark not found, skipping benchmark comparison"
# Create placeholder report
mkdir -p target/criterion
echo '{"summary": "Benchmark skipped - python_cli_comparison not found"}' > target/criterion/performance_report.json
fi
- name: Check for performance regressions
run: |
# Check if the script exists
if [ -f "scripts/check_performance_regression.py" ]; then
# Check if any benchmark regressed by more than 10%
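# The script is assumed to exit non-zero on a regression beyond the threshold, which fails this step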
python scripts/check_performance_regression.py \
--threshold 0.10 \
--baseline target/criterion/baseline/ \
--current target/criterion/
else
echo "Warning: check_performance_regression.py not found, skipping regression check"
fi
- name: Upload performance report
uses: actions/upload-artifact@v4
with:
name: performance-report-${{ github.sha }}
path: |
target/criterion/reports/
**/benchmark_report.html
- name: Comment PR with results
if: github.event_name == 'pull_request'
uses: actions/github-script@v6
with:
script: |
const fs = require('fs');
const path = './target/criterion/performance_report.json';
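// The report is expected to provide a "summary" string and, when regressions occur, a "regressions" array.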
if (fs.existsSync(path)) {
const report = JSON.parse(fs.readFileSync(path, 'utf8'));
const comment = `
## Performance Test Results
${report.summary}
${report.regressions && report.regressions.length > 0 ? '⚠️ **Performance regressions detected**' : '✅ **No significant regressions**'}
[View detailed report](${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID})
`;
await github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: comment
});
}
memory-profiling:
name: Memory Profiling
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y valgrind massif-visualizer
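# valgrind's massif tool writes massif.out.<pid> files; the upload step below collects any that are produced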
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip
pip install maturin pytest memory-profiler
- name: Create virtual environment for maturin
run: |
python -m venv .venv
source .venv/bin/activate
pip install maturin pytest memory-profiler
- name: Build Python extension with debug symbols
run: |
source .venv/bin/activate
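# maturin develop builds the extension and installs it into the active virtualenv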
maturin develop --release
- name: Run memory profiling tests
run: |
# Check if the script exists
if [ -f "scripts/memory_profiling_test.py" ]; then
python scripts/memory_profiling_test.py
else
echo "Warning: memory_profiling_test.py not found, skipping memory profiling tests"
fi
- name: Upload memory profiling results
uses: actions/upload-artifact@v4
with:
name: memory-profiling-results
path: |
memory_profile_*.txt
massif.out.*
performance-trend-analysis:
name: Performance Trend Analysis
runs-on: ubuntu-latest
if: github.event_name == 'schedule'
needs: [baseline-benchmarks]
steps:
- uses: actions/checkout@v4
- name: Download all performance artifacts
uses: actions/download-artifact@v4
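# With no artifact name given, download-artifact@v4 downloads every artifact from this run, each into its own directory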
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install analysis dependencies
run: |
pip install pandas matplotlib seaborn plotly
- name: Analyze performance trends
run: |
# Check if the script exists
if [ -f "scripts/analyze_performance_trends.py" ]; then
python scripts/analyze_performance_trends.py
else
echo "Warning: analyze_performance_trends.py not found, skipping trend analysis"
fi
- name: Generate performance report
run: |
# Check if the script exists
if [ -f "scripts/generate_performance_report.py" ]; then
python scripts/generate_performance_report.py \
--output-dir performance_reports/$(date +%Y-%m-%d)
else
echo "Warning: generate_performance_report.py not found, skipping report generation"
mkdir -p performance_reports/$(date +%Y-%m-%d)
fi
- name: Upload trend analysis
uses: actions/upload-artifact@v4
with:
name: performance-trend-analysis-${{ github.run_number }}
path: |
performance_reports/
*.png
*.html
notify-performance-regression:
name: Notify on Performance Regression
runs-on: ubuntu-latest
needs: [baseline-benchmarks, performance-comparison]
if: failure()
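# failure() is true when any job listed in needs has failed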
steps:
- name: Create issue for performance regression
if: github.event_name == 'push' || github.event_name == 'schedule'
uses: actions/github-script@v6
with:
script: |
await github.rest.issues.create({
owner: context.repo.owner,
repo: context.repo.repo,
title: 'Performance Regression Detected',
body: `
Performance regression detected in commit ${context.sha}.
Please review the performance test results and investigate the cause.
**Next steps:**
1. Review the performance benchmark results
2. Identify the cause of the regression
3. Fix the issue or create a follow-up ticket
4. Update performance baselines if the change is intentional
[View workflow run](${process.env.GITHUB_SERVER_URL}/${process.env.GITHUB_REPOSITORY}/actions/runs/${process.env.GITHUB_RUN_ID})
`,
labels: ['performance', 'regression', 'bug']
})