Skip to content

Commit 63ee348

Browse files
authored
Merge branch 'nf-core:master' into master
2 parents 26d9a22 + f54d064 commit 63ee348

91 files changed

Lines changed: 1506 additions & 1099 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.github/CODEOWNERS

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,5 @@
1111
**/lrz_cm4** @nschan
1212
**/crg** @joseespinosa
1313
**/iris** @nikhil
14+
**/mahuika** @jen-reeve
15+
**/purdue_** @aseetharam

.github/workflows/main.yml

Lines changed: 41 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,11 @@
11
name: Configs tests
2-
3-
on: [pull_request, push]
2+
on:
3+
pull_request:
4+
branches:
5+
- "*"
6+
push:
7+
branches:
8+
- master
49

510
# Cancel if a newer run is started
611
concurrency:
@@ -17,21 +22,45 @@ jobs:
1722
run: python ${GITHUB_WORKSPACE}/bin/cchecker.py ${GITHUB_WORKSPACE}/nfcore_custom.config ${GITHUB_WORKSPACE}/.github/workflows/main.yml
1823

1924
check_nextflow_config:
20-
runs-on: ubuntu-latest
2125
name: Check if nextflow config runs in repository root
26+
runs-on: ubuntu-latest
27+
strategy:
28+
fail-fast: false
29+
matrix:
30+
NXF_VER:
31+
- 25.04.0
32+
- latest-everything
2233
steps:
2334
- uses: actions/checkout@v4
24-
- name: Install Nextflow
25-
run: |
26-
wget -qO- get.nextflow.io | bash
27-
sudo mv nextflow /usr/local/bin/
35+
- name: Set up Nextflow
36+
uses: nf-core/setup-nextflow@v2
37+
with:
38+
version: ${{ matrix.NXF_VER }}
2839
- run: nextflow config -show-profiles ${GITHUB_WORKSPACE}
2940

41+
lint_nextflow_config:
42+
name: Check if nextflow files are valid with nextflow lint
43+
runs-on: ubuntu-latest
44+
strategy:
45+
fail-fast: false
46+
matrix:
47+
NXF_VER:
48+
- 25.04.0
49+
- latest-everything
50+
steps:
51+
- uses: actions/checkout@v4
52+
- name: Set up Nextflow
53+
uses: nf-core/setup-nextflow@v2
54+
with:
55+
version: ${{ matrix.NXF_VER }}
56+
- run: nextflow lint ${GITHUB_WORKSPACE}
57+
3058
profile_test:
3159
runs-on: ubuntu-latest
3260
name: Run ${{ matrix.profile }} profile
3361
needs: test_all_profiles
3462
strategy:
63+
fail-fast: false
3564
matrix:
3665
profile:
3766
- "abims"
@@ -46,7 +75,6 @@ jobs:
4675
- "bi"
4776
- "bigpurple"
4877
- "bih"
49-
- "binac"
5078
- "binac2"
5179
- "biohpc_gen"
5280
- "biowulf"
@@ -117,6 +145,7 @@ jobs:
117145
- "lugh"
118146
- "m3c"
119147
- "maestro"
148+
- "mahuika"
120149
- "mana"
121150
- "marjorie"
122151
- "lovelace"
@@ -138,9 +167,11 @@ jobs:
138167
- "pawsey_nimbus"
139168
- "pawsey_setonix"
140169
- "pdc_kth"
141-
- "pe2"
142170
- "phoenix"
143171
- "psmn"
172+
- "purdue_bell"
173+
- "purdue_gautschi"
174+
- "purdue_negishi"
144175
- "qmul_apocrita"
145176
- "rki"
146177
- "rosalind"
@@ -156,7 +187,6 @@ jobs:
156187
- "seg_globe"
157188
- "self_hosted_runner"
158189
- "shu_bmrc"
159-
- "software_license"
160190
- "stjude"
161191
- "tes"
162192
- "tigem"
@@ -198,7 +228,7 @@ jobs:
198228
uses: nf-core/setup-nextflow@v2
199229
with:
200230
version: "latest-everything"
201-
- name: Check ${{ matrix.profile }} profile
231+
- name: Check ${{ matrix.profile }} profile
202232
env:
203233
SCRATCH: "~"
204234
NXF_GLOBAL_CONFIG: awsbatch.config

conf/alliance_canada.config

Lines changed: 48 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,30 @@
11
params {
2-
config_profile_description = 'Alliance Canada HPC config'
3-
config_profile_contact = 'Jerry Li (@jerryakii)'
2+
config_profile_contact = 'Cian Monnin (github/CMonnin)'
43
config_profile_url = 'https://docs.alliancecan.ca/wiki/Nextflow'
54

5+
// default to narval settings if no cluster_name defined
6+
cluster_name = System.getenv('CC_CLUSTER') ?: 'narval'
7+
8+
config_profile_description = params.cluster_name == 'narval' ? 'Alliance Canada (Narval) cluster profile provided by nf-core/configs.' :
9+
params.cluster_name == 'fir' ? 'Alliance Canada (Fir) cluster profile provided by nf-core/configs.' :
10+
params.cluster_name == 'nibi' ? 'Alliance Canada (Nibi) cluster profile provided by nf-core/configs.' :
11+
params.cluster_name == 'rorqual' ? 'Alliance Canada (Rorqual) cluster profile provided by nf-core/configs.' :
12+
params.cluster_name == 'trillium' ? 'Alliance Canada (Trillium) cluster profile provided by nf-core/configs.' :
13+
'Alliance Canada HPC config'
14+
15+
max_cpus = params.cluster_name == 'nibi' ? 192 :
16+
params.cluster_name == 'rorqual' ? 192 :
17+
params.cluster_name == 'fir' ? 192 :
18+
params.cluster_name == 'trillium' ? null :
19+
64
20+
max_memory =
21+
params.cluster_name == 'narval' ? 249.GB :
22+
params.cluster_name == 'nibi' ? 750.GB :
23+
params.cluster_name == 'rorqual' ? 750.GB :
24+
params.cluster_name == 'fir' ? 750.GB :
25+
params.cluster_name == 'trillium' ? null :
26+
240.GB
627
max_time = 168.h
7-
max_cpus = 64
8-
max_memory = 240.GB
928
}
1029

1130
cleanup = true
@@ -16,109 +35,42 @@ singularity {
1635
}
1736

1837
apptainer {
19-
autoMounts = true }
38+
autoMounts = true
39+
}
2040

2141
// Group name for resource allocation must be supplied as environment variable
2242
process {
2343
executor = 'slurm'
24-
clusterOptions = "--account=${System.getenv('SLURM_ACCOUNT')}"
44+
clusterOptions = params.cluster_name == 'trillium' ? "--account=${System.getenv('SLURM_ACCOUNT')} --nodes=1": "--account=${System.getenv('SLURM_ACCOUNT')}"
2545
maxRetries = 1
2646
errorStrategy = { task.exitStatus in [125,139] ? 'retry' : 'finish' }
27-
cpu = 1
28-
time = '3h'
47+
cpus = 1
48+
time = '1h'
49+
50+
// NOTE:
51+
// these resourceLimits are set to baseline CPU for each cluster
52+
// Currently missing are configs for GPUs
53+
2954
resourceLimits = [
30-
memory: 240.GB,
31-
cpus: 64,
32-
time: 168.h
33-
]
55+
cpus: params.cluster_name == 'nibi'? 192 :
56+
params.cluster_name == 'rorqual' ? 192 :
57+
params.cluster_name == 'fir' ? 192 :
58+
params.cluster_name == 'trillium' ? null:
59+
64,
60+
memory: params.cluster_name == 'narval' ? 249.GB :
61+
params.cluster_name == 'nibi' ? 750.GB :
62+
params.cluster_name == 'rorqual' ? 750.GB :
63+
params.cluster_name == 'fir' ? 750.GB :
64+
params.cluster_name == 'trillium' ? null :
65+
240.GB,
66+
time: 168.h
67+
]
68+
3469
}
3570

3671
executor {
3772
pollInterval = '60 sec'
3873
submitRateLimit = '60/1min'
39-
queueSize = 100
40-
}
41-
42-
// Cluster name is available as environment variable
43-
// If not found, default to narval as it has the lowest limits
44-
hostname = "narval"
45-
try {
46-
hostname = "${System.getenv('HOSTNAME')}"
47-
} catch (java.io.IOException e) {
48-
System.err.println("WARNING: Could not determine current cluster, defaulting to narval")
74+
queueSize = params.cluster_name == 'trillium' ? 500 : 100
4975
}
5076

51-
// Cluster Narval
52-
if (hostname.startsWith("narval")) {
53-
params.config_profile_description = 'Alliance Canada (Narval) cluster profile provided by nf-core/configs.'
54-
params.max_memory = 249.GB
55-
params.max_cpus = 64
56-
process {
57-
resourceLimits = [
58-
memory: 249.GB,
59-
cpus: 64,
60-
time: 168.h
61-
]
62-
}
63-
}
64-
65-
// Cluster Rorqual
66-
if (hostname.startsWith("rorqual")) {
67-
params.config_profile_description = 'Alliance Canada (Rorqual) cluster profile provided by nf-core/configs.'
68-
params.max_memory = 750.GB
69-
params.max_cpus = 192
70-
process {
71-
resourceLimits = [
72-
memory: 750.GB,
73-
cpus: 192,
74-
time: 168.h
75-
]
76-
}
77-
}
78-
79-
// Cluster Trillium
80-
if (hostname.startsWith("tri")) {
81-
params.config_profile_description = 'Alliance Canada (Trillium) cluster profile provided by nf-core/configs.'
82-
params.max_memory = null
83-
params.max_cpus = null
84-
process {
85-
clusterOptions = "--account=${System.getenv('SLURM_ACCOUNT')} --nodes=1"
86-
resourceLimits = [
87-
time: 168.h
88-
]
89-
}
90-
executor {
91-
queueSize = 500
92-
}
93-
}
94-
95-
// Cluster Nibi
96-
if (hostname.contains("nibi")) {
97-
params.config_profile_description = 'Alliance Canada (Nibi) cluster profile provided by nf-core/configs.'
98-
params.max_memory = 750.GB
99-
params.max_cpus = 192
100-
process {
101-
resourceLimits = [
102-
memory: 750.GB,
103-
cpus: 192,
104-
time: 168.h
105-
]
106-
}
107-
}
108-
109-
// Cluster Fir
110-
// not sure what the hostname is so set it as a profile
111-
profiles {
112-
fir {
113-
params.config_profile_description = 'Alliance Canada (Fir) cluster profile provided by nf-core/configs.'
114-
params.max_memory = 750.GB
115-
params.max_cpus = 192
116-
process {
117-
resourceLimits = [
118-
memory: 750.GB,
119-
cpus: 192,
120-
time: 168.h
121-
]
122-
}
123-
}
124-
}

conf/bi.config

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,15 @@
1-
params{
1+
// Set parameters to ignore for validation
2+
validation {
3+
ignoreParams = ['bi_globalConfig']
4+
}
5+
6+
params {
27
config_profile_description = 'Boehringer Ingelheim internal profile provided by nf-core/configs.'
38
config_profile_contact = 'Alexander Peltzer (@apeltzer)'
49
config_profile_url = 'https://www.boehringer-ingelheim.com/'
10+
bi_globalConfig = System.getenv('NXF_GLOBAL_CONFIG') ?:
11+
System.err.println("WARNING: For bi.config requires NXF_GLOBAL_CONFIG env var to be set. Point it to global.config file if you want to use this profile.")
512
}
613

7-
params.globalConfig = System.getenv('NXF_GLOBAL_CONFIG')
8-
if(params.globalConfig == null)
9-
{
10-
def errorMessage = "WARNING: For bi.config requires NXF_GLOBAL_CONFIG env var to be set. Point it to global.config file if you want to use this profile."
11-
System.err.println(errorMessage)
12-
}else{
13-
includeConfig params.globalConfig
14-
}
14+
// Include the global config if set
15+
includeConfig(params.bi_globalConfig ?: '/dev/null')

conf/bigpurple.config

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,9 @@
1-
singularityDir = "/gpfs/scratch/${USER}/singularity_images_nextflow"
2-
31
params {
42
config_profile_description = """
53
NYU School of Medicine BigPurple cluster profile provided by nf-core/configs.
64
module load both singularity/3.1 and squashfs-tools/4.3 before running the pipeline with this profile!!
75
Run from your scratch or lab directory - Nextflow makes a lot of files!!
8-
Also consider running the pipeline on a compute node (srun --pty /bin/bash -t=01:00:00) the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node and will take some time. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in ${singularityDir}
6+
Also consider running the pipeline on a compute node (srun --pty /bin/bash -t=01:00:00) the first time, as it will be pulling the docker image, which will be converted into a singularity image, which is heavy on the login node and will take some time. Subsequent runs can be done on the login node, as the docker image will only be pulled and converted once. By default the images will be stored in /gpfs/scratch/${System.getenv("USER")}/singularity_images_nextflow
97
""".stripIndent()
108
config_profile_contact = 'Tobias Schraink (@tobsecret)'
119
config_profile_url = 'https://github.com/nf-core/configs/blob/master/docs/bigpurple.md'
@@ -14,7 +12,7 @@ params {
1412
singularity {
1513
enabled = true
1614
autoMounts = true
17-
cacheDir = singularityDir
15+
cacheDir = "/gpfs/scratch/${System.getenv("USER")}/singularity_images_nextflow"
1816
}
1917

2018
process {

conf/bih.config

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,11 @@ process {
2323
apptainer {
2424
enabled = true
2525
autoMounts = true
26-
cacheDir = "${params.scratch}/apptainer_img_${USER}"
26+
cacheDir = "${params.scratch}/apptainer_img_${System.getenv('USER')}"
2727
}
2828

2929
cleanup = true
30-
workDir = "${params.scratch}/work_${USER}"
30+
workDir = "${params.scratch}/work_${System.getenv('USER')}"
3131

3232
profiles {
3333
debug {

conf/binac.config

Lines changed: 0 additions & 30 deletions
This file was deleted.

conf/binac2.config

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ profiles {
2222
enabled = true
2323
autoMounts = true
2424
pullTimeout = '120m'
25-
cacheDir = "/pfs/10/project/apptainer_cache/${USER}"
25+
cacheDir = "/pfs/10/project/apptainer_cache/${System.getenv('USER')}"
2626
envWhitelist = 'CUDA_VISIBLE_DEVICES'
2727
}
2828

0 commit comments

Comments
 (0)