Skip to content

Commit 0cda9f9

Browse files
authored
Merge branch 'master' into kaust
2 parents 20cee64 + 88f47fe commit 0cda9f9

27 files changed

Lines changed: 696 additions & 176 deletions

.github/CODEOWNERS

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,10 @@
77
**/unsw_katana** @jscgh
88
**/seadragon** @jiawku
99
**/fred_hutch** @derrik-gratz
10+
**/nci_gadi** @georgiesamaha @kisarur @mattdton
1011
**/roslin** @sguizard @donalddunbar
1112
**/lrz_cm4** @nschan
1213
**/crg** @joseespinosa
1314
**/iris** @nikhil
1415
**/mahuika** @jen-reeve
16+
**/purdue_** @aseetharam

.github/workflows/main.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,6 +169,9 @@ jobs:
169169
- "pdc_kth"
170170
- "phoenix"
171171
- "psmn"
172+
- "purdue_bell"
173+
- "purdue_gautschi"
174+
- "purdue_negishi"
172175
- "qmul_apocrita"
173176
- "rki"
174177
- "rosalind"

conf/alliance_canada.config

Lines changed: 48 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,30 @@
11
params {
2-
config_profile_description = 'Alliance Canada HPC config'
3-
config_profile_contact = 'Jerry Li (@jerryakii)'
2+
config_profile_contact = 'Cian Monnin (github/CMonnin)'
43
config_profile_url = 'https://docs.alliancecan.ca/wiki/Nextflow'
54

5+
// default to narval settings if no cluster_name defined
6+
cluster_name = System.getenv('CC_CLUSTER') ?: 'narval'
7+
8+
config_profile_description = params.cluster_name == 'narval' ? 'Alliance Canada (Narval) cluster profile provided by nf-core/configs.' :
9+
params.cluster_name == 'fir' ? 'Alliance Canada (Fir) cluster profile provided by nf-core/configs.' :
10+
params.cluster_name == 'nibi' ? 'Alliance Canada (Nibi) cluster profile provided by nf-core/configs.' :
11+
params.cluster_name == 'rorqual' ? 'Alliance Canada (Rorqual) cluster profile provided by nf-core/configs.' :
12+
params.cluster_name == 'trillium' ? 'Alliance Canada (Trillium) cluster profile provided by nf-core/configs.' :
13+
'Alliance Canada HPC config'
14+
15+
max_cpus = params.cluster_name == 'nibi' ? 192 :
16+
params.cluster_name == 'rorqual' ? 192 :
17+
params.cluster_name == 'fir' ? 192 :
18+
params.cluster_name == 'trillium' ? null :
19+
64
20+
max_memory =
21+
params.cluster_name == 'narval' ? 249.GB :
22+
params.cluster_name == 'nibi' ? 750.GB :
23+
params.cluster_name == 'rorqual' ? 750.GB :
24+
params.cluster_name == 'fir' ? 750.GB :
25+
params.cluster_name == 'trillium' ? null :
26+
240.GB
627
max_time = 168.h
7-
max_cpus = 64
8-
max_memory = 240.GB
928
}
1029

1130
cleanup = true
@@ -16,109 +35,42 @@ singularity {
1635
}
1736

1837
apptainer {
19-
autoMounts = true }
38+
autoMounts = true
39+
}
2040

2141
// Group name for resource allocation must be supplied as environment variable
2242
process {
2343
executor = 'slurm'
24-
clusterOptions = "--account=${System.getenv('SLURM_ACCOUNT')}"
44+
clusterOptions = params.cluster_name == 'trillium' ? "--account=${System.getenv('SLURM_ACCOUNT')} --nodes=1": "--account=${System.getenv('SLURM_ACCOUNT')}"
2545
maxRetries = 1
2646
errorStrategy = { task.exitStatus in [125,139] ? 'retry' : 'finish' }
27-
cpu = 1
28-
time = '3h'
47+
cpus = 1
48+
time = '1h'
49+
50+
// NOTE:
51+
// these resourceLimits are set to baseline CPU for each cluster
52+
// Currently missing are configs for GPUs
53+
2954
resourceLimits = [
30-
memory: 240.GB,
31-
cpus: 64,
32-
time: 168.h
33-
]
55+
cpus: params.cluster_name == 'nibi'? 192 :
56+
params.cluster_name == 'rorqual' ? 192 :
57+
params.cluster_name == 'fir' ? 192 :
58+
params.cluster_name == 'trillium' ? null:
59+
64,
60+
memory: params.cluster_name == 'narval' ? 249.GB :
61+
params.cluster_name == 'nibi' ? 750.GB :
62+
params.cluster_name == 'rorqual' ? 750.GB :
63+
params.cluster_name == 'fir' ? 750.GB :
64+
params.cluster_name == 'trillium' ? null :
65+
240.GB,
66+
time: 168.h
67+
]
68+
3469
}
3570

3671
executor {
3772
pollInterval = '60 sec'
3873
submitRateLimit = '60/1min'
39-
queueSize = 100
40-
}
41-
42-
// Cluster name is available as environment variable
43-
// If not found, default to narval as it has the lowest limits
44-
hostname = "narval"
45-
try {
46-
hostname = "${System.getenv('HOSTNAME')}"
47-
} catch (java.io.IOException e) {
48-
System.err.println("WARNING: Could not determine current cluster, defaulting to narval")
74+
queueSize = params.cluster_name == 'trillium' ? 500 : 100
4975
}
5076

51-
// Cluster Narval
52-
if (hostname.startsWith("narval")) {
53-
params.config_profile_description = 'Alliance Canada (Narval) cluster profile provided by nf-core/configs.'
54-
params.max_memory = 249.GB
55-
params.max_cpus = 64
56-
process {
57-
resourceLimits = [
58-
memory: 249.GB,
59-
cpus: 64,
60-
time: 168.h
61-
]
62-
}
63-
}
64-
65-
// Cluster Rorqual
66-
if (hostname.startsWith("rorqual")) {
67-
params.config_profile_description = 'Alliance Canada (Rorqual) cluster profile provided by nf-core/configs.'
68-
params.max_memory = 750.GB
69-
params.max_cpus = 192
70-
process {
71-
resourceLimits = [
72-
memory: 750.GB,
73-
cpus: 192,
74-
time: 168.h
75-
]
76-
}
77-
}
78-
79-
// Cluster Trillium
80-
if (hostname.startsWith("tri")) {
81-
params.config_profile_description = 'Alliance Canada (Trillium) cluster profile provided by nf-core/configs.'
82-
params.max_memory = null
83-
params.max_cpus = null
84-
process {
85-
clusterOptions = "--account=${System.getenv('SLURM_ACCOUNT')} --nodes=1"
86-
resourceLimits = [
87-
time: 168.h
88-
]
89-
}
90-
executor {
91-
queueSize = 500
92-
}
93-
}
94-
95-
// Cluster Nibi
96-
if (hostname.contains("nibi")) {
97-
params.config_profile_description = 'Alliance Canada (Nibi) cluster profile provided by nf-core/configs.'
98-
params.max_memory = 750.GB
99-
params.max_cpus = 192
100-
process {
101-
resourceLimits = [
102-
memory: 750.GB,
103-
cpus: 192,
104-
time: 168.h
105-
]
106-
}
107-
}
108-
109-
// Cluster Fir
110-
// not sure what the hostname is so set it as a profile
111-
profiles {
112-
fir {
113-
params.config_profile_description = 'Alliance Canada (Fir) cluster profile provided by nf-core/configs.'
114-
params.max_memory = 750.GB
115-
params.max_cpus = 192
116-
process {
117-
resourceLimits = [
118-
memory: 750.GB,
119-
cpus: 192,
120-
time: 168.h
121-
]
122-
}
123-
}
124-
}

conf/bi.config

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,9 @@ params {
77
config_profile_description = 'Boehringer Ingelheim internal profile provided by nf-core/configs.'
88
config_profile_contact = 'Alexander Peltzer (@apeltzer)'
99
config_profile_url = 'https://www.boehringer-ingelheim.com/'
10+
bi_globalConfig = System.getenv('NXF_GLOBAL_CONFIG') ?:
11+
System.err.println("WARNING: For bi.config requires NXF_GLOBAL_CONFIG env var to be set. Point it to global.config file if you want to use this profile.")
1012
}
1113

12-
params.bi_globalConfig = System.getenv('NXF_GLOBAL_CONFIG')
13-
if (params.bi_globalConfig == null) {
14-
System.err.println("WARNING: For bi.config requires NXF_GLOBAL_CONFIG env var to be set. Point it to global.config file if you want to use this profile.")
15-
} else {
16-
includeConfig params.bi_globalConfig
17-
}
14+
// Include the global config if set
15+
includeConfig(params.bi_globalConfig ?: '/dev/null')

conf/embl_hd.config

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ apptainer {
1717
autoMounts = true
1818
pullTimeout = "3 hours" // the default is 20 minutes and fails with large images
1919
envWhitelist = 'CUDA_VISIBLE_DEVICES' // allow the bounding of GPU visible device variable into the containers
20+
libraryDir = '/cvmfs/singularity.galaxyproject.org/all/'
2021
}
2122

2223
singularity {
@@ -47,7 +48,7 @@ process {
4748
}
4849

4950
executor {
50-
name = "slurm"
51+
name = 'slurm'
5152
queueSize = 200
5253
submitRateLimit = "10/1sec"
5354
pollInterval = '10sec'

conf/incliva.config

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,20 @@ params {
33
config_profile_description = 'INCLIVA Health Research Institute profile for nf-core.'
44
config_profile_contact = 'Sheila Zúñiga Trejos - bioinfo@incliva.es'
55
config_profile_url = 'https://www.incliva.es/en/services/platforms/bioinformatics-unit/'
6+
7+
warning_message = {
8+
System.out.println("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
9+
System.out.println("WARNING:")
10+
System.out.println("WARNING: THIS CONFIG IS NO LONGER MAINTAINED.")
11+
System.out.println("WARNING:")
12+
System.out.println("WARNING: THIS CONFIG WILL BE DEPRECATED BY THE END OF MAY 2026 DUE TO AN UPCOMING NEXTFLOW VERSION THAT WILL NOT BE BACKWARDS COMPATIBLE.")
13+
System.out.println("WARNING: MODIFICATIONS TO THIS CONFIG AND TESTING BY USERS OF THE INFRASTRUCTURE ARE REQUIRED TO ENSURE THE CONFIG REMAINS FUNCTIONAL.")
14+
System.out.println("WARNING:")
15+
System.out.println("WARNING: PLEASE GET IN CONTACT WITH THE NF-CORE COMMUNITY VIA SLACK (#configs CHANNEL) OR EMAIL (https://nf-co.re/join) ASAP")
16+
System.out.println("WARNING: TO ALLOW CONTINUED USE OF YOUR CONFIG WITH NF-CORE PIPELINES")
17+
System.out.println("WARNING:")
18+
System.out.println("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
19+
}.call()
620
}
721

822
// Function to get hostname

conf/iris.config

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ params.singularity_library_dir = env('NXF_SINGULARITY_LIBRARYDIR') ?: par
5252
executor {
5353
name = 'slurm'
5454
pollInterval = 45.s
55-
queueSize = 5000
55+
queueSize = 500
5656
queueStatInterval = '1 min'
5757
submitRateLimit = '95/1min'
5858
retry.delay = '1s'

conf/nci_gadi.config

Lines changed: 8 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,43 +1,33 @@
11
// NCI Gadi nf-core configuration profile
22
params {
33
config_profile_description = 'NCI Gadi HPC profile provided by nf-core/configs'
4-
config_profile_contact = 'Georgie Samaha (@georgiesamaha), Matthew Downton (@mattdton)'
4+
config_profile_contact = 'Georgie Samaha (@georgiesamaha), Kisaru Liyanage (@kisarur), Matthew Downton (@mattdton)'
55
config_profile_url = 'https://opus.nci.org.au/display/Help/Gadi+User+Guide'
6-
project = System.getenv("PROJECT")
6+
nci_gadi_project = System.getenv("PROJECT")
7+
nci_gadi_storage = "gdata/${params.nci_gadi_project}+scratch/${params.nci_gadi_project}"
78
}
89

10+
validation.ignoreParams = ["nci_gadi_project", "nci_gadi_storage"]
11+
912
// Enable use of Singularity to run containers
1013
singularity {
1114
enabled = true
1215
autoMounts = true
16+
cacheDir = "/scratch/${params.nci_gadi_project}/${System.getenv('USER')}/nxf_singularity_cache"
1317
}
1418

1519
// Submit up to 300 concurrent jobs (Gadi exec max)
16-
// pollInterval and queueStatInterval of every 5 minutes
17-
// submitRateLimit of 20 per minute
1820
executor {
1921
queueSize = 300
20-
pollInterval = '5 min'
21-
queueStatInterval = '5 min'
22-
submitRateLimit = '20 min'
2322
}
2423

2524
// Define process resource limits
2625
process {
2726
executor = 'pbspro'
28-
storage = "scratch/${params.project}"
27+
project = "${params.nci_gadi_project}" // The version of Nextflow installed on Gadi has been modified to allow usage of this non-standard directive
28+
storage = "${params.nci_gadi_storage}" // The version of Nextflow installed on Gadi has been modified to allow usage of this non-standard directive
2929
module = 'singularity'
3030
cache = 'lenient'
3131
stageInMode = 'symlink'
3232
queue = { task.memory < 128.GB ? 'normalbw' : (task.memory >= 128.GB && task.memory <= 190.GB ? 'normal' : (task.memory > 190.GB && task.memory <= 1020.GB ? 'hugemembw' : '')) }
33-
beforeScript = 'module load singularity'
34-
}
35-
36-
// Write custom trace file with outputs required for SU calculation
37-
def trace_timestamp = new java.util.Date().format('yyyy-MM-dd_HH-mm-ss')
38-
trace {
39-
enabled = true
40-
overwrite = false
41-
file = "./gadi-nf-core-trace-${trace_timestamp}.txt"
42-
fields = 'name,status,exit,duration,realtime,cpus,%cpu,memory,%mem,rss'
4333
}

conf/pipeline/eager/maestro.config

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,21 @@
66
*/
77

88
params {
9-
109
config_profile_name = 'nf-core/eager nuclear/mitocondrial - human profiles'
11-
1210
config_profile_description = "Simple profiles for assessing computational ressources that fit human nuclear dna, human mitogenomes processing. unlimitedtime is also available "
11+
warning_message = {
12+
System.out.println("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
13+
System.out.println("WARNING:")
14+
System.out.println("WARNING: THIS CONFIG IS NO LONGER MAINTAINED.")
15+
System.out.println("WARNING:")
16+
System.out.println("WARNING: THIS CONFIG WILL BE DEPRECATED BY THE END OF MAY 2026 DUE TO AN UPCOMING NEXTFLOW VERSION THAT WILL NOT BE BACKWARDS COMPATIBLE.")
17+
System.out.println("WARNING: MODIFICATIONS TO THIS CONFIG AND TESTING BY USERS OF THE INFRASTRUCTURE ARE REQUIRED TO ENSURE THE CONFIG REMAINS FUNCTIONAL.")
18+
System.out.println("WARNING:")
19+
System.out.println("WARNING: PLEASE GET IN CONTACT WITH THE NF-CORE COMMUNITY VIA SLACK (#configs CHANNEL) OR EMAIL (https://nf-co.re/join) ASAP")
20+
System.out.println("WARNING: TO ALLOW CONTINUED USE OF YOUR CONFIG WITH NF-CORE PIPELINES")
21+
System.out.println("WARNING:")
22+
System.out.println("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
23+
}.call()
1324
}
1425

1526

conf/pipeline/sarek/icr_davros.config

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,19 @@ process {
1111
time = { check_resource(48.h * task.attempt) }
1212
}
1313
}
14+
15+
params {
16+
warning_message = {
17+
System.out.println("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
18+
System.out.println("WARNING:")
19+
System.out.println("WARNING: THIS CONFIG IS NO LONGER MAINTAINED.")
20+
System.out.println("WARNING:")
21+
System.out.println("WARNING: THIS CONFIG WILL BE DEPRECATED BY THE END OF MAY 2026 DUE TO AN UPCOMING NEXTFLOW VERSION THAT WILL NOT BE BACKWARDS COMPATIBLE.")
22+
System.out.println("WARNING: MODIFICATIONS TO THIS CONFIG AND TESTING BY USERS OF THE INFRASTRUCTURE ARE REQUIRED TO ENSURE THE CONFIG REMAINS FUNCTIONAL.")
23+
System.out.println("WARNING:")
24+
System.out.println("WARNING: PLEASE GET IN CONTACT WITH THE NF-CORE COMMUNITY VIA SLACK (#configs CHANNEL) OR EMAIL (https://nf-co.re/join) ASAP")
25+
System.out.println("WARNING: TO ALLOW CONTINUED USE OF YOUR CONFIG WITH NF-CORE PIPELINES")
26+
System.out.println("WARNING:")
27+
System.out.println("WARNING: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
28+
}.call()
29+
}

0 commit comments

Comments
 (0)