From af34a57e5b11bc1fdebe320fa198cc8b63e113f2 Mon Sep 17 00:00:00 2001 From: Ben Sherman Date: Fri, 1 Aug 2025 09:53:30 -0500 Subject: [PATCH 1/7] Unify nf-lang config scopes with runtime classes Signed-off-by: Ben Sherman --- docs/google.md | 2 + docs/notifications.md | 29 +- docs/reference/config.md | 752 +++++++++--------- docs/spack.md | 7 +- .../src/main/groovy/nextflow/Nextflow.groovy | 12 +- .../src/main/groovy/nextflow/Session.groovy | 168 +--- .../groovy/nextflow/conda/CondaCache.groovy | 5 +- .../groovy/nextflow/conda/CondaConfig.groovy | 102 ++- .../nextflow/config/ConfigBuilder.groovy | 21 +- .../groovy/nextflow/config/ConfigMap.groovy | 46 +- .../nextflow/config/ConfigValidator.groovy | 50 +- .../groovy/nextflow/config/Manifest.groovy | 295 ++++--- .../nextflow/config/WorkflowConfig.groovy | 141 ++++ .../container/ApptainerBuilder.groovy | 19 +- .../nextflow/container/ApptainerCache.groovy | 4 +- .../container/ApptainerConfig.groovy} | 77 +- .../container/CharliecloudBuilder.groovy | 29 +- .../container/CharliecloudCache.groovy | 34 +- .../container/CharliecloudConfig.groovy} | 64 +- .../container/ContainerBuilder.groovy | 36 +- .../nextflow/container/ContainerConfig.groovy | 120 +-- .../container/ContainerHandler.groovy | 60 +- .../nextflow/container/ContainerHelper.groovy | 42 + .../nextflow/container/DockerBuilder.groovy | 53 +- .../nextflow/container/DockerConfig.groovy | 151 ++++ .../nextflow/container/PodmanBuilder.groovy | 37 +- .../nextflow/container/PodmanConfig.groovy | 111 +++ .../nextflow/container/SarusBuilder.groovy | 38 +- .../nextflow/container/SarusConfig.groovy | 78 ++ .../nextflow/container/ShifterBuilder.groovy | 27 +- .../nextflow/container/ShifterConfig.groovy | 62 ++ .../container/SingularityBuilder.groovy | 39 +- .../container/SingularityCache.groovy | 40 +- .../container/SingularityConfig.groovy} | 84 +- .../nextflow/container/UdockerBuilder.groovy | 108 --- .../groovy/nextflow/dag/DotRenderer.groovy | 11 +- 
.../nextflow/dag/GraphVizRenderer.groovy | 11 +- .../nextflow/dag/MermaidHtmlRenderer.groovy | 12 +- .../nextflow/dag/MermaidRenderer.groovy | 15 +- .../executor/AbstractGridExecutor.groovy | 10 +- .../executor/BashWrapperBuilder.groovy | 32 +- .../nextflow/executor/BatchCleanup.groovy | 18 +- .../nextflow/executor/CrgExecutor.groovy | 7 +- .../groovy/nextflow/executor/Executor.groovy | 5 + .../nextflow/executor/ExecutorConfig.groovy | 232 ++++++ .../nextflow/executor/ExecutorFactory.groovy | 10 +- .../executor/ExecutorRetryConfig.groovy} | 46 +- .../nextflow/executor/GridTaskHandler.groovy | 19 +- .../nextflow/executor/LsfExecutor.groovy | 6 +- .../nextflow/executor/NopeExecutor.groovy | 3 +- .../nextflow/executor/PbsExecutor.groovy | 2 +- .../nextflow/executor/PbsProExecutor.groovy | 2 +- .../nextflow/executor/SlurmExecutor.groovy | 4 +- .../executor/local/LocalExecutor.groovy | 2 +- .../nextflow/fusion/FusionConfig.groovy | 112 ++- .../nextflow/fusion/FusionHelper.groovy | 7 +- .../nextflow/mail/BaseMailProvider.groovy | 2 +- .../groovy/nextflow/mail/MailConfig.groovy | 111 +++ .../main/groovy/nextflow/mail/Mailer.groovy | 57 +- .../groovy/nextflow/mail/Notification.groovy | 74 ++ .../processor/LocalPollingMonitor.groovy | 29 +- .../processor/ParallelPollingMonitor.groovy | 1 + .../nextflow/processor/TaskArrayRun.groovy | 9 +- .../processor/TaskPollingMonitor.groovy | 45 +- .../groovy/nextflow/processor/TaskRun.groovy | 7 +- .../nextflow/script/WorkflowMetadata.groovy | 7 +- .../nextflow/script/WorkflowNotifier.groovy | 34 +- .../groovy/nextflow/spack/SpackConfig.groovy | 74 +- .../trace/DefaultObserverFactory.groovy | 72 +- .../nextflow/trace/GraphObserver.groovy | 24 +- .../nextflow/trace/ReportObserver.groovy | 32 +- .../nextflow/trace/TimelineObserver.groovy | 10 +- .../nextflow/trace/TraceFileObserver.groovy | 29 +- .../nextflow/trace/config/DagConfig.groovy} | 50 +- .../nextflow/trace/config/ReportConfig.groovy | 71 ++ 
.../trace/config/TimelineConfig.groovy | 63 ++ .../nextflow/trace/config/TraceConfig.groovy | 84 ++ .../groovy/nextflow/util/ConfigHelper.groovy | 17 - .../main/resources/META-INF/extensions.idx | 33 +- .../test/groovy/nextflow/SessionTest.groovy | 185 +---- .../nextflow/conda/CondaCacheTest.groovy | 24 +- .../nextflow/conda/CondaConfigTest.groovy | 8 +- .../nextflow/config/ConfigBuilderTest.groovy | 17 +- .../nextflow/config/ManifestTest.groovy | 4 +- .../container/ApptainerBuilderTest.groovy | 44 +- .../container/ApptainerCacheTest.groovy | 18 +- .../container/CharliecloudBuilderTest.groovy | 40 +- .../container/CharliecloudCacheTest.groovy | 20 +- .../container/ContainerBuilderTest.groovy | 30 +- .../container/ContainerConfigTest.groovy | 80 +- .../container/ContainerHandlerTest.groovy | 43 +- .../container/DockerBuilderTest.groovy | 38 +- .../container/PodmanBuilderTest.groovy | 26 +- .../container/SarusBuilderTest.groovy | 8 +- .../container/ShifterBuilderTest.groovy | 3 +- .../container/SingularityBuilderTest.groovy | 54 +- .../container/SingularityCacheTest.groovy | 20 +- .../container/UdockerBuilderTest.groovy | 152 ---- .../DefaultContainerResolverTest.groovy | 4 +- .../nextflow/dag/DotRendererTest.groovy | 2 +- .../nextflow/dag/MermaidRendererTest.groovy | 10 +- .../executor/AbstractGridExecutorTest.groovy | 31 +- .../executor/BashWrapperBuilderTest.groovy | 74 +- .../nextflow/executor/BatchCleanupTest.groovy | 6 +- .../executor/CondorExecutorTest.groovy | 4 +- .../nextflow/executor/CrgExecutorTest.groovy | 12 +- .../executor/ExecutorConfigTest.groovy | 120 +++ .../nextflow/executor/GridExecutorTest.groovy | 20 +- .../executor/GridTaskHandlerTest.groovy | 16 +- .../nextflow/executor/LsfExecutorTest.groovy | 87 +- .../nextflow/executor/PbsExecutorTest.groovy | 38 +- .../executor/PbsProExecutorTest.groovy | 50 +- .../executor/SlurmExecutorTest.groovy | 38 +- .../local/LocalTaskHandlerTest.groovy | 4 +- .../nextflow/fusion/FusionHelperTest.groovy | 19 +- 
.../groovy/nextflow/mail/MailerTest.groovy | 61 +- .../processor/LocalPollingMonitorTest.groovy | 33 +- .../nextflow/processor/TaskBeanTest.groovy | 6 +- .../processor/TaskPollingMonitorTest.groovy | 10 +- .../nextflow/processor/TaskRunTest.groovy | 33 +- .../nextflow/script/ScriptRunnerTest.groovy | 23 +- .../script/WorkflowNotifierTest.groovy | 35 +- .../nextflow/spack/SpackCacheTest.groovy | 10 +- .../nextflow/spack/SpackConfigTest.groovy | 8 +- .../nextflow/trace/GraphObserverTest.groovy | 29 +- .../nextflow/trace/ReportObserverTest.groovy | 11 +- .../trace/TimelineObserverTest.groovy | 4 +- .../trace/TraceFileObserverTest.groovy | 2 +- .../nextflow/util/ConfigHelperTest.groovy | 15 - .../nextflow/util/KryoHelperTest.groovy | 8 +- .../nextflow/config/schema/ConfigOption.java | 3 +- .../config/scopes/AwsBatchConfig.java | 119 --- .../config/scopes/AwsClientConfig.java | 166 ---- .../nextflow/config/scopes/AwsConfig.java | 52 -- .../scopes/AzureActiveDirectoryConfig.java | 42 - .../config/scopes/AzureBatchConfig.java | 106 --- .../config/scopes/AzureBatchPoolConfig.java | 141 ---- .../nextflow/config/scopes/AzureConfig.java | 34 - .../scopes/AzureManagedIdentityConfig.java | 46 -- .../config/scopes/AzureRegistryConfig.java | 42 - .../config/scopes/AzureRetryConfig.java | 49 -- .../config/scopes/AzureStorageConfig.java | 49 -- .../nextflow/config/scopes/CondaConfig.java | 74 -- .../java/nextflow/config/scopes/Config.java | 223 +----- .../nextflow/config/scopes/DockerConfig.java | 96 --- .../nextflow/config/scopes/EnvConfig.java | 21 - .../config/scopes/ExecutorConfig.java | 150 ---- .../nextflow/config/scopes/FusionConfig.java | 79 -- .../config/scopes/GoogleBatchConfig.java | 101 --- .../nextflow/config/scopes/GoogleConfig.java | 59 -- .../nextflow/config/scopes/K8sConfig.java | 157 ---- .../config/scopes/K8sRetryConfig.java | 49 -- .../nextflow/config/scopes/LineageConfig.java | 40 - .../nextflow/config/scopes/MailConfig.java | 71 -- 
.../java/nextflow/config/scopes/Manifest.java | 117 --- .../config/scopes/NextflowConfig.java | 24 - .../nextflow/config/scopes/ParamsConfig.java | 21 - .../nextflow/config/scopes/PodmanConfig.java | 72 -- .../nextflow/config/scopes/ProcessConfig.java | 24 - .../config/scopes/ProfilesConfig.java | 24 - .../nextflow/config/scopes/ReportConfig.java | 42 - .../nextflow/config/scopes/ShifterConfig.java | 30 - .../nextflow/config/scopes/SpackConfig.java | 51 -- .../config/scopes/TimelineConfig.java | 42 - .../nextflow/config/scopes/TraceConfig.java | 62 -- .../config/scopes/WaveBuildConfig.java | 100 --- .../nextflow/config/scopes/WaveConfig.java | 64 -- .../config/scopes/WaveHttpConfig.java | 31 - .../config/scopes/WaveRetryConfig.java | 49 -- .../config/scopes/WaveScanConfig.java | 36 - .../config/scopes/WorkflowConfig.java | 52 -- .../config/scopes/WorkflowOutputConfig.java | 74 -- .../lineage/config/LineageConfig.groovy | 29 +- .../lineage/config/LineageStoreOpts.groovy | 9 +- .../src/resources/META-INF/extensions.idx | 1 + .../cloud/aws/batch/AwsBatchExecutor.groovy | 16 +- .../aws/batch/AwsBatchScriptLauncher.groovy | 3 +- .../cloud/aws/config/AwsBatchConfig.groovy | 188 ++--- .../cloud/aws/config/AwsConfig.groovy | 90 ++- .../cloud/aws/config/AwsS3Config.groovy | 273 +++++-- .../cloud/aws/config/AwsS3Legacy.groovy | 79 -- .../src/resources/META-INF/extensions.idx | 1 + .../batch/AwsBatchScriptLauncherTest.groovy | 4 +- .../cloud/aws/config/AwsS3LegacyTest.groovy | 69 -- .../aws/nio/S3FileSystemProviderTest.groovy | 42 +- .../cloud/azure/batch/AzBatchExecutor.groovy | 18 +- .../cloud/azure/batch/AzBatchService.groovy | 2 +- .../azure/batch/AzBatchTaskHandler.groovy | 2 +- .../azure/batch/AzFileCopyStrategy.groovy | 2 +- .../azure/config/AzActiveDirectoryOpts.groovy | 25 +- .../cloud/azure/config/AzBatchOpts.groovy | 111 ++- .../cloud/azure/config/AzConfig.groovy | 54 +- .../cloud/azure/config/AzCopyOpts.groovy | 18 +- .../cloud/azure/config/AzFileShareOpts.groovy 
| 48 +- .../azure/config/AzManagedIdentityOpts.groovy | 17 +- .../cloud/azure/config/AzPoolOpts.groovy | 121 ++- .../cloud/azure/config/AzRegistryOpts.groovy | 25 +- .../cloud/azure/config/AzRetryConfig.groovy | 25 +- .../cloud/azure/config/AzStartTaskOpts.groovy | 18 +- .../cloud/azure/config/AzStorageOpts.groovy | 35 +- .../src/resources/META-INF/extensions.idx | 1 + .../azure/batch/AzBatchServiceTest.groovy | 121 ++- .../azure/batch/AzFileCopyStrategyTest.groovy | 6 +- .../BashWrapperBuilderWithAzTest.groovy | 2 +- .../nextflow/cloud/google/GoogleOpts.groovy | 83 +- .../google/batch/GoogleBatchExecutor.groovy | 16 +- .../batch/GoogleBatchScriptLauncher.groovy | 14 +- .../batch/GoogleBatchTaskHandler.groovy | 66 +- .../google/batch/client/BatchClient.groovy | 15 +- .../google/batch/client/BatchConfig.groovy | 180 +++-- .../google/batch/logging/BatchLogging.groovy | 12 +- .../google/config/GoogleRetryOpts.groovy | 19 +- .../google/config/GoogleStorageOpts.groovy | 7 +- .../src/resources/META-INF/extensions.idx | 1 + .../GoogleBatchScriptLauncherTest.groovy | 10 +- .../batch/GoogleBatchTaskHandlerTest.groovy | 30 +- .../batch/client/BatchConfigTest.groovy | 27 +- .../batch/logging/BatchLoggingTest.groovy | 4 +- .../src/main/nextflow/k8s/K8sConfig.groovy | 356 +++++---- .../src/main/nextflow/k8s/K8sExecutor.groovy | 2 +- .../main/nextflow/k8s/K8sTaskHandler.groovy | 3 +- .../nextflow/k8s/client/ClientConfig.groovy | 40 - .../nextflow/k8s/client/K8sRetryConfig.groovy | 25 +- .../src/resources/META-INF/extensions.idx | 1 + .../test/nextflow/k8s/K8sConfigTest.groovy | 149 +--- .../nextflow/k8s/K8sDriverLauncherTest.groovy | 2 +- .../k8s/client/ClientConfigTest.groovy | 75 -- .../io/seqera/tower/plugin/TowerClient.groovy | 27 +- .../io/seqera/tower/plugin/TowerConfig.groovy | 44 +- .../seqera/tower/plugin/TowerFactory.groovy | 31 +- .../seqera/tower/plugin/TowerReports.groovy | 16 +- .../src/resources/META-INF/extensions.idx | 1 + 
.../tower/plugin/CacheManagerTest.groovy | 9 + .../tower/plugin/TowerClientTest.groovy | 68 +- .../seqera/wave/plugin/cli/WaveRunCmd.groovy | 5 +- .../seqera/wave/plugin/config/HttpOpts.groovy | 13 +- .../wave/plugin/config/RetryOpts.groovy | 25 +- .../wave/plugin/config/WaveConfig.groovy | 193 +++-- .../src/resources/META-INF/extensions.idx | 1 + .../wave/plugin/config/WaveConfigTest.groovy | 2 +- 240 files changed, 5310 insertions(+), 6682 deletions(-) create mode 100644 modules/nextflow/src/main/groovy/nextflow/config/WorkflowConfig.groovy rename modules/{nf-lang/src/main/java/nextflow/config/scopes/ApptainerConfig.java => nextflow/src/main/groovy/nextflow/container/ApptainerConfig.groovy} (51%) rename modules/{nf-lang/src/main/java/nextflow/config/scopes/CharliecloudConfig.java => nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy} (52%) create mode 100644 modules/nextflow/src/main/groovy/nextflow/container/ContainerHelper.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/container/PodmanConfig.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/container/SarusConfig.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/container/ShifterConfig.groovy rename modules/{nf-lang/src/main/java/nextflow/config/scopes/SingularityConfig.java => nextflow/src/main/groovy/nextflow/container/SingularityConfig.groovy} (51%) delete mode 100644 modules/nextflow/src/main/groovy/nextflow/container/UdockerBuilder.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/executor/ExecutorConfig.groovy rename modules/{nf-lang/src/main/java/nextflow/config/scopes/ExecutorRetryConfig.java => nextflow/src/main/groovy/nextflow/executor/ExecutorRetryConfig.groovy} (61%) create mode 100644 modules/nextflow/src/main/groovy/nextflow/mail/MailConfig.groovy create mode 100644 
modules/nextflow/src/main/groovy/nextflow/mail/Notification.groovy rename modules/{nf-lang/src/main/java/nextflow/config/scopes/DagConfig.java => nextflow/src/main/groovy/nextflow/trace/config/DagConfig.groovy} (55%) create mode 100644 modules/nextflow/src/main/groovy/nextflow/trace/config/ReportConfig.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/trace/config/TimelineConfig.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/trace/config/TraceConfig.groovy delete mode 100644 modules/nextflow/src/test/groovy/nextflow/container/UdockerBuilderTest.groovy create mode 100644 modules/nextflow/src/test/groovy/nextflow/executor/ExecutorConfigTest.groovy delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AwsBatchConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AwsConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureActiveDirectoryConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchPoolConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureManagedIdentityConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRegistryConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRetryConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/AzureStorageConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/CondaConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/DockerConfig.java delete mode 100644 
modules/nf-lang/src/main/java/nextflow/config/scopes/EnvConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/FusionConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleBatchConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/K8sConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/K8sRetryConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/LineageConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/MailConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/Manifest.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/NextflowConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/ParamsConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/PodmanConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/ProcessConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/ProfilesConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/ReportConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/ShifterConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/SpackConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/TimelineConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/TraceConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/WaveBuildConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/WaveConfig.java delete mode 100644 
modules/nf-lang/src/main/java/nextflow/config/scopes/WaveHttpConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/WaveRetryConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/WaveScanConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowConfig.java delete mode 100644 modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowOutputConfig.java delete mode 100644 plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Legacy.groovy delete mode 100644 plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3LegacyTest.groovy rename modules/nf-lang/src/main/java/nextflow/config/scopes/TowerConfig.java => plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerConfig.groovy (50%) diff --git a/docs/google.md b/docs/google.md index f6b03a149c..082af214ed 100644 --- a/docs/google.md +++ b/docs/google.md @@ -2,6 +2,8 @@ # Google Cloud +(google-credentials)= + ## Credentials Credentials for submitting requests to the Google Cloud Batch API are picked up from your environment using [Application Default Credentials](https://github.com/googleapis/google-auth-library-java#google-auth-library-oauth2-http). Application Default Credentials are designed to use the credentials most natural to the environment in which a tool runs. diff --git a/docs/notifications.md b/docs/notifications.md index 433d32dffb..ce8120943d 100644 --- a/docs/notifications.md +++ b/docs/notifications.md @@ -220,6 +220,16 @@ mail { } ``` +:::{note} +Some versions of Java (e.g. Java 11 Corretto) do not default to TLS v1.2, and as a result may have issues with 3rd party integrations that enforce TLS v1.2 (e.g. Azure Active Directory OIDC). This problem can be addressed by setting the following config option: + +```groovy +mail { + smtp.ssl.protocols = 'TLSv1.2' +} +``` +::: + See the {ref}`mail scope ` section to learn more the mail server configuration options. 
### AWS SES configuration @@ -227,10 +237,9 @@ See the {ref}`mail scope ` section to learn more the mail server co :::{versionadded} 23.06.0-edge ::: -Nextflow supports [AWS SES](https://aws.amazon.com/ses/) native API as an alternative -provider to send emails in place of SMTP server. +Nextflow supports the [AWS Simple Email Service](https://aws.amazon.com/ses/) API as an alternative provider to send emails in place of an SMTP server. -To enable this feature add the following environment variable in the launching environment: +To enable this feature, set the following environment variable in the launch environment: ```bash export NXF_ENABLE_AWS_SES=true @@ -242,6 +251,20 @@ Make also sure to add the following AWS IAM permission to the AWS user (or role) ses:SendRawEmail ``` +The following snippet shows how to configure Nextflow to send emails through SES: + +```groovy +mail { + smtp.host = 'email-smtp.us-east-1.amazonaws.com' + smtp.port = 587 + smtp.user = '' + smtp.password = '' + smtp.auth = true + smtp.starttls.enable = true + smtp.starttls.required = true +} +``` + ## Mail notification You can use the `sendMail` function with a {ref}`workflow completion handler ` to notify the completion of a workflow completion. For example: diff --git a/docs/reference/config.md b/docs/reference/config.md index e8857d9fac..2746d0baf7 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -8,32 +8,31 @@ This page lists all of the available settings in the {ref}`Nextflow configuratio ## Unscoped options +The following settings are available: + `bucketDir` : The remote work directory used by hybrid workflows. Equivalent to the `-bucket-dir` option of the `run` command. `cleanup` -: If `true`, on a successful completion of a run all files in *work* directory are automatically deleted. +: Delete all files associated with a run in the work directory when the run completes successfully (default: `false`). 
- :::{warning} - The use of the `cleanup` option will prevent the use of the *resume* feature on subsequent executions of that pipeline run. +: :::{warning} + This option will prevent the use of the *resume* feature on subsequent executions of that pipeline run. ::: :::{warning} - The `cleanup` option is not supported for remote work directories, such as Amazon S3, Google Cloud Storage, and Azure Blob Storage. + This option is not supported for remote work directories, such as Amazon S3, Google Cloud Storage, and Azure Blob Storage. ::: -`dumpHashes` -: If `true`, dump task hash keys in the log file, for debugging purposes. Equivalent to the `-dump-hashes` option of the `run` command. - `outputDir` : :::{versionadded} 24.10.0 ::: -: Defines the pipeline output directory. Equivalent to the `-output-dir` option of the `run` command. +: The pipeline output directory. Equivalent to the `-output-dir` option of the `run` command. `resume` -: If `true`, enable the use of previously cached task executions. Equivalent to the `-resume` option of the `run` command. +: Enable the use of previously cached task executions. Equivalent to the `-resume` option of the `run` command. `workDir` -: Defines the pipeline work directory. Equivalent to the `-work-dir` option of the `run` command. +: The pipeline work directory. Equivalent to the `-work-dir` option of the `run` command. (config-apptainer)= @@ -82,39 +81,11 @@ The following settings are available: `apptainer.runOptions` : Specify extra command line options supported by `apptainer exec`. -Read the {ref}`container-apptainer` page to learn more about how to use Apptainer containers with Nextflow. - (config-aws)= ## `aws` -The `aws` scope controls the interactions with AWS, including AWS Batch and S3. 
For example: - -```groovy -aws { - accessKey = '' - secretKey = '' - region = 'us-east-1' - - client { - maxConnections = 20 - connectionTimeout = 10000 - uploadStorageClass = 'INTELLIGENT_TIERING' - storageEncryption = 'AES256' - } - batch { - cliPath = '/home/ec2-user/miniconda/bin/aws' - maxTransferAttempts = 3 - delayBetweenAttempts = '5 sec' - } -} -``` - -:::{tip} -This scope can also be used to configure access to S3-compatible storage outside of AWS, such as [Ceph](https://ceph.com/en/) and [MinIO](https://min.io/). -::: - -Read the {ref}`aws-page` and {ref}`amazons3-page` pages for more information. +The `aws` scope controls the interactions with AWS, including AWS Batch and S3. The following settings are available: @@ -139,9 +110,7 @@ The following settings are available: : Delay between download attempts from S3 (default: `10 sec`). `aws.batch.executionRole` -: :::{versionadded} 23.12.0-edge - ::: -: The AWS Batch Execution Role ARN that needs to be used to execute the Batch Job. This is mandatory when using AWS Fargate platform type. See [AWS documentation](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) for more details. +: The AWS Batch [Execution Role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) ARN that needs to be used to execute the Batch Job. It is mandatory when using AWS Fargate. `aws.batch.jobRole` : The AWS Batch Job Role ARN that needs to be used to execute the Batch Job. @@ -166,36 +135,31 @@ The following settings are available: : Max number of downloads attempts from S3 (default: `1`). `aws.batch.platformType` -: :::{versionadded} 23.12.0-edge - ::: -: Allow specifying the compute platform type used by AWS Batch, that can be either `ec2` or `fargate`. See AWS documentation to learn more about [AWS Fargate platform type](https://docs.aws.amazon.com/batch/latest/userguide/fargate.html) for AWS Batch. +: The compute platform type used by AWS Batch. Can be either `ec2` or `fargate`. 
Set to `fargate` to use [AWS Fargate](https://docs.aws.amazon.com/batch/latest/userguide/fargate.html). `aws.batch.retryMode` -: The retry mode configuration setting, to accommodate rate-limiting on [AWS services](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-retries.html) (default: `standard`, other options: `legacy`, `adaptive`); this handling is delegated to AWS. To have Nextflow handle retries instead, use `built-in`. +: The [retry mode](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-retries.html) used to handle rate-limiting by AWS APIs. Can be one of `standard`, `legacy`, `adaptive`, or `built-in` (default: `standard`). `aws.batch.schedulingPriority` : :::{versionadded} 23.01.0-edge ::: -: The scheduling priority for all tasks when using [fair-share scheduling for AWS Batch](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/) (default: `0`) +: The scheduling priority for all tasks when using [fair-share scheduling](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/) (default: `0`). `aws.batch.shareIdentifier` : :::{versionadded} 22.09.0-edge ::: -: The share identifier for all tasks when using [fair-share scheduling for AWS Batch](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/) +: The share identifier for all tasks when using [fair-share scheduling](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/). `aws.batch.terminateUnschedulableJobs` : :::{versionadded} 25.03.0-edge ::: -: When `true`, jobs that cannot be scheduled for lack of resources or misconfiguration are terminated automatically (default: `false`). The pipeline may complete with an error status depending on the error strategy defined for the corresponding jobs. +: When `true`, jobs that cannot be scheduled due to lack of resources or misconfiguration are terminated and handled as task failures (default: `false`). 
`aws.batch.volumes` -: One or more container mounts. Mounts can be specified as simple e.g. `/some/path` or canonical format e.g. `/host/path:/mount/path[:ro|rw]`. Multiple mounts can be specified separating them with a comma or using a list object. +: List of container mounts. Mounts can be specified as simple e.g. `/some/path` or canonical format e.g. `/host/path:/mount/path[:ro|rw]`. `aws.client.anonymous` -: Allow the access of public S3 buckets without the need to provide AWS credentials (default: `false`). Any service that does not accept unsigned requests will return a service access error. - -`aws.client.s3Acl` -: Allow the setting of predefined bucket permissions, also known as *canned ACL*. Permitted values are `Private`, `PublicRead`, `PublicReadWrite`, `AuthenticatedRead`, `LogDeliveryWrite`, `BucketOwnerRead`, `BucketOwnerFullControl`, and `AwsExecRead` (default: none). See [Amazon docs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) for details. +: Allow the access of public S3 buckets without providing AWS credentials (default: `false`). Any service that does not accept unsigned requests will return a service access error. `aws.client.connectionTimeout` : The amount of time to wait (in milliseconds) when initially establishing a connection before timing out (default: `10000`). @@ -203,22 +167,6 @@ The following settings are available: `aws.client.endpoint` : The AWS S3 API entry point e.g. `https://s3-us-west-1.amazonaws.com`. The endpoint must include the protocol prefix e.g. `https://`. -`aws.client.glacierAutoRetrieval` -: :::{deprecated} 24.02.0-edge - Glacier auto-retrieval is no longer supported. Instead, consider using the AWS CLI to restore any Glacier objects before or at the beginning of your pipeline (i.e. in a Nextflow process). - ::: -: Enable auto retrieval of S3 objects with a Glacier storage class (default: `false`). 
- -`aws.client.glacierExpirationDays` -: :::{deprecated} 24.02.0-edge - ::: -: The time, in days, between when an object is restored to the bucket and when it expires (default: `7`). - -`aws.client.glacierRetrievalTier` -: :::{deprecated} 24.02.0-edge - ::: -: The retrieval tier to use when restoring objects from Glacier, one of [`Expedited`, `Standard`, `Bulk`]. - `aws.client.maxConcurrency` : :::{versionadded} 25.06.0-edge ::: @@ -254,6 +202,9 @@ The following settings are available: `aws.client.proxyHost` : The proxy host to connect through. +`aws.client.proxyPassword` +: The password to use when connecting through a proxy. + `aws.client.proxyPort` : The port to use when connecting through a proxy. @@ -265,14 +216,14 @@ The following settings are available: `aws.client.proxyUsername` : The user name to use when connecting through a proxy. -`aws.client.proxyPassword` -: The password to use when connecting through a proxy. - `aws.client.requesterPays` : :::{versionadded} 24.05.0-edge ::: : Use [Requester Pays](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html) for S3 buckets (default: `false`). +`aws.client.s3Acl` +: Specify predefined bucket permissions, also known as [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl). Can be one of `Private`, `PublicRead`, `PublicReadWrite`, `AuthenticatedRead`, `LogDeliveryWrite`, `BucketOwnerRead`, `BucketOwnerFullControl`, or `AwsExecRead`. + `aws.client.s3PathStyleAccess` : Use the path-based access model to access objects in S3-compatible storage systems (default: `false`). @@ -297,6 +248,9 @@ The following settings are available: `aws.client.socketTimeout` : The amount of time to wait (in milliseconds) for data to be transferred over an established, open connection before the connection is timed out (default: `50000`). 
+`aws.client.storageClass` +: The S3 storage class applied to stored objects, one of \[`STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`\] (default: `STANDARD`). + `aws.client.storageEncryption` : The S3 server side encryption to be used when saving objects on S3. Can be `AES256` or `aws:kms` (default: none). @@ -313,7 +267,7 @@ The following settings are available: `aws.client.transferManagerThreads` : :::{versionadded} 25.06.0-edge ::: -: Number of threads used by the S3 transfer manager (default `10`). +: The number of threads used by the S3 transfer manager (default: `10`). `aws.client.userAgent` : :::{deprecated} 25.06.0-edge @@ -354,8 +308,6 @@ The following settings are available: The `azure` scope allows you to configure the interactions with Azure, including Azure Batch and Azure Blob Storage. -Read the {ref}`azure-page` page for more information. - The following settings are available: `azure.activeDirectory.servicePrincipalId` @@ -373,12 +325,12 @@ The following settings are available: `azure.azcopy.blockSize` : The block size (in MB) used by `azcopy` to transfer files between Azure Blob Storage and compute nodes (default: `4`). -`azure.batch.accountName` -: The batch service account name. Defaults to environment variable `AZURE_BATCH_ACCOUNT_NAME`. - `azure.batch.accountKey` : The batch service account key. Defaults to environment variable `AZURE_BATCH_ACCOUNT_KEY`. +`azure.batch.accountName` +: The batch service account name. Defaults to environment variable `AZURE_BATCH_ACCOUNT_NAME`. + `azure.batch.allowPoolCreation` : Enable the automatic creation of batch pools specified in the Nextflow configuration file (default: `false`). @@ -386,13 +338,24 @@ The following settings are available: : Enable the automatic creation of batch pools depending on the pipeline resources demand (default: `true`). `azure.batch.copyToolInstallMode` -: Specify where the `azcopy` tool used by Nextflow. 
When `node` is specified it's copied once during the pool creation. When `task` is provider, it's installed for each task execution. Finally when `off` is specified, the `azcopy` tool is not installed (default: `node`). +: The mode in which the `azcopy` tool is installed by Nextflow (default: `'node'`). + +: The following options are available: + + `'node'` + : The `azcopy` tool is installed once during the pool creation. + + `'task'` + : The `azcopy` tool is installed for each task execution. + + `'off'` + : The `azcopy` tool is not installed. `azure.batch.deleteJobsOnCompletion` -: Delete all jobs when the workflow completes (default: `false`). : :::{versionchanged} 23.08.0-edge Default value was changed from `true` to `false`. ::: +: Delete all jobs when the workflow completes (default: `false`). `azure.batch.deletePoolsOnCompletion` : Delete all compute node pools when the workflow completes (default: `false`). @@ -406,24 +369,25 @@ The following settings are available: `azure.batch.endpoint` : The batch service endpoint e.g. `https://nfbatch1.westeurope.batch.azure.com`. -`azure.batch.location` -: The name of the batch service region, e.g. `westeurope` or `eastus2`. This is not needed when the endpoint is specified. - `azure.batch.jobMaxWallClockTime` : :::{versionadded} 25.04.0 ::: -: The maximum elapsed time that jobs may run, measured from the time they are created. If jobs do not complete within this time limit, the Batch service terminates them and any tasks still running (default: `30d`). +: The maximum elapsed time that jobs may run, measured from the time they are created (default: `30d`). +: If jobs do not complete within this time limit, the Batch service terminates them and any tasks still running. -`azure.batch.terminateJobsOnCompletion` -: :::{versionadded} 23.05.0-edge +`azure.batch.location` +: The name of the batch service region, e.g. `westeurope` or `eastus2`. Not needed when the endpoint is specified. 
+ +`azure.batch.poolIdentityClientId` +: :::{versionadded} 25.05.0-edge ::: -: When the workflow completes, set all jobs to terminate on task completion. (default: `true`). +: The client ID for an Azure [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) that is available on all Azure Batch node pools. This identity is used by Fusion to authenticate to Azure storage. If set to `'auto'`, Fusion will use the first available managed identity. `azure.batch.pools..autoScale` : Enable autoscaling feature for the pool identified with ``. `azure.batch.pools..fileShareRootPath` -: If mounting File Shares, this is the internal root mounting point. Must be `/mnt/resource/batch/tasks/fsmounts` for CentOS nodes or `/mnt/batch/tasks/fsmounts` for Ubuntu nodes (default is for CentOS). +: The internal root mount point when mounting File Shares. Must be `/mnt/resource/batch/tasks/fsmounts` for CentOS nodes or `/mnt/batch/tasks/fsmounts` for Ubuntu nodes (default: CentOS). `azure.batch.pools..lowPriority` : Enable the use of low-priority VMs (default: `false`). @@ -433,78 +397,78 @@ The following settings are available: ::: `azure.batch.pools..maxVmCount` -: Specify the max of virtual machine when using auto scale option. +: The max number of virtual machines when using auto scaling. `azure.batch.pools..mountOptions` -: Specify the mount options for mounting the file shares (default: `-o vers=3.0,dir_mode=0777,file_mode=0777,sec=ntlmssp`). +: The mount options for mounting the file shares (default: `-o vers=3.0,dir_mode=0777,file_mode=0777,sec=ntlmssp`). `azure.batch.pools..offer` -: Specify the offer type of the virtual machine type used by the pool identified with `` (default: `centos-container`). +: The offer type of the virtual machine type used by the pool identified with `` (default: `centos-container`). `azure.batch.pools..privileged` : Enable the task to run with elevated access. 
Ignored if `runAs` is set (default: `false`). `azure.batch.pools..publisher` -: Specify the publisher of virtual machine type used by the pool identified with `` (default: `microsoft-azure-batch`). +: The publisher of virtual machine type used by the pool identified with `` (default: `microsoft-azure-batch`). `azure.batch.pools..runAs` -: Specify the username under which the task is run. The user must already exist on each node of the pool. +: The username under which the task is run. The user must already exist on each node of the pool. `azure.batch.pools..scaleFormula` -: Specify the scale formula for the pool identified with ``. See Azure Batch [scaling documentation](https://docs.microsoft.com/en-us/azure/batch/batch-automatic-scaling) for details. +: The [scale formula](https://docs.microsoft.com/en-us/azure/batch/batch-automatic-scaling) for the pool identified with ``. `azure.batch.pools..scaleInterval` -: Specify the interval at which to automatically adjust the Pool size according to the autoscale formula. The minimum and maximum value are 5 minutes and 168 hours respectively (default: `10 mins`). +: The interval at which to automatically adjust the Pool size according to the autoscale formula. Must be at least 5 minutes and at most 168 hours (default: `10 mins`). `azure.batch.pools..schedulePolicy` -: Specify the scheduling policy for the pool identified with ``. It can be either `spread` or `pack` (default: `spread`). +: The scheduling policy for the pool identified with ``. Can be either `spread` or `pack` (default: `spread`). `azure.batch.pools..sku` -: Specify the ID of the Compute Node agent SKU which the pool identified with `` supports (default: `batch.node.centos 8`). +: The ID of the Compute Node agent SKU which the pool identified with `` supports (default: `batch.node.centos 8`). 
-`azure.batch.pools..startTask.script` +`azure.batch.pools..startTask.privileged` : :::{versionadded} 24.03.0-edge ::: -: Specify the `startTask` that is executed as the node joins the Azure Batch node pool. +: Enable the `startTask` to run with elevated access (default: `false`). -`azure.batch.pools..startTask.privileged` +`azure.batch.pools..startTask.script` : :::{versionadded} 24.03.0-edge ::: -: Enable the `startTask` to run with elevated access (default: `false`). +: The `startTask` that is executed as the node joins the Azure Batch node pool. `azure.batch.pools..virtualNetwork` : :::{versionadded} 23.03.0-edge ::: -: Specify the subnet ID of a virtual network in which to create the pool. +: The subnet ID of a virtual network in which to create the pool. `azure.batch.pools..vmCount` -: Specify the number of virtual machines provisioned by the pool identified with ``. +: The number of virtual machines provisioned by the pool identified with ``. `azure.batch.pools..vmType` -: Specify the virtual machine type used by the pool identified with ``. +: The virtual machine type used by the pool identified with ``. -`azure.batch.poolIdentityClientId` -: :::{versionadded} 25.05.0-edge +`azure.batch.terminateJobsOnCompletion` +: :::{versionadded} 23.05.0-edge ::: -: Specify the client ID for an Azure [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) that is available on all Azure Batch node pools. This identity will be used by Fusion to authenticate to Azure storage. If set to `'auto'`, Fusion will use the first available managed identity. +: When the workflow completes, set all jobs to terminate on task completion (default: `true`). `azure.managedIdentity.clientId` -: Specify the client ID for an Azure [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview). See {ref}`azure-managed-identities` for more details. 
Defaults to environment variable `AZURE_MANAGED_IDENTITY_USER`. +: The client ID for an Azure [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview). Defaults to environment variable `AZURE_MANAGED_IDENTITY_USER`. `azure.managedIdentity.system` -: When `true`, uses the system-assigned [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) to authenticate Azure resources. See {ref}`azure-managed-identities` for more details. Defaults to environment variable `AZURE_MANAGED_IDENTITY_SYSTEM`. +: When `true`, use the system-assigned [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) to authenticate Azure resources. Defaults to environment variable `AZURE_MANAGED_IDENTITY_SYSTEM`. + +`azure.registry.password` +: The password to connect to a private container registry. `azure.registry.server` -: Specify the container registry from which to pull the Docker images (default: `docker.io`). +: The container registry from which to pull the Docker images (default: `docker.io`). `azure.registry.userName` -: Specify the username to connect to a private container registry. - -`azure.registry.password` -: Specify the password to connect to a private container registry. +: The username to connect to a private container registry. `azure.retryPolicy.delay` -: Delay when retrying failed API requests (default: `500ms`). +: Delay when retrying failed API requests (default: `250ms`). `azure.retryPolicy.jitter` : Jitter value when retrying failed API requests (default: `0.25`). @@ -515,17 +479,23 @@ The following settings are available: `azure.retryPolicy.maxDelay` : Max delay when retrying failed API requests (default: `90s`). +`azure.storage.accountKey` +: The blob storage account key. Defaults to environment variable `AZURE_STORAGE_ACCOUNT_KEY`. + `azure.storage.accountName` : The blob storage account name. 
Defaults to environment variable `AZURE_STORAGE_ACCOUNT_NAME`. -`azure.storage.accountKey` -: The blob storage account key. Defaults to environment variable `AZURE_STORAGE_ACCOUNT_KEY`. +`azure.storage.fileShares..mountOptions` +: The file share mount options. + +`azure.storage.fileShares..mountPath` +: The file share mount path. `azure.storage.sasToken` -: The blob storage shared access signature token, which can be provided as an alternative to `accountKey`. Defaults to environment variable `AZURE_STORAGE_SAS_TOKEN`. +: The blob storage shared access signature (SAS) token, which can be provided instead of an account key. Defaults to environment variable `AZURE_STORAGE_SAS_TOKEN`. `azure.storage.tokenDuration` -: The duration of the shared access signature token created by Nextflow when the `sasToken` option is *not* specified (default: `48h`). +: The duration of the SAS token generated by Nextflow when the `sasToken` option is *not* specified (default: `48h`). (config-charliecloud)= @@ -535,31 +505,30 @@ The `charliecloud` scope controls how [Charliecloud](https://hpc.github.io/charl The following settings are available: -`charliecloud.enabled` -: Execute tasks with Charliecloud containers (default: `false`). - -`charliecloud.writeFake` -: Enable `writeFake` with charliecloud (default: `true`) This allows to run containers from storage in writeable mode, using overlayfs. `writeFake` requires unprivileged `overlayfs` (Linux kernel >= 5.11). For full support, tempfs with xattrs in the user namespace (Linux kernel >= 6.6) is required, see [charliecloud documentation](https://hpc.github.io/charliecloud/ch-run.html#ch-run-overlay) for details. - `charliecloud.cacheDir` : The directory where remote Charliecloud images are stored. When using a computing cluster it must be a shared folder accessible to all compute nodes. +`charliecloud.enabled` +: Execute tasks with Charliecloud containers (default: `false`). 
+ `charliecloud.envWhitelist` : Comma separated list of environment variable names to be included in the container environment. `charliecloud.pullTimeout` : The amount of time the Charliecloud pull can last, exceeding which the process is terminated (default: `20 min`). +`charliecloud.registry` +: The registry from where images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. + `charliecloud.runOptions` : Specify extra command line options supported by the `ch-run` command. `charliecloud.temp` -: Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `auto` to create a temporary directory each time a container is created. - -`charliecloud.registry` -: The registry from where images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. +: Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. -Read the {ref}`container-charliecloud` page to learn more about how to use Charliecloud containers with Nextflow. +`charliecloud.writeFake` +: Run containers from storage in writeable mode using overlayfs (default: `true`). +: This option requires unprivileged `overlayfs` (Linux kernel >= 5.11). For full support, tempfs with xattrs in the user namespace (Linux kernel >= 6.6) is required. See [charliecloud documentation](https://hpc.github.io/charliecloud/ch-run.html#ch-run-overlay) for details. (config-conda)= @@ -569,30 +538,28 @@ The `conda` scope controls the creation of Conda environments by the Conda packa The following settings are available: -`conda.enabled` -: Enables the use of Conda environments (default: `false`). - `conda.cacheDir` -: Defines the path where Conda environments are stored. 
Ensure the path is accessible from all compute nodes when using a shared file system. +: The path where Conda environments are stored. It should be accessible from all compute nodes when using a shared file system. `conda.channels` -: Defines the Conda channels that can be used to resolve Conda packages. Channels can be defined as a list (e.g., `['bioconda','conda-forge']`) or a comma separated list string (e.g., `'bioconda,conda-forge'`). Channel priority decreases from left to right. +: The list of Conda channels that can be used to resolve Conda packages. Channel priority decreases from left to right. `conda.createOptions` -: Defines extra command line options supported by the `conda create` command. See the [Conda documentation](https://docs.conda.io/projects/conda/en/latest/commands/create.html) for more information. +: Extra command line options for the `conda create` command. See the [Conda documentation](https://docs.conda.io/projects/conda/en/latest/commands/create.html) for more information. `conda.createTimeout` -: Defines the amount of time the Conda environment creation can last (default: `20 min`). The creation process is terminated when the timeout is exceeded. +: The amount of time to wait for the Conda environment to be created before failing (default: `20 min`). + +`conda.enabled` +: Execute tasks with Conda environments (default: `false`). `conda.useMamba` -: Uses the `mamba` binary instead of `conda` to create the Conda environments (default: `false`). See the [Mamba documentation](https://github.com/mamba-org/mamba) for more information about Mamba. +: Use [Mamba](https://github.com/mamba-org/mamba) instead of `conda` to create Conda environments (default: `false`). `conda.useMicromamba` : :::{versionadded} 22.05.0-edge ::: -: Uses the `micromamba` binary instead of `conda` to create Conda environments (default: `false`). 
See the [Micromamba documentation](https://mamba.readthedocs.io/en/latest/user_guide/micromamba.html) for more information about Micromamba. - -See {ref}`conda-page` for more information about using Conda environments with Nextflow. +: Use [Micromamba](https://mamba.readthedocs.io/en/latest/user_guide/micromamba.html) instead of `conda` to create Conda environments (default: `false`). (config-dag)= @@ -602,23 +569,23 @@ The `dag` scope controls the workflow diagram generated by Nextflow. The following settings are available: -`dag.enabled` -: When `true` enables the generation of the DAG file (default: `false`). - `dag.depth` : :::{versionadded} 23.10.0 ::: -: *Only supported by the HTML and Mermaid renderers.* +: *Supported by the HTML and Mermaid renderers.* : Controls the maximum depth at which to render sub-workflows (default: no limit). `dag.direction` : :::{versionadded} 23.10.0 ::: -: *Supported by Graphviz, DOT, HTML and Mermaid renderers.* +: *Supported by the Graphviz, DOT, HTML and Mermaid renderers.* : Controls the direction of the DAG, can be `'LR'` (left-to-right) or `'TB'` (top-to-bottom) (default: `'TB'`). +`dag.enabled` +: When `true` enables the generation of the DAG file (default: `false`). + `dag.file` -: Graph file name (default: `dag-.html`). +: Graph file name (default: `'dag-.html'`). `dag.overwrite` : When `true` overwrites any existing DAG file with the same name (default: `false`). @@ -629,8 +596,6 @@ The following settings are available: : *Only supported by the HTML and Mermaid renderers.* : When `false`, channel names are omitted, operators are collapsed, and empty workflow inputs are removed (default: `false`). -Read the {ref}`workflow-diagram` page to learn more about the workflow graph that can be generated by Nextflow. - (config-docker)= ## `docker` @@ -649,13 +614,13 @@ The following settings are available: : Comma separated list of environment variable names to be included in the container environment. 
`docker.fixOwnership` -: Fix ownership of files created by the docker container (default: `false`). +: Fix ownership of files created by the Docker container (default: `false`). `docker.legacy` : Use command line options removed since Docker 1.10.0 (default: `false`). `docker.mountFlags` -: Add the specified flags to the volume mounts e.g. `mountFlags = 'ro,Z'`. +: Add the specified flags to the volume mounts e.g. `'ro,Z'`. `docker.registry` : The registry from where Docker images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. @@ -663,10 +628,11 @@ The following settings are available: `docker.registryOverride` : :::{versionadded} 25.06.0-edge ::: -: When `true`, forces the override of the registry name in fully qualified container image names with the registry specified by `docker.registry` (default: `false`). This setting allows you to redirect container image pulls from their original registry to a different registry, such as a private mirror or proxy. +: When `true`, forces the override of the registry name in fully qualified container image names with the registry specified by `docker.registry` (default: `false`). +: This setting allows you to redirect container image pulls from their original registry to a different registry, such as a private mirror or proxy. `docker.remove` -: Clean-up the container after the execution (default: `true`). See the [Docker documentation](https://docs.docker.com/engine/reference/run/#clean-up---rm) for details. +: Clean up the container after the execution (default: `true`). See the [Docker documentation](https://docs.docker.com/engine/reference/run/#clean-up---rm) for details. `docker.runOptions` : Specify extra command line options supported by the `docker run` command. See the [Docker documentation](https://docs.docker.com/engine/reference/run/) for details. 
@@ -675,13 +641,11 @@ The following settings are available: : Executes Docker run command as `sudo` (default: `false`). `docker.temp` -: Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `auto` to create a temporary directory each time a container is created. +: Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. `docker.tty` : Allocates a pseudo-tty (default: `false`). -Read the {ref}`container-docker` page to learn more about how to use Docker containers with Nextflow. - (config-env)= ## `env` @@ -723,24 +687,30 @@ The following settings are available: : The project or organization account that should be charged for running the pipeline jobs. `executor.cpus` -: The maximum number of CPUs made available by the underlying system. Used only by the `local` executor. +: *Used only by the local executor.* +: The maximum number of CPUs made available by the underlying system. `executor.dumpInterval` -: Determines how often to log the executor status (default: `5min`). +: Determines how often to log the executor status (default: `5 min`). `executor.exitReadTimeout` : *Used only by grid executors.* -: Determines how long to wait before returning an error status when a process is terminated but the `.exitcode` file does not exist or is empty (default: `270 sec`). +: Determines how long to wait for the `.exitcode` file to be created after the task has completed, before returning an error status (default: `270 sec`). `executor.jobName` -: Determines the name of jobs submitted to the underlying cluster executor e.g. `executor.jobName = { "$task.name - $task.hash" }`. Make sure the resulting job name matches the validation constraints of the underlying batch scheduler. 
-: This setting is supported by the following executors: Bridge, Condor, Flux, HyperQueue, Lsf, Moab, Nqsii, Oar, PBS, PBS Pro, SGE, SLURM and Google Batch. +: *Used only by grid executors and Google Batch.* +: Determines the name of jobs submitted to the underlying cluster executor: + ```groovy + executor.jobName = { "$task.name - $task.hash" } + ``` +: The job name should satisfy the validation constraints of the underlying scheduler. `executor.killBatchSize` : Determines the number of jobs that can be killed in a single command execution (default: `100`). `executor.memory` -: The maximum amount of memory made available by the underlying system. Used only by the `local` executor. +: *Used only by the local executor.* +: The maximum amount of memory made available by the underlying system. `executor.name` : The name of the executor to be used (default: `local`). @@ -749,18 +719,18 @@ The following settings are available: : :::{versionadded} 23.07.0-edge ::: : *Used only by the {ref}`slurm-executor` executor.* -: When `true`, specifies memory allocations for SLURM jobs as `--mem-per-cpu ` instead of `--mem `. +: When `true`, memory allocations for SLURM jobs are specified as `--mem-per-cpu ` instead of `--mem `. `executor.perJobMemLimit` : *Used only by the {ref}`lsf-executor` executor.* -: Specifies Platform LSF *per-job* memory limit mode (default: `false`). +: Enables the *per-job* memory limit mode for LSF jobs. `executor.perTaskReserve` : *Used only by the {ref}`lsf-executor` executor.* -: Specifies Platform LSF *per-task* memory reserve mode (default: `false`). +: Enables the *per-task* memory reserve mode for LSF jobs. `executor.pollInterval` -: Defines the polling frequency for process termination detection. Default varies for each executor (see below). +: Determines how often to check for process termination. Default varies for each executor. 
`executor.queueGlobalStatus` : :::{versionadded} 23.01.0-edge @@ -768,11 +738,14 @@ The following settings are available: : Determines how job status is retrieved. When `false` only the queue associated with the job execution is queried. When `true` the job status is queried globally i.e. irrespective of the submission queue (default: `false`). `executor.queueSize` -: The number of tasks the executor will handle in a parallel manner. A queue size of zero corresponds to no limit. Default varies for each executor (see below). +: The number of tasks the executor will handle in a parallel manner. A queue size of zero corresponds to no limit. Default varies for each executor. `executor.queueStatInterval` : *Used only by grid executors.* -: Determines how often to fetch the queue status from the scheduler (default: `1min`). +: Determines how often to fetch the queue status from the scheduler (default: `1 min`). + +`executor.submitRateLimit` +: Determines the max rate of job submission per time unit, for example `'10sec'` (10 jobs per second) or `'50/2min'` (50 jobs every 2 minutes) (default: unlimited). `executor.retry.delay` : :::{versionadded} 22.03.0-edge @@ -786,7 +759,7 @@ The following settings are available: : *Used only by grid executors.* : Jitter value when retrying failed job submissions (default: `0.25`). -`executor.retry.maxAttempt` +`executor.retry.maxAttempts` : :::{versionadded} 22.03.0-edge ::: : *Used only by grid executors.* @@ -798,14 +771,16 @@ The following settings are available: : *Used only by grid executors.* : Max delay when retrying failed job submissions (default: `30s`). -`executor.submit.retry.reason` +`executor.retry.reason` : :::{versionadded} 22.03.0-edge ::: +: :::{versionchanged} 25.10.0 + This option was renamed from `executor.submit.retry.reason` to `executor.retry.reason`. + ::: : *Used only by grid executors.* -: Regex pattern that when verified cause a failed submit operation to be re-tried (default: `Socket timed out`). 
+: Regex pattern that when verified causes a failed submit operation to be re-tried (default: `Socket timed out`). -`executor.submitRateLimit` -: Determines the max rate of job submission per time unit, for example `'10sec'` (10 jobs per second) or `'50/2min'` (50 jobs every 2 minutes) (default: unlimited). +### Executor-specific defaults Some executor settings have different default values depending on the executor. @@ -818,6 +793,8 @@ Some executor settings have different default values depending on the executor. | Kubernetes | `100` | `5s` | | Local | N/A | `100ms` | +### Executor-specific configuration + Executor config settings can be applied to specific executors by prefixing the executor name with the symbol `$` and using it as special scope. For example: ```groovy @@ -849,33 +826,30 @@ The `fusion` scope provides advanced configuration for the use of the {ref}`Fusi The following settings are available: -`fusion.enabled` -: Enable Fusion file system (default: `false`). - `fusion.cacheSize` : :::{versionadded} 23.11.0-edge ::: -: Fusion client local cache size limit. +: The maximum size of the local cache used by the Fusion client. `fusion.containerConfigUrl` -: URL for downloading the container layer provisioning the Fusion client. +: The URL of the container layer that provides the Fusion client. + +`fusion.enabled` +: Enable the Fusion file system (default: `false`). `fusion.exportStorageCredentials` -: :::{versionadded} 23.05.0-edge - Previously named `fusion.exportAwsAccessKeys`. - ::: -: Enable access to credentials for the underlying object storage are exported to the task environment (default: `false`). +: Export the access credentials required by the underlying object storage to the task execution environment (default: `false`). `fusion.logLevel` -: Fusion client log level. +: The log level of the Fusion client. `fusion.logOutput` -: Log output location. +: The output location of the Fusion log. 
`fusion.privileged` : :::{versionadded} 23.10.0 ::: -: Enable privileged containers for Fusion (default: `true`) +: Enable privileged containers for Fusion (default: `true`). : Non-privileged use is supported only on Kubernetes with the [k8s-fuse-plugin](https://github.com/nextflow-io/k8s-fuse-plugin) or a similar FUSE device plugin. `fusion.snapshots` @@ -885,8 +859,7 @@ The following settings are available: : Enable Fusion snapshotting (preview, default: `false`). This feature allows Fusion to automatically restore a job when it is interrupted by a spot reclamation. `fusion.tags` -: Pattern for applying tags to files created via the Fusion client (default: `[.command.*|.exitcode|.fusion.*](nextflow.io/metadata=true),[*](nextflow.io/temporary=true)`). -: Set to `false` to disable. +: The pattern that determines how tags are applied to files created via the Fusion client (default: `[.command.*|.exitcode|.fusion.*](nextflow.io/metadata=true),[*](nextflow.io/temporary=true)`). Set to `false` to disable tags. (config-google)= @@ -894,20 +867,20 @@ The following settings are available: The `google` scope allows you to configure the interactions with Google Cloud, including Google Cloud Batch and Google Cloud Storage. -Read the {ref}`google-page` page for more information. +The following settings are available: `google.enableRequesterPaysBuckets` -: When `true` uses the given Google Cloud project ID as the billing project for storage access. This is required when accessing data from *requester pays enabled* buckets. See [Requester Pays on Google Cloud Storage documentation](https://cloud.google.com/storage/docs/requester-pays) (default: `false`). +: Use the given Google Cloud project ID as the billing project for storage access (default: `false`). Required when accessing data from [requester pays](https://cloud.google.com/storage/docs/requester-pays) buckets. 
`google.httpConnectTimeout` : :::{versionadded} 23.06.0-edge ::: -: Defines the HTTP connection timeout for Cloud Storage API requests (default: `'60s'`). +: The HTTP connection timeout for Cloud Storage API requests (default: `'60s'`). `google.httpReadTimeout` : :::{versionadded} 23.06.0-edge ::: -: Defines the HTTP read timeout for Cloud Storage API requests (default: `'60s'`). +: The HTTP read timeout for Cloud Storage API requests (default: `'60s'`). `google.location` : The Google Cloud location where jobs are executed (default: `us-central1`). @@ -918,28 +891,30 @@ Read the {ref}`google-page` page for more information. `google.batch.allowedLocations` : :::{versionadded} 22.12.0-edge ::: -: Define the set of allowed locations for VMs to be provisioned. See [Google documentation](https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#locationpolicy) for details (default: no restriction). +: The set of [allowed locations](https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#locationpolicy) for VMs to be provisioned (default: no restriction). `google.batch.autoRetryExitCodes` : :::{versionadded} 24.07.0-edge ::: -: Defines the list of exit codes that will trigger Google Batch to automatically retry the job (default: `[50001]`). For this setting to take effect, `google.batch.maxSpotAttempts` must be greater than 0. See [Google Batch documentation](https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes) for the complete list of retryable exit codes. +: The list of exit codes that should be automatically retried by Google Batch when `google.batch.maxSpotAttempts` is greater than 0 (default: `[50001]`). +: See [Google Batch documentation](https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes) for the complete list of retryable exit codes. `google.batch.bootDiskImage` : :::{versionadded} 24.08.0-edge ::: -: Set the image URI of the virtual machine boot disk, e.g `batch-debian`. 
See [Google documentation](https://cloud.google.com/batch/docs/vm-os-environment-overview#vm-os-image-options) for details (default: none). +: The image URI of the virtual machine boot disk, e.g `batch-debian` (default: none). +: See [Google documentation](https://cloud.google.com/batch/docs/vm-os-environment-overview#vm-os-image-options) for details. `google.batch.bootDiskSize` -: Set the size of the virtual machine boot disk, e.g `50.GB` (default: none). +: The size of the virtual machine boot disk, e.g `50.GB` (default: none). `google.batch.cpuPlatform` -: Set the minimum CPU Platform, e.g. `'Intel Skylake'`. See [Specifying a minimum CPU Platform for VM instances](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#specifications) (default: none). +: The [minimum CPU Platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#specifications), e.g. `'Intel Skylake'` (default: none). `google.batch.gcsfuseOptions` : :::{versionadded} 25.03.0-edge ::: -: Defines a list of custom mount options for `gcsfuse` (default: `['-o rw', '-implicit-dirs']`). +: List of custom mount options for `gcsfuse` (default: `['-o rw', '-implicit-dirs']`). `google.batch.maxSpotAttempts` : :::{versionadded} 23.11.0-edge @@ -953,36 +928,35 @@ Read the {ref}`google-page` page for more information. `google.batch.network` : The URL of an existing network resource to which the VM will be attached. - You can specify the network as a full or partial URL. For example, the following are all valid URLs: +: You can specify the network as a full or partial URL. 
For example, the following are all valid URLs: - - https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} - - projects/{project}/global/networks/{network} - - global/networks/{network} + - `https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}` + - `projects/{project}/global/networks/{network}` + - `global/networks/{network}` `google.batch.networkTags` -: The network tags to be applied to the instances created by Google Batch jobs. Network tags are used to apply firewall rules and control network access (e.g., `['allow-ssh', 'allow-http']`). - -: Network tags are ignored when using instance templates. See [Add network tags](https://cloud.google.com/vpc/docs/add-remove-network-tags) for more information. +: The [network tags](https://cloud.google.com/vpc/docs/add-remove-network-tags) to be applied to the instances created by Google Batch jobs (e.g., `['allow-ssh', 'allow-http']`). +: Network tags are ignored when using instance templates. `google.batch.serviceAccountEmail` -: Define the Google service account email to use for the pipeline execution. If not specified, the default Compute Engine service account for the project will be used. - -: Note that the `google.batch.serviceAccountEmail` service account will only be used for spawned jobs, not for the Nextflow process itself. See the [Google Cloud](https://www.nextflow.io/docs/latest/google.html#credentials) documentation for more information on credentials. +: The Google service account email to use for the pipeline execution. If not specified, the default Compute Engine service account for the project will be used. +: This service account will only be used for tasks submitted by Nextflow, not for Nextflow itself. See {ref}`google-credentials` for more information on Google Cloud credentials. `google.batch.spot` -: When `true` enables the usage of *spot* virtual machines or `false` otherwise (default: `false`). 
+: Enable the use of spot virtual machines (default: `false`). `google.batch.subnetwork` : The URL of an existing subnetwork resource in the network to which the VM will be attached. - You can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: +: You can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: - - https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork} - - projects/{project}/regions/{region}/subnetworks/{subnetwork} - - regions/{region}/subnetworks/{subnetwork} + - `https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}` + - `projects/{project}/regions/{region}/subnetworks/{subnetwork}` + - `regions/{region}/subnetworks/{subnetwork}` `google.batch.usePrivateAddress` -: When `true` the VM will NOT be provided with a public IP address, and only contain an internal IP. If this option is enabled, the associated job can only load docker images from Google Container Registry, and the job executable cannot use external services other than Google APIs (default: `false`). +: Do not provision public IP addresses for VMs, such that they only have an internal IP address (default: `false`). +: When this option is enabled, jobs can only load Docker images from Google Container Registry, and cannot use external services other than Google APIs. `google.storage.retryPolicy.maxAttempts` : :::{versionadded} 23.11.0-edge @@ -1008,15 +982,15 @@ The `k8s` scope controls the deployment and execution of workflow applications i The following settings are available: `k8s.autoMountHostPaths` -: Automatically mounts host paths into the task pods (default: `false`). Only intended for development purposes when using a single node. +: Automatically mount host paths into the task pods (default: `false`). Only intended for development purposes when using a single node. 
`k8s.computeResourceType` : :::{versionadded} 22.05.0-edge ::: -: Define whether use Kubernetes `Pod` or `Job` resource type to carry out Nextflow tasks (default: `Pod`). +: Whether to use Kubernetes `Pod` or `Job` resource type to carry out Nextflow tasks (default: `Pod`). `k8s.context` -: Defines the Kubernetes [configuration context name](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) to use. +: The Kubernetes [configuration context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) to use. `k8s.cpuLimits` : :::{versionadded} 24.04.0 @@ -1024,13 +998,10 @@ The following settings are available: : When `true`, set both the pod CPUs `request` and `limit` to the value specified by the `cpus` directive, otherwise set only the `request` (default: `false`). : This setting is useful when a K8s cluster requires a CPU limit to be defined through a [LimitRange](https://kubernetes.io/docs/concepts/policy/limit-range/). -`k8s.debug.yaml` -: When `true`, saves the pod spec for each task to `.command.yaml` in the task directory (default: `false`). - `k8s.fetchNodeName` : :::{versionadded} 22.05.0-edge ::: -: If you trace the hostname, activate this option (default: `false`). +: Include the hostname of each task in the execution trace (default: `false`). `k8s.fuseDevicePlugin` : :::{versionadded} 24.01.0-edge @@ -1040,68 +1011,64 @@ The following settings are available: `k8s.httpConnectTimeout` : :::{versionadded} 22.10.0 ::: -: Defines the Kubernetes client request HTTP connection timeout e.g. `'60s'`. +: The Kubernetes HTTP client request connection timeout e.g. `'60s'`. `k8s.httpReadTimeout` : :::{versionadded} 22.10.0 ::: -: Defines the Kubernetes client request HTTP connection read timeout e.g. `'60s'`. +: The Kubernetes HTTP client request connection read timeout e.g. `'60s'`. -`k8s.launchDir` -: Defines the path where the workflow is launched and the user data is stored. 
This must be a path in a shared K8s persistent volume (default: `/`). +`k8s.imagePullPolicy` +: The strategy for pulling container images. Can be `IfNotPresent`, `Always`, `Never`. -`k8s.maxErrorRetry` -: :::{versionadded} 22.09.6-edge - ::: -: Defines the Kubernetes API max request retries (default: `4`). +`k8s.launchDir` +: The path where the workflow is launched and the user data is stored (default: `/`). Must be a path in a shared K8s persistent volume. `k8s.namespace` -: Defines the Kubernetes namespace to use (default: `default`). +: The Kubernetes namespace to use (default: `default`). `k8s.pod` -: Allows the definition of one or more pod configuration options such as environment variables, config maps, secrets, etc. It allows the same settings as the {ref}`process-pod` process directive. +: Additional pod configuration options such as environment variables, config maps, secrets, etc. Allows the same settings as the {ref}`process-pod` process directive. : When using the `kuberun` command, this setting also applies to the submitter pod. `k8s.projectDir` -: Defines the path where Nextflow projects are downloaded. This must be a path in a shared K8s persistent volume (default: `/projects`). - -`k8s.pullPolicy` -: Defines the strategy to be used to pull the container image e.g. `'Always'`. - -`k8s.retryPolicy.delay` -: Delay when retrying failed API requests (default: `500ms`). - -`k8s.retryPolicy.jitter` -: Jitter value when retrying failed API requests (default: `0.25`). - -`k8s.retryPolicy.maxAttempts` -: Max attempts when retrying failed API requests (default: `4`). - -`k8s.retryPolicy.maxDelay` -: Max delay when retrying failed API requests (default: `90s`). +: The path where Nextflow projects are downloaded (default: `/projects`). Must be a path in a shared K8s persistent volume. `k8s.runAsUser` -: Defines the user ID to be used to run the containers. Shortcut for the `securityContext` option. +: The user ID to be used to run the containers. 
Shortcut for the `securityContext` option. `k8s.securityContext` -: Defines the [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for all pods. +: The [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) to use for all pods. `k8s.serviceAccount` -: Defines the Kubernetes [service account name](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to use. +: The Kubernetes [service account name](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to use. `k8s.storageClaimName` -: The name of the persistent volume claim where store workflow result data. +: The name of the persistent volume claim where the shared work directory is stored. `k8s.storageMountPath` -: The path location used to mount the persistent volume claim (default: `/workspace`). +: The mount path for the persistent volume claim (default: `/workspace`). `k8s.storageSubPath` : The path in the persistent volume to be mounted (default: `/`). `k8s.workDir` -: Defines the path where the workflow temporary data is stored. This must be a path in a shared K8s persistent volume (default: `/work`). +: The path of the shared work directory (default: `/work`). Must be a path in a shared K8s persistent volume. + +`k8s.debug.yaml` +: Save the pod spec for each task to `.command.yaml` in the task directory (default: `false`). + +`k8s.retryPolicy.delay` +: Delay when retrying failed API requests (default: `250ms`). + +`k8s.retryPolicy.jitter` +: Jitter value when retrying failed API requests (default: `0.25`). -See the {ref}`k8s-page` page for more details. +`k8s.retryPolicy.maxAttempts` +: Max attempts when retrying failed API requests (default: `4`). + +`k8s.retryPolicy.maxDelay` +: Max delay when retrying failed API requests (default: `90s`). 
(config-lineage)= @@ -1115,7 +1082,7 @@ The following settings are available: : Enable generation of lineage metadata (default: `false`). `lineage.store.location` -: Defines the location of the lineage metadata store (default: `./.lineage`). +: The location of the lineage metadata store (default: `./.lineage`). (config-mail)= @@ -1126,7 +1093,7 @@ The `mail` scope controls the mail server used to send email notifications. The following settings are available: `mail.debug` -: Enables Java Mail logging for debugging purposes (default: `false`). +: Enable Java Mail logging for debugging purposes (default: `false`). `mail.from` : Default email sender address. @@ -1134,15 +1101,15 @@ The following settings are available: `mail.smtp.host` : Host name of the mail server. +`mail.smtp.password` +: User password to connect to the mail server. + `mail.smtp.port` : Port number of the mail server. `mail.smtp.user` : User name to connect to the mail server. -`mail.smtp.password` -: User password to connect to the mail server. - `mail.smtp.proxy.host` : Host name of an HTTP web proxy server that will be used for connections to the mail server. @@ -1152,35 +1119,11 @@ The following settings are available: `mail.smtp.*` : Any SMTP configuration property supported by the [Java Mail API](https://javaee.github.io/javamail/), which Nextflow uses to send emails. See the table of available properties [here](https://javaee.github.io/javamail/docs/api/com/sun/mail/smtp/package-summary.html#properties). -For example, the following snippet shows how to configure Nextflow to send emails through the [AWS Simple Email Service](https://aws.amazon.com/ses/): - -```groovy -mail { - smtp.host = 'email-smtp.us-east-1.amazonaws.com' - smtp.port = 587 - smtp.user = '' - smtp.password = '' - smtp.auth = true - smtp.starttls.enable = true - smtp.starttls.required = true -} -``` - -:::{note} -Some versions of Java (e.g. 
Java 11 Corretto) do not default to TLS v1.2, and as a result may have issues with 3rd party integrations that enforce TLS v1.2 (e.g. Azure Active Directory OIDC). This problem can be addressed by setting the following config option: - -```groovy -mail { - smtp.ssl.protocols = 'TLSv1.2' -} -``` -::: - (config-manifest)= ## `manifest` -The `manifest` scope allows you to define some meta-data information needed when publishing or running your pipeline. +The `manifest` scope allows you to define some metadata that is useful when publishing or running your pipeline. The following settings are available: @@ -1193,13 +1136,27 @@ The following settings are available: `manifest.contributors` : :::{versionadded} 24.09.0-edge ::: -: List of project contributors. Should be a list of maps. The following fields are supported in the contributor map: - - `name`: the contributor's name - - `affiliation`: the contributor's affiliated organization - - `email`: the contributor's email address - - `github`: the contributor's GitHub URL - - `contribution`: list of contribution types, each element can be one of `'author'`, `'maintainer'`, or `'contributor'` - - `orcid`: the contributor's [ORCID](https://orcid.org/) URL +: List of project contributors. Should be a list of maps. + +: The following fields are supported in the contributor map: + + `name` + : The contributor name. + + `affiliation` + : The contributor affiliated organization. + + `email` + : The contributor email address. + + `github` + : The contributor GitHub URL. + + `contribution` + : List of contribution types, each element can be one of `'author'`, `'maintainer'`, or `'contributor'`. + + `orcid` + : The contributor [ORCID](https://orcid.org/) URL. `manifest.defaultBranch` : Git repository default branch (default: `master`). @@ -1213,6 +1170,10 @@ The following settings are available: `manifest.doi` : Project related publication DOI identifier. 
+`manifest.gitmodules` +: Controls whether git sub-modules should be cloned with the main repository. +: Can be either a boolean value, a list of submodule names, or a comma-separated string of submodule names. + `manifest.homePage` : Project home page URL. @@ -1231,7 +1192,7 @@ The following settings are available: `manifest.nextflowVersion` : Minimum required Nextflow version. - This setting may be useful to ensure that a specific version is used: +: This setting may be useful to ensure that a specific version is used: ```groovy manifest.nextflowVersion = '1.2.3' // exact match @@ -1241,8 +1202,10 @@ The following settings are available: manifest.nextflowVersion = '!>=1.2' // with ! prefix, stop execution if current version does not match required version. ``` +: See {ref}`stdlib-types-versionnumber` for details. + `manifest.organization` -: Project organization +: Project organization. `manifest.recurseSubmodules` : Pull submodules recursively from the Git repository. @@ -1250,8 +1213,6 @@ The following settings are available: `manifest.version` : Project version number. -Read the {ref}`sharing-page` page to learn how to publish your pipeline to GitHub, BitBucket or GitLab. - (config-nextflow)= ## `nextflow` @@ -1280,26 +1241,24 @@ The `workflow.output.retryPolicy` settings were moved to `nextflow.retryPolicy`. ## `notification` -The `notification` scope allows you to define the automatic sending of a notification email message when the workflow execution terminates. +The `notification` scope controls the automatic sending of an email notification on workflow completion. + +The following settings are available: `notification.attributes` -: A map object modelling the variables that can be used in the template file. +: Map of variables that can be used in the template file. `notification.enabled` -: Send a notification message when the workflow execution completes (default: `false`). 
+: Send an email notification when the workflow execution completes (default: `false`). `notification.from` -: Sender address for the notification email message. +: Sender address for the email notification. `notification.template` -: Path of a template file which provides the content of the notification message. +: Path of a template file containing the contents of the email notification. `notification.to` -: Recipient address for the notification email. Multiple addresses can be specified separating them with a comma. - -The notification message is sent my using the STMP server defined in the configuration {ref}`mail scope`. - -If no mail configuration is provided, it tries to send the notification message by using the external mail command eventually provided by the underlying system (e.g. `sendmail` or `mail`). +: Recipient address for the email notification. Multiple addresses can be specified as a comma-separated list. (config-podman)= @@ -1319,7 +1278,7 @@ The following settings are available: : Comma separated list of environment variable names to be included in the container environment. `podman.mountFlags` -: Add the specified flags to the volume mounts e.g. `mountFlags = 'ro,Z'`. +: Add the specified flags to the volume mounts e.g. `'ro,Z'`. `podman.registry` : The registry from where container images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. @@ -1331,9 +1290,7 @@ The following settings are available: : Specify extra command line options supported by the `podman run` command. `podman.temp` -: Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `auto` to create a temporary directory each time a container is created. - -Read the {ref}`container-podman` page to learn more about how to use Podman containers with Nextflow. +: Mounts a path of your choice as the `/tmp` directory in the container. 
Use the special value `'auto'` to create a temporary directory each time a container is created. (config-report)= @@ -1347,16 +1304,16 @@ The following settings are available: : Create the execution report on workflow completion (default: `false`). `report.file` -: The path of the created execution report file (default: `report-.html`). +: The path of the created execution report file (default: `'report-.html'`). `report.overwrite` -: When `true` overwrites any existing report file with the same name (default: `false`). +: Overwrite any existing report file with the same name (default: `false`). (config-sarus)= ## `sarus` -The ``sarus`` scope controls how [Sarus](https://sarus.readthedocs.io) containers are executed by Nextflow. +The `sarus` scope controls how [Sarus](https://sarus.readthedocs.io) containers are executed by Nextflow. The following settings are available: @@ -1364,16 +1321,15 @@ The following settings are available: : Execute tasks with Sarus containers (default: `false`). `sarus.envWhitelist` -: Comma separated list of environment variable names to be included in the container environment. +: Comma-separated list of environment variable names to be included in the container environment. `sarus.runOptions` -: Specify extra command line options supported by the `sarus run` command. For details see the [Sarus user guide](https://sarus.readthedocs.io/en/stable/user/user_guide.html). +: Specify extra command line options supported by the `sarus run` command. +: See the [Sarus user guide](https://sarus.readthedocs.io/en/stable/user/user_guide.html) for details. `sarus.tty` : Allocates a pseudo-tty (default: `false`). -Read the {ref}`container-sarus` page to learn more about how to use Sarus containers with Nextflow. - (config-shifter)= ## `shifter` @@ -1385,7 +1341,8 @@ The following settings are available: `shifter.enabled` : Execute tasks with Shifter containers (default: `false`). 
-Read the {ref}`container-shifter` page to learn more about how to use Shifter containers with Nextflow. +`shifter.envWhitelist` +: Comma-separated list of environment variable names to be included in the container environment. (config-singularity)= @@ -1396,13 +1353,13 @@ The `singularity` scope controls how [Singularity](https://sylabs.io/singularity The following settings are available: `singularity.autoMounts` -: Automatically mounts host paths in the executed container (default: `true`). It requires the `user bind control` feature to be enabled in your Singularity installation. : :::{versionchanged} 23.09.0-edge Default value was changed from `false` to `true`. ::: +: Automatically mount host paths in the executed container (default: `true`). It requires the `user bind control` feature to be enabled in your Singularity installation. `singularity.cacheDir` -: The directory where remote Singularity images are stored. When using a computing cluster it must be a shared folder accessible to all compute nodes. +: The directory where remote Singularity images are stored. When using a compute cluster, it must be a shared folder accessible to all compute nodes. `singularity.enabled` : Execute tasks with Singularity containers (default: `false`). @@ -1422,7 +1379,8 @@ The following settings are available: `singularity.ociAutoPull` : :::{versionadded} 23.12.0-edge ::: -: When enabled, OCI (and Docker) container images are pull and converted to a SIF image file format implicitly by the Singularity run command, instead of Nextflow. Requires Singularity 3.11 or later (default: `false`). +: *Requires Singularity 3.11 or later* +: When enabled, OCI (and Docker) container images are pulled and converted to a SIF image file format implicitly by the Singularity run command, instead of Nextflow (default: `false`). :::{note} Leave `ociAutoPull` disabled if willing to build a Singularity native image with Wave (see the {ref}`wave-singularity` section).
@@ -1431,14 +1389,16 @@ The following settings are available: `singularity.ociMode` : :::{versionadded} 23.12.0-edge ::: -: Enable OCI-mode, that allows running native OCI compliant container image with Singularity using `crun` or `runc` as low-level runtime. Note: it requires Singularity 4 or later. See `--oci` flag in the [Singularity documentation](https://docs.sylabs.io/guides/4.0/user-guide/oci_runtime.html#oci-mode) for more details and requirements (default: `false`). +: *Requires Singularity 4 or later* +: Enable OCI-mode, that allows running native OCI compliant container image with Singularity using `crun` or `runc` as low-level runtime (default: `false`). +: See `--oci` flag in the [Singularity documentation](https://docs.sylabs.io/guides/4.0/user-guide/oci_runtime.html#oci-mode) for more details and requirements. :::{note} Leave `ociMode` disabled if you are willing to build a Singularity native image with Wave (see the {ref}`wave-singularity` section). ::: `singularity.pullTimeout` -: The amount of time the Singularity pull can last, exceeding which the process is terminated (default: `20 min`). +: The amount of time the Singularity pull can last, after which the process is terminated (default: `20 min`). `singularity.registry` : :::{versionadded} 22.12.0-edge @@ -1448,8 +1408,6 @@ The following settings are available: `singularity.runOptions` : Specify extra command line options supported by `singularity exec`. -Read the {ref}`container-singularity` page to learn more about how to use Singularity containers with Nextflow. - (config-spack)= ## `spack` @@ -1459,18 +1417,20 @@ The `spack` scope controls the creation of a Spack environment by the Spack pack The following settings are available: `spack.cacheDir` -: Defines the path where Spack environments are stored. When using a compute cluster make sure to provide a shared file system path accessible from all compute nodes. +: The path where Spack environments are stored. 
It should be accessible from all compute nodes when using a shared file system. `spack.checksum` -: Enables checksum verification for source tarballs (default: `true`). Only disable when requesting a package version not yet encoded in the corresponding Spack recipe. +: Enable checksum verification of source tarballs (default: `true`). +: Only disable when requesting a package version not yet encoded in the corresponding Spack recipe. `spack.createTimeout` -: Defines the amount of time the Spack environment creation can last (default: `60 min`). The creation process is terminated when the timeout is exceeded. +: The amount of time to wait for the Spack environment to be created before failing (default: `60 min`). -`spack.parallelBuilds` -: Sets number of parallel package builds (Spack default: coincides with number of available CPU cores). +`spack.enabled` +: Execute tasks with Spack environments (default: `false`). -Nextflow does not allow for fine-grained configuration of the Spack package manager. Instead, this has to be performed directly on the host Spack installation. For more information see the [Spack documentation](https://spack.readthedocs.io). +`spack.parallelBuilds` +: The maximum number of parallel package builds (default: the number of available CPUs). (config-timeline)= @@ -1484,31 +1444,31 @@ The following settings are available: : Create the timeline report on workflow completion file (default: `false`). `timeline.file` -: Timeline file name (default: `timeline-<timestamp>.html`). +: Timeline file name (default: `'timeline-<timestamp>.html'`). `timeline.overwrite` -: When `true` overwrites any existing timeline file with the same name (default: `false`). +: Overwrite any existing timeline file with the same name (default: `false`). (config-tower)= ## `tower` -The `tower` scope controls the settings for the [Seqera Platform](https://seqera.io) (formerly Tower Cloud). +The `tower` scope controls the settings for [Seqera Platform](https://seqera.io) (formerly Tower Cloud). 
The following settings are available: `tower.accessToken` -: The unique access token specific to your account on an instance of Seqera Platform. +: The unique access token for your Seqera Platform account. : Your `accessToken` can be obtained from your Seqera Platform instance in the [Tokens page](https://cloud.seqera.io/tokens). `tower.enabled` -: Send workflow tracing and execution metrics to Seqera Platform (default: `false`). +: Enable workflow monitoring with Seqera Platform (default: `false`). `tower.endpoint` : The endpoint of your Seqera Platform instance (default: `https://api.cloud.seqera.io`). `tower.workspaceId` -: The ID of the Seqera Platform workspace where the run should be added (default: the launching user personal workspace). +: The workspace ID in Seqera Platform in which to save the run (default: the launching user's personal workspace). : The workspace ID can also be specified using the environment variable `TOWER_WORKSPACE_ID` (config file has priority over the environment variable). (config-trace)= @@ -1523,22 +1483,20 @@ The following settings are available: : Create the execution trace file on workflow completion (default: `false`). `trace.fields` -: Comma separated list of fields to be included in the report. The available fields are listed at {ref}`this page `. +: Comma-separated list of {ref}`trace fields ` to include in the report. `trace.file` -: Trace file name (default: `trace-.txt`). +: Trace file name (default: `'trace-.txt'`). `trace.overwrite` -: When `true` overwrites any existing trace file with the same name (default: `false`). +: Overwrite any existing trace file with the same name (default: `false`). `trace.raw` -: When `true` turns on raw number report generation i.e. date and time are reported as milliseconds and memory as number of bytes (default: `false`). +: Report trace metrics as raw numbers where applicable, i.e. report duration values in milliseconds and memory values in bytes (default: `false`). 
`trace.sep` : Character used to separate values in each row (default: `\t`). -Read the {ref}`trace-report` page to learn more about the execution report that can be generated by Nextflow. - (config-wave)= ## `wave` @@ -1550,22 +1508,46 @@ The following settings are available: `wave.enabled` : Enable the use of Wave containers (default: `false`). -`wave.build.repository` -: The container repository where images built by Wave are uploaded (note: the corresponding credentials must be provided in your Seqera Platform account). +`wave.endpoint` +: The Wave service endpoint (default: `https://wave.seqera.io`). + +`wave.freeze` +: :::{versionadded} 23.07.0-edge + ::: +: Enable Wave container freezing (default: `false`). Wave will provision a non-ephemeral container image that will be pushed to a container repository of your choice. +: The target registry must be specified using the `wave.build.repository` setting. It is also recommended to specify a custom cache repository using `wave.build.cacheRepository`. +: :::{note} + The container registry authentication must be managed by the underlying infrastructure. + ::: + +`wave.mirror` +: :::{versionadded} 24.09.1-edge + ::: +: Enable Wave container mirroring (default: `false`). Wave will mirror (i.e. copy) the containers in your pipeline to a container registry of your choice, so that pipeline tasks can pull the containers from this registry instead of the original one. +: The mirrored containers will have the same name, digest, and metadata. +: The target registry must be specified using the `wave.build.repository` setting. +: This option is only compatible with `wave.strategy = 'container'`. It cannot be used with `wave.freeze`. +: :::{note} + The container registry authentication must be managed by the underlying infrastructure. + ::: + +`wave.strategy` +: The strategy to be used when resolving multiple Wave container requirements (default: `'container,dockerfile,conda'`). 
`wave.build.cacheRepository` -: The container repository used to cache image layers built by the Wave service (note: the corresponding credentials must be provided in your Seqera Platform account). +: The container repository used to cache image layers built by the Wave service. +: The corresponding credentials must be provided in your Seqera Platform account. `wave.build.compression.mode` : :::{versionadded} 25.05.0-edge ::: -: Defines the compression algorithm that should be used when building the container. Allowed values are: `gzip`, `estargz` and `zstd` (default: `gzip`). +: The compression algorithm that should be used when building the container. Allowed values are: `gzip`, `estargz` and `zstd` (default: `gzip`). `wave.build.compression.level` : :::{versionadded} 25.05.0-edge ::: : Level of compression used when building a container depending the chosen algorithm: gzip, estargz (0-9) and zstd (0-22). - . + `wave.build.compression.force` : :::{versionadded} 25.05.0-edge ::: @@ -1580,37 +1562,19 @@ The following settings are available: `wave.build.conda.mambaImage` : The Mamba container image is used to build Conda based container. This is expected to be [micromamba-docker](https://github.com/mamba-org/micromamba-docker) image. -`wave.endpoint` -: The Wave service endpoint (default: `https://wave.seqera.io`). - -`wave.freeze` -: :::{versionadded} 23.07.0-edge - ::: -: Enables Wave container freezing (default: `false`). Wave will provision a non-ephemeral container image that will be pushed to a container repository of your choice. It requires the use of the `wave.build.repository` setting. -: It is also recommended to specify a custom cache repository using `wave.build.cacheRepository`. -: :::{note} - The container repository authentication must be managed by the underlying infrastructure. - ::: +`wave.build.repository` +: The container repository where images built by Wave are uploaded. 
+: The corresponding credentials must be provided in your Seqera Platform account. -`wave.httpClient.connectTime` +`wave.httpClient.connectTimeout` : :::{versionadded} 22.06.0-edge ::: -: Sets the connection timeout duration for the HTTP client connecting to the Wave service (default: `30s`). +: The connection timeout for the Wave HTTP client (default: `30s`). -`wave.mirror` -: :::{versionadded} 24.09.1-edge - ::: -: Enables Wave container mirroring (default: `false`). -: This feature allow mirroring (i.e. copying) the containers defined in your pipeline - configuration to a container registry of your choice, so that pipeline tasks will pull the copied containers from the - target registry instead of the original one. -: The resulting copied containers will maintain the name, digest and metadata. -: The target registry is expected to be specified by using the `wave.build.repository` option. -: :::{note} - * This feature is only compatible with `wave.strategy = 'container'` option. - * This feature cannot be used with Wave *freeze* mode. - * The authentication of the resulting container images must be managed by the underlying infrastructure. +`wave.httpClient.maxRate` +: :::{versionadded} 25.01.0-edge ::: +: The maximum request rate for the Wave HTTP client (default: `1/sec`). `wave.retryPolicy.delay` : :::{versionadded} 22.06.0-edge @@ -1632,28 +1596,30 @@ The following settings are available: ::: : The max delay when a failing HTTP request is retried (default: `90s`). -`wave.scan.mode` +`wave.scan.allowedLevels` : :::{versionadded} 24.09.1-edge ::: -: Determines the container security scanning execution modality. +: Comma-separated list of allowed vulnerability levels when scanning containers for security vulnerabilities in `required` mode. -: This feature allows scanning for security vulnerability the container used in your pipeline. The following options can be specified: +: Allowed values are: `low`, `medium`, `high`, `critical`. 
- * `none`: No security scan is performed on the containers used by your pipeline. - * `async`: The containers used by your pipeline are scanned for security vulnerability. The task execution is carried out independently of the security scan result. - * `required`: The containers used by your pipeline are scanned for security vulnerability. The task is only executed if the corresponding container is not affected by a security vulnerability. +: This option requires `wave.scan.mode = 'required'`. -`wave.scan.allowedLevels` +`wave.scan.mode` : :::{versionadded} 24.09.1-edge ::: -: Determines the allowed security levels when scanning containers for security vulnerabilities. +: Enable Wave container security scanning. Wave will scan the containers in your pipeline for security vulnerabilities. -: Allowed values are: `low`, `medium`, `high`, `critical`. For example: `wave.scan.allowedLevels = 'low,medium'`. +: The following options can be specified: -: This option requires the use of `wave.scan.mode = 'required'`. + `'none'` + : No security scanning is performed. -`wave.strategy` -: The strategy to be used when resolving ambiguous Wave container requirements (default: `'container,dockerfile,conda'`). + `'async'` + : The containers used by your pipeline are scanned for security vulnerabilities. The task execution is carried out regardless of the security scan result. + + `'required'` + : The containers used by your pipeline are scanned for security vulnerabilities. The task is only executed if the corresponding container is free of vulnerabilities. (config-workflow)= @@ -1664,6 +1630,8 @@ The following settings are available: The `workflow` scope provides workflow execution options. +The following settings are available: + `workflow.failOnIgnore` : :::{versionadded} 24.05.0-edge ::: @@ -1692,7 +1660,9 @@ The `workflow` scope provides workflow execution options. : When `true`, the workflow will not fail if a file can't be published for some reason (default: `false`). 
`workflow.output.mode` -: The file publishing method (default: `'symlink'`). The following options are available: +: The file publishing method (default: `'symlink'`). + +: The following options are available: `'copy'` : Copy each file into the output directory. @@ -1714,7 +1684,9 @@ The `workflow` scope provides workflow execution options. : Create an absolute symbolic link in the output directory for each output file. `workflow.output.overwrite` -: When `true` any existing file in the specified folder will be overwritten (default: `'standard'`). The following options are available: +: When `true` any existing file in the specified folder will be overwritten (default: `'standard'`). + +: The following options are available: `false` : Never overwrite existing files. @@ -1737,7 +1709,9 @@ The `workflow` scope provides workflow execution options. `workflow.output.tags` : *Currently only supported for S3.* -: Specify arbitrary tags for published files. For example: +: Specify arbitrary tags for published files. + +: For example: ```groovy - tags FOO: 'hello', BAR: 'world' + workflow.output.tags = [FOO: 'hello', BAR: 'world'] ``` diff --git a/docs/spack.md b/docs/spack.md index 8b3f243b24..1f044afc04 100644 --- a/docs/spack.md +++ b/docs/spack.md @@ -164,8 +164,11 @@ profiles { } ``` -The above configuration snippet allows the execution either with Spack or Docker by specifying `-profile spack` or -`-profile docker` when running the pipeline script. +The above configuration snippet allows the execution either with Spack or Docker by specifying `-profile spack` or `-profile docker` when running the pipeline script. + +:::{note} +Nextflow does not allow for fine-grained configuration of the Spack package manager. Instead, this has to be performed directly on the host Spack installation. For more information see the [Spack documentation](https://spack.readthedocs.io). 
+::: ## Advanced settings diff --git a/modules/nextflow/src/main/groovy/nextflow/Nextflow.groovy b/modules/nextflow/src/main/groovy/nextflow/Nextflow.groovy index 093b02dee8..4d7d38a6d9 100644 --- a/modules/nextflow/src/main/groovy/nextflow/Nextflow.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/Nextflow.groovy @@ -348,11 +348,8 @@ class Nextflow { * - attach: One or more list attachment */ static void sendMail( Map params ) { - - new Mailer() - .setConfig(Global.session.config.mail as Map) - .send(params) - + final opts = Global.session.config.mail as Map ?: Collections.emptyMap() + new Mailer(opts).send(params) } /** @@ -374,9 +371,8 @@ class Nextflow { * */ static void sendMail( Closure params ) { - new Mailer() - .setConfig(Global.session.config.mail as Map) - .send(params) + final opts = Global.session.config.mail as Map ?: Collections.emptyMap() + new Mailer(opts).send(params) } /** diff --git a/modules/nextflow/src/main/groovy/nextflow/Session.groovy b/modules/nextflow/src/main/groovy/nextflow/Session.groovy index f1a182953a..0e8ae74108 100644 --- a/modules/nextflow/src/main/groovy/nextflow/Session.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/Session.groovy @@ -37,7 +37,14 @@ import nextflow.cache.CacheDB import nextflow.cache.CacheFactory import nextflow.conda.CondaConfig import nextflow.config.Manifest +import nextflow.container.ApptainerConfig +import nextflow.container.CharliecloudConfig import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig +import nextflow.container.PodmanConfig +import nextflow.container.SarusConfig +import nextflow.container.ShifterConfig +import nextflow.container.SingularityConfig import nextflow.dag.DAG import nextflow.exception.AbortOperationException import nextflow.exception.AbortSignalException @@ -436,7 +443,7 @@ class Session implements ISession { } // set the byte-code target directory - this.disableRemoteBinDir = getExecConfigProp(null, 'disableRemoteBinDir', false) + 
this.disableRemoteBinDir = (config.executor as Map)?.disableRemoteBinDir as boolean this.classesDir = FileHelper.createLocalDir() this.executorFactory = new ExecutorFactory(Plugins.manager) this.observersV2 = createObserversV2() @@ -1178,14 +1185,14 @@ class Session implements ISession { @Memoized CondaConfig getCondaConfig() { - final cfg = config.conda as Map ?: Collections.emptyMap() - return new CondaConfig(cfg, getSystemEnv()) + final opts = config.conda as Map ?: Collections.emptyMap() + return new CondaConfig(opts, getSystemEnv()) } @Memoized SpackConfig getSpackConfig() { - final cfg = config.spack as Map ?: Collections.emptyMap() - return new SpackConfig(cfg, getSystemEnv()) + final opts = config.spack as Map ?: Collections.emptyMap() + return new SpackConfig(opts, getSystemEnv()) } /** @@ -1201,91 +1208,38 @@ class Session implements ISession { @Memoized ContainerConfig getContainerConfig(String engine) { - final allEngines = new LinkedList() - getContainerConfig0('docker', allEngines) - getContainerConfig0('podman', allEngines) - getContainerConfig0('sarus', allEngines) - getContainerConfig0('shifter', allEngines) - getContainerConfig0('udocker', allEngines) - getContainerConfig0('singularity', allEngines) - getContainerConfig0('apptainer', allEngines) - getContainerConfig0('charliecloud', allEngines) + final allConfigs = [ + new DockerConfig(config.docker as Map ?: Collections.emptyMap()), + new PodmanConfig(config.podman as Map ?: Collections.emptyMap()), + new SarusConfig(config.sarus as Map ?: Collections.emptyMap()), + new ShifterConfig(config.shifter as Map ?: Collections.emptyMap()), + new SingularityConfig(config.singularity as Map ?: Collections.emptyMap()), + new ApptainerConfig(config.apptainer as Map ?: Collections.emptyMap()), + new CharliecloudConfig(config.charliecloud as Map ?: Collections.emptyMap()), + ] as List if( engine ) { - final result = allEngines.find(it -> it.engine==engine) ?: [engine: engine] - return new 
ContainerConfig(result) + return allConfigs.find { it -> it.engine == engine } } - final enabled = allEngines.findAll(it -> it.enabled?.toString() == 'true') - if( enabled.size() > 1 ) { - final names = enabled.collect(it -> it.engine) + final allEnabled = allConfigs.findAll { it -> it.enabled } + if( allEnabled.size() > 1 ) { + final names = allEnabled.collect { it -> it.engine } throw new IllegalConfigException("Cannot enable more than one container engine -- Choose either one of: ${names.join(', ')}") } - if( enabled ) { - return new ContainerConfig(enabled.get(0)) + if( allEnabled ) { + return allEnabled.first() } - if( allEngines ) { - return new ContainerConfig(allEngines.get(0)) + if( allConfigs ) { + return allConfigs.first() } - return new ContainerConfig(engine:'docker') + return new DockerConfig([:]) } ContainerConfig getContainerConfig() { return getContainerConfig(null) } - private void getContainerConfig0(String engine, List drivers) { - assert engine - final entry = this.config?.get(engine) - if( entry instanceof Map ) { - final config0 = new LinkedHashMap() - config0.putAll((Map)entry) - config0.put('engine', engine) - drivers.add(config0) - } - else if( entry!=null ) { - log.warn "Malformed configuration for container engine '$engine' -- One or more attributes should be provided" - } - } - - @Memoized - def getExecConfigProp( String execName, String name, Object defValue, Map env = null ) { - def result = ConfigHelper.getConfigProperty(config.executor, execName, name ) - if( result != null ) - return result - - // -- try to fallback sys env - def key = "NXF_EXECUTOR_${name.toUpperCase().replaceAll(/\./,'_')}".toString() - if( env == null ) env = System.getenv() - return env.containsKey(key) ? 
env.get(key) : defValue - } - - @Memoized - def getConfigAttribute(String name, defValue ) { - def result = getMap0(getConfig(),name,name) - if( result != null ) - return result - - def key = "NXF_${name.toUpperCase().replaceAll(/\./,'_')}".toString() - def env = getSystemEnv() - return (env.containsKey(key) ? env.get(key) : defValue) - } - - private getMap0(Map map, String name, String fqn) { - def p=name.indexOf('.') - if( p == -1 ) - return map.get(name) - else { - def k=name.substring(0,p) - def v=map.get(k) - if( v == null ) - return null - if( v instanceof Map ) - return getMap0(v,name.substring(p+1),fqn) - throw new IllegalArgumentException("Not a valid config attribute: $fqn -- Missing element: $k") - } - } - @Memoized protected Map getSystemEnv() { new HashMap(System.getenv()) @@ -1352,68 +1306,6 @@ class Session implements ISession { return String.valueOf(val) } - - /** - * Defines the number of tasks the executor will handle in a parallel manner - * - * @param execName The executor name - * @param defValue The default value if setting is not defined in the configuration file - * @return The value of tasks to handle in parallel - */ - @Memoized - int getQueueSize( String execName, int defValue ) { - getExecConfigProp(execName, 'queueSize', defValue) as int - } - - /** - * Determines how often a poll occurs to check for a process termination - * - * @param execName The executor name - * @param defValue The default value if setting is not defined in the configuration file - * @return A {@code Duration} object. Default '1 second' - */ - @Memoized - Duration getPollInterval( String execName, Duration defValue = Duration.of('1sec') ) { - getExecConfigProp( execName, 'pollInterval', defValue ) as Duration - } - - /** - * Determines how long the executors waits before return an error status when a process is - * terminated but the exit file does not exist or it is empty. 
This setting is used only by grid executors - * - * @param execName The executor name - * @param defValue The default value if setting is not defined in the configuration file - * @return A {@code Duration} object. Default '90 second' - */ - @Memoized - Duration getExitReadTimeout( String execName, Duration defValue = Duration.of('90sec') ) { - getExecConfigProp( execName, 'exitReadTimeout', defValue ) as Duration - } - - /** - * Determines how often the executor status is written in the application log file - * - * @param execName The executor name - * @param defValue The default value if setting is not defined in the configuration file - * @return A {@code Duration} object. Default '5 minutes' - */ - @Memoized - Duration getMonitorDumpInterval( String execName, Duration defValue = Duration.of('5min')) { - getExecConfigProp(execName, 'dumpInterval', defValue) as Duration - } - - /** - * Determines how often the queue status is fetched from the cluster system. This setting is used only by grid executors - * - * @param execName The executor name - * @param defValue The default value if setting is not defined in the configuration file - * @return A {@code Duration} object. 
Default '1 minute' - */ - @Memoized - Duration getQueueStatInterval( String execName, Duration defValue = Duration.of('1min') ) { - getExecConfigProp(execName, 'queueStatInterval', defValue) as Duration - } - void printConsole(String str, boolean newLine=false) { if( ansiLogObserver ) ansiLogObserver.appendInfo(str) diff --git a/modules/nextflow/src/main/groovy/nextflow/conda/CondaCache.groovy b/modules/nextflow/src/main/groovy/nextflow/conda/CondaCache.groovy index 9cb8a28bbe..c019d32029 100644 --- a/modules/nextflow/src/main/groovy/nextflow/conda/CondaCache.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/conda/CondaCache.groovy @@ -28,6 +28,7 @@ import groovy.util.logging.Slf4j import groovyx.gpars.dataflow.DataflowVariable import groovyx.gpars.dataflow.LazyDataflowVariable import nextflow.Global +import nextflow.SysEnv import nextflow.file.FileMutex import nextflow.util.CacheHelper import nextflow.util.Duration @@ -56,7 +57,7 @@ class CondaCache { /** * Timeout after which the environment creation is aborted */ - private Duration createTimeout = Duration.of('20min') + private Duration createTimeout private String createOptions @@ -72,7 +73,7 @@ class CondaCache { @PackageScope Duration getCreateTimeout() { createTimeout } - @PackageScope Map getEnv() { System.getenv() } + @PackageScope Map getEnv() { SysEnv.get() } @PackageScope Path getConfigCacheDir0() { configCacheDir0 } diff --git a/modules/nextflow/src/main/groovy/nextflow/conda/CondaConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/conda/CondaConfig.groovy index ea79a435b2..89af2f10cb 100644 --- a/modules/nextflow/src/main/groovy/nextflow/conda/CondaConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/conda/CondaConfig.groovy @@ -19,6 +19,10 @@ package nextflow.conda import java.nio.file.Path import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import 
nextflow.script.dsl.Description import nextflow.util.Duration /** @@ -26,58 +30,100 @@ import nextflow.util.Duration * * @author Paolo Di Tommaso */ +@ScopeName("conda") +@Description(""" + The `conda` scope controls the creation of Conda environments by the Conda package manager. +""") @CompileStatic -class CondaConfig extends LinkedHashMap { +class CondaConfig implements ConfigScope { - private Map env + @ConfigOption + @Description(""" + Execute tasks with Conda environments (default: `false`). + """) + final boolean enabled - /* required by Kryo deserialization -- do not remove */ - private CondaConfig() { } + @ConfigOption + @Description(""" + The path where Conda environments are stored. It should be accessible from all compute nodes when using a shared file system. + """) + final String cacheDir - CondaConfig(Map config, Map env) { - super(config) - this.env = env - } + @ConfigOption + @Description(""" + The list of Conda channels that can be used to resolve Conda packages. Channel priority decreases from left to right. + """) + final List channels + + @ConfigOption + @Description(""" + Extra command line options for the `conda create` command. See the [Conda documentation](https://docs.conda.io/projects/conda/en/latest/commands/create.html) for more information. + """) + final String createOptions + + @ConfigOption + @Description(""" + The amount of time to wait for the Conda environment to be created before failing (default: `20 min`). + """) + final Duration createTimeout + + @ConfigOption + @Description(""" + Use [Mamba](https://github.com/mamba-org/mamba) instead of `conda` to create Conda environments (default: `false`). 
+ """) + final boolean useMamba - boolean isEnabled() { - def enabled = get('enabled') - if( enabled == null ) - enabled = env.get('NXF_CONDA_ENABLED') - return enabled?.toString() == 'true' + @ConfigOption + @Description(""" + Use [Micromamba](https://mamba.readthedocs.io/en/latest/user_guide/micromamba.html) instead of `conda` to create Conda environments (default: `false`). + """) + final boolean useMicromamba + + /* required by extension point -- do not remove */ + CondaConfig() {} + + CondaConfig(Map opts, Map env) { + enabled = opts.enabled != null + ? opts.enabled as boolean + : (env.NXF_CONDA_ENABLED?.toString() == 'true') + cacheDir = opts.cacheDir + channels = parseChannels(opts.channels) + createOptions = opts.createOptions + createTimeout = opts.createTimeout as Duration ?: Duration.of('20min') + useMamba = opts.useMamba as boolean + useMicromamba = opts.useMicromamba as boolean + + if( useMamba && useMicromamba ) + throw new IllegalArgumentException("Both conda.useMamba and conda.useMicromamba were enabled -- Please choose only one") } - List getChannels() { - final value = get('channels') - if( !value ) { - return Collections.emptyList() - } - if( value instanceof List ) { + private List parseChannels(Object value) { + if( !value ) + return Collections.emptyList() + if( value instanceof List ) return value - } - if( value instanceof CharSequence ) { + if( value instanceof CharSequence ) return value.tokenize(',').collect(it -> it.trim()) - } - throw new IllegalArgumentException("Unexpected conda.channels value: $value") } Duration createTimeout() { - get('createTimeout') as Duration + createTimeout } String createOptions() { - get('createOptions') as String + createOptions } Path cacheDir() { - get('cacheDir') as Path + cacheDir as Path } boolean useMamba() { - get('useMamba') as boolean + useMamba } boolean useMicromamba() { - get('useMicromamba') as boolean + useMicromamba } } diff --git 
a/modules/nextflow/src/main/groovy/nextflow/config/ConfigBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/config/ConfigBuilder.groovy index 0341f73523..c37e40591c 100644 --- a/modules/nextflow/src/main/groovy/nextflow/config/ConfigBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/config/ConfigBuilder.groovy @@ -26,6 +26,7 @@ import groovy.transform.PackageScope import groovy.util.logging.Slf4j import nextflow.Const import nextflow.NF +import nextflow.SysEnv import nextflow.cli.CliOptions import nextflow.cli.CmdConfig import nextflow.cli.CmdNode @@ -33,10 +34,6 @@ import nextflow.cli.CmdRun import nextflow.exception.AbortOperationException import nextflow.exception.ConfigParseException import nextflow.secret.SecretsLoader -import nextflow.trace.GraphObserver -import nextflow.trace.ReportObserver -import nextflow.trace.TimelineObserver -import nextflow.trace.TraceFileObserver import nextflow.util.HistoryFile import nextflow.util.SecretHelper /** @@ -79,7 +76,7 @@ class ConfigBuilder { Map emptyVariables = new LinkedHashMap<>(10) - Map env = new HashMap<>(System.getenv()) + Map env = new HashMap<>(SysEnv.get()) List warnings = new ArrayList<>(10); @@ -282,7 +279,7 @@ class ConfigBuilder { Map env = [:] if( exportSysEnv ) { log.debug "Adding current system environment to session environment" - env.putAll(System.getenv()) + env.putAll(SysEnv.get()) } if( vars ) { log.debug "Adding the following variables to session environment: $vars" @@ -361,7 +358,7 @@ class ConfigBuilder { // the configuration object binds always the current environment // so that in the configuration file may be referenced any variable // in the current environment - final binding = new HashMap(System.getenv()) + final binding = new HashMap(SysEnv.get()) binding.putAll(env) binding.putAll(configVars()) @@ -630,8 +627,6 @@ class ConfigBuilder { config.trace.enabled = true if( cmdRun.withTrace != '-' ) config.trace.file = cmdRun.withTrace - else if( !config.trace.file ) - 
config.trace.file = TraceFileObserver.DEF_FILE_NAME } // -- sets report report options @@ -641,8 +636,6 @@ class ConfigBuilder { config.report.enabled = true if( cmdRun.withReport != '-' ) config.report.file = cmdRun.withReport - else if( !config.report.file ) - config.report.file = ReportObserver.DEF_FILE_NAME } // -- sets timeline report options @@ -652,8 +645,6 @@ class ConfigBuilder { config.timeline.enabled = true if( cmdRun.withTimeline != '-' ) config.timeline.file = cmdRun.withTimeline - else if( !config.timeline.file ) - config.timeline.file = TimelineObserver.DEF_FILE_NAME } // -- sets DAG report options @@ -663,8 +654,6 @@ class ConfigBuilder { config.dag.enabled = true if( cmdRun.withDag != '-' ) config.dag.file = cmdRun.withDag - else if( !config.dag.file ) - config.dag.file = GraphObserver.DEF_FILE_NAME } if( cmdRun.withNotification ) { @@ -816,7 +805,7 @@ class ConfigBuilder { def config = buildGivenFiles(configFiles) if( cmdRun ) - configRunOptions(config, System.getenv(), cmdRun) + configRunOptions(config, SysEnv.get(), cmdRun) return config } diff --git a/modules/nextflow/src/main/groovy/nextflow/config/ConfigMap.groovy b/modules/nextflow/src/main/groovy/nextflow/config/ConfigMap.groovy index 52f5de92f2..c2bcd23962 100644 --- a/modules/nextflow/src/main/groovy/nextflow/config/ConfigMap.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/config/ConfigMap.groovy @@ -17,13 +17,48 @@ package nextflow.config import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description /** * Represent Nextflow config as Map * * @author Paolo Di Tommaso */ +@ScopeName('') @CompileStatic -class ConfigMap extends LinkedHashMap { +class ConfigMap extends LinkedHashMap implements ConfigScope { + + @ConfigOption + @Description(""" + The remote work directory used by hybrid workflows. 
Equivalent to the `-bucket-dir` option of the `run` command. + """) + final String bucketDir + + @ConfigOption + @Description(""" + Delete all files associated with a run in the work directory when the run completes successfully (default: `false`). + """) + final boolean cleanup + + @ConfigOption + @Description(""" + The pipeline output directory. Equivalent to the `-output-dir` option of the `run` command. + """) + final String outputDir + + @ConfigOption + @Description(""" + Enable the use of previously cached task executions. Equivalent to the `-resume` option of the `run` command. + """) + final boolean resume + + @ConfigOption + @Description(""" + The pipeline work directory. Equivalent to the `-work-dir` option of the `run` command. + """) + final String workDir ConfigMap() { } @@ -32,8 +67,13 @@ class ConfigMap extends LinkedHashMap { super(initialCapacity) } - ConfigMap(Map content) { - super(content) + ConfigMap(Map opts) { + super(opts) + bucketDir = opts.bucketDir + cleanup = opts.cleanup as boolean + outputDir = opts.outputDir + resume = opts.resume as boolean + workDir = opts.workDir } } diff --git a/modules/nextflow/src/main/groovy/nextflow/config/ConfigValidator.groovy b/modules/nextflow/src/main/groovy/nextflow/config/ConfigValidator.groovy index 58cd46c96c..c8222923ec 100644 --- a/modules/nextflow/src/main/groovy/nextflow/config/ConfigValidator.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/config/ConfigValidator.groovy @@ -35,9 +35,10 @@ class ConfigValidator { /** * Hidden options added by ConfigBuilder */ - private static final List hiddenOptions = List.of( + private static final List HIDDEN_OPTIONS = List.of( 'cacheable', 'dumpChannels', + 'dumpHashes', 'libDir', 'poolSize', 'preview', @@ -45,6 +46,19 @@ class ConfigValidator { 'stubRun', ); + /** + * Core plugin scopes which can only be validated when + * the plugin is loaded. 
+ */ + private static final List CORE_PLUGIN_SCOPES = List.of( + 'aws', + 'azure', + 'google', + 'k8s', + 'tower', + 'wave' + ) + /** * Additional config scopes added by third-party plugins */ @@ -55,20 +69,24 @@ class ConfigValidator { } private void loadPluginScopes() { - final scopes = new HashMap() + final children = new HashMap() for( final scope : Plugins.getExtensions(ConfigScope) ) { final clazz = scope.getClass() final name = clazz.getAnnotation(ScopeName)?.value() final description = clazz.getAnnotation(Description)?.value() + if( name == '' ) { + children.putAll(SchemaNode.Scope.of(clazz, '').children()) + continue + } if( !name ) continue - if( name in scopes ) { + if( name in children ) { log.warn "Plugin config scope `${clazz.name}` conflicts with existing scope: `${name}`" continue } - scopes.put(name, SchemaNode.Scope.of(clazz, description)) + children.put(name, SchemaNode.Scope.of(clazz, description)) } - pluginScopes = new SchemaNode.Scope('', scopes) + pluginScopes = new SchemaNode.Scope('', children) } void validate(ConfigMap config) { @@ -92,11 +110,12 @@ class ConfigValidator { continue final fqName = names.join('.') if( fqName.startsWith('process.ext.') ) - return - if( !isValid(names) ) { - log.warn "Unrecognized config option '${fqName}'" continue - } + if( isValid(names) ) + continue + if( isMissingCorePluginScope(names.first()) ) + continue + log.warn1 "Unrecognized config option '${fqName}'" } } @@ -115,7 +134,7 @@ class ConfigValidator { * @param names */ boolean isValid(List names) { - if( names.size() == 1 && names.first() in hiddenOptions ) + if( names.size() == 1 && names.first() in HIDDEN_OPTIONS ) return true final child = SchemaNode.ROOT.getChild(names) if( child instanceof SchemaNode.Option || child instanceof SchemaNode.DslOption ) @@ -125,6 +144,17 @@ class ConfigValidator { return false } + /** + * Determine whether a config scope is from a core plugin + * which is not currently loaded. 
+ * + * @param name + */ + private boolean isMissingCorePluginScope(String name) { + return name in CORE_PLUGIN_SCOPES + && !pluginScopes.children().containsKey(name) + } + /** * Warn about setting `NXF_*` environment variables in the config. * diff --git a/modules/nextflow/src/main/groovy/nextflow/config/Manifest.groovy b/modules/nextflow/src/main/groovy/nextflow/config/Manifest.groovy index 626ce52e26..253aca5c78 100644 --- a/modules/nextflow/src/main/groovy/nextflow/config/Manifest.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/config/Manifest.groovy @@ -20,8 +20,11 @@ import java.util.stream.Collectors import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode -import groovy.util.logging.Slf4j +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName import nextflow.exception.AbortOperationException +import nextflow.script.dsl.Description import static nextflow.Const.DEFAULT_MAIN_FILE_NAME /** @@ -29,138 +32,176 @@ import static nextflow.Const.DEFAULT_MAIN_FILE_NAME * * @author Paolo Di Tommaso */ -@Slf4j +@ScopeName("manifest") +@Description(""" + The `manifest` scope allows you to define some metadata that is useful when publishing or running your pipeline. 
+""") @CompileStatic -class Manifest { - - private Map target - - Manifest() { target = Collections.emptyMap() } - - Manifest(Map object) { - assert object != null - this.target = new HashMap(object.size()) - final validFields = this.metaClass.properties.collect { it.name }.findAll { it!='class' } - object.each { key, value -> - if( validFields.contains(key) ) - target.put(key, value) - else - log.warn("Invalid config manifest attribute `$key`") - } - } - - String getHomePage() { - target.homePage - } - - - String getDefaultBranch() { - target.defaultBranch - } - - String getDescription() { - target.description - } - - String getAuthor() { - target.author - } - - List getContributors() { - if( !target.contributors ) +class Manifest implements ConfigScope { + + @Deprecated + @ConfigOption + @Description(""" + Project author name (use a comma to separate multiple names). + """) + final String author + + @ConfigOption + @Description(""" + List of project contributors. Should be a list of maps. + """) + final List contributors + + @ConfigOption + @Description(""" + Git repository default branch (default: `master`). + """) + final String defaultBranch + + @ConfigOption + @Description(""" + Free text describing the workflow project. + """) + final String description + + @ConfigOption + @Description(""" + Project documentation URL. + """) + final String docsUrl + + @ConfigOption + @Description(""" + Project related publication DOI identifier. + """) + final String doi + + @ConfigOption + @Description(""" + Controls whether git sub-modules should be cloned with the main repository. + + Can be either a boolean value, a list of submodule names, or a comma-separated string of submodule names. + """) + final Object gitmodules + + @ConfigOption + @Description(""" + Project home page URL. + """) + final String homePage + + @ConfigOption + @Description(""" + Project related icon location (Relative path or URL). 
+ """) + final String icon + + @ConfigOption + @Description(""" + Project license. + """) + final String license + + @ConfigOption + @Description(""" + Project main script (default: `main.nf`). + """) + final String mainScript + + @ConfigOption + @Description(""" + Project short name. + """) + final String name + + @ConfigOption + @Description(""" + Minimum required Nextflow version. + """) + final String nextflowVersion + + @ConfigOption + @Description(""" + Project organization. + """) + final String organization + + @ConfigOption + @Description(""" + Pull submodules recursively from the Git repository. + """) + final boolean recurseSubmodules + + @ConfigOption + @Description(""" + Project version number. + """) + final String version + + /* required by extension point -- do not remove */ + Manifest() {} + + Manifest(Map opts) { + author = opts.author as String + contributors = parseContributors(opts.contributors) + defaultBranch = opts.defaultBranch as String + description = opts.description as String + docsUrl = opts.docsUrl as String + doi = opts.doi as String + gitmodules = opts.gitmodules + homePage = opts.homePage as String + icon = opts.icon as String + license = opts.license as String + mainScript = opts.mainScript as String ?: DEFAULT_MAIN_FILE_NAME + name = opts.name as String + nextflowVersion = opts.nextflowVersion as String + organization = opts.organization as String + recurseSubmodules = opts.recurseSubmodules as boolean + version = opts.version as String + } + + private List parseContributors(Object value) { + if( !value ) return Collections.emptyList() try { - final contributors = target.contributors as List + final contributors = value as List return contributors.stream() .map(opts -> new Contributor(opts)) - .collect(Collectors.toList()) + .toList() } catch( ClassCastException | IllegalArgumentException e ){ throw new AbortOperationException("Invalid config option `manifest.contributors` -- should be a list of maps") } } - String 
getMainScript() { - target.mainScript ?: DEFAULT_MAIN_FILE_NAME - } - - /** - * Controls whether repository sub-modules should be cloned along with the main one. - * - * @return - * Either a boolean value, a list object submodule names or a comma separated string - * of sub-module names - */ - def getGitmodules() { - target.gitmodules - } - - boolean getRecurseSubmodules() { - target.recurseSubmodules - } - - String getNextflowVersion() { - target.nextflowVersion - } - - String getVersion() { - target.version - } - - String getName() { - target.name - } - - String getDoi() { - target.doi - } - - String getDocsUrl() { - target.docsUrl - } - - String getIcon() { - target.icon - } - - String getOrganization() { - target.organization - } - - String getLicense() { - target.license - } - Map toMap() { - final result = new HashMap(15) - result.author = getAuthor() - result.contributors = getContributors().stream() - .map(c -> c.toMap()) - .collect(Collectors.toList()) - result.defaultBranch = getDefaultBranch() - result.description = getDescription() - result.homePage = homePage - result.gitmodules = getGitmodules() - result.mainScript = getMainScript() - result.version = getVersion() - result.nextflowVersion = getNextflowVersion() - result.doi = getDoi() - result.docsUrl = getDocsUrl() - result.icon = getIcon() - result.organization = getOrganization() - result.license = getLicense() - return result + return [ + author: author, + contributors: contributors.stream().map(c -> c.toMap()).toList(), + defaultBranch: defaultBranch, + description: description, + homePage: homePage, + gitmodules: gitmodules, + mainScript: mainScript, + version: version, + nextflowVersion: nextflowVersion, + doi: doi, + docsUrl: docsUrl, + icon: icon, + organization: organization, + license: license, + ] } @EqualsAndHashCode static class Contributor { - String name - String affiliation - String email - String github - Set contribution - String orcid + final String name + final String affiliation 
+ final String email + final String github + final List contribution + final String orcid Contributor(Map opts) { name = opts.name as String @@ -169,22 +210,20 @@ class Manifest { github = opts.github as String contribution = (opts.contribution as List).stream() .map(c -> ContributionType.valueOf(c.toUpperCase())) - .collect(Collectors.toSet()) + .sorted() + .toList() orcid = opts.orcid as String } Map toMap() { - final result = new HashMap(6) - result.name = name - result.affiliation = affiliation - result.email = email - result.github = github - result.contribution = contribution.stream() - .map(c -> c.toString().toLowerCase()) - .sorted() - .collect(Collectors.toList()) - result.orcid = orcid - return result + return [ + name: name, + affiliation: affiliation, + email: email, + github: github, + contribution: contribution.stream().map(c -> c.toString().toLowerCase()).toList(), + orcid: orcid, + ] } } diff --git a/modules/nextflow/src/main/groovy/nextflow/config/WorkflowConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/config/WorkflowConfig.groovy new file mode 100644 index 0000000000..39ab5884fb --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/config/WorkflowConfig.groovy @@ -0,0 +1,141 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package nextflow.config + +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description + +@ScopeName("workflow") +@Description(""" + The `workflow` scope provides workflow execution options. +""") +@CompileStatic +class WorkflowConfig implements ConfigScope { + + @ConfigOption + @Description(""" + When `true`, the pipeline will exit with a non-zero exit code if any failed tasks are ignored using the `ignore` error strategy (default: `false`). + """) + final boolean failOnIgnore + + @ConfigOption + @Description(""" + Specify a closure that will be invoked at the end of a workflow run (including failed runs). + """) + final Closure onComplete + + @ConfigOption + @Description(""" + Specify a closure that will be invoked if a workflow run is terminated. + """) + final Closure onError + + @Description(""" + The `workflow.output` scope provides options for publishing workflow outputs. + + [Read more](https://nextflow.io/docs/latest/reference/config.html#workflow) + """) + final WorkflowOutputConfig output + + /* required by extension point -- do not remove */ + WorkflowConfig() {} + + WorkflowConfig(Map opts) { + failOnIgnore = opts.failOnIgnore as boolean + onComplete = opts.onComplete as Closure + onError = opts.onError as Closure + output = new WorkflowOutputConfig(opts.output as Map ?: Collections.emptyMap()) + } + +} + +@CompileStatic +class WorkflowOutputConfig implements ConfigScope { + + @ConfigOption + @Description(""" + *Currently only supported for S3.* + + Specify the media type, also known as [MIME type](https://developer.mozilla.org/en-US/docs/Web/HTTP/MIME_types), of published files (default: `false`). Can be a string (e.g. `'text/html'`), or `true` to infer the content type from the file extension. 
+ """) + final Object contentType + + @ConfigOption + @Description(""" + *Currently only supported for local and shared filesystems.* + + Copy file attributes (such as the last modified timestamp) to the published file (default: `false`). + """) + final boolean copyAttributes + + @ConfigOption + @Description(""" + Enable or disable publishing (default: `true`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + When `true`, the workflow will not fail if a file can't be published for some reason (default: `false`). + """) + final boolean ignoreErrors + + @ConfigOption + @Description(""" + The file publishing method (default: `'symlink'`). + """) + final String mode + + @ConfigOption + @Description(""" + When `true` any existing file in the specified folder will be overwritten (default: `'standard'`). + """) + final Object overwrite + + @ConfigOption + @Description(""" + *Currently only supported for S3.* + + Specify the storage class for published files. + """) + final String storageClass + + @ConfigOption + @Description(""" + *Currently only supported for S3.* + + Specify arbitrary tags for published files. 
+ """) + final Map tags + + /* required by extension point -- do not remove */ + WorkflowOutputConfig() {} + + WorkflowOutputConfig(Map opts) { + contentType = opts.contentType + copyAttributes = opts.copyAttributes as boolean + enabled = opts.enabled as boolean + ignoreErrors = opts.ignoreErrors as boolean + mode = opts.mode + overwrite = opts.overwrite + storageClass = opts.storageClass + tags = opts.tags as Map + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ApptainerBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ApptainerBuilder.groovy index e381c9b163..6b436baee6 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ApptainerBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ApptainerBuilder.groovy @@ -29,10 +29,27 @@ import groovy.transform.CompileStatic @CompileStatic class ApptainerBuilder extends SingularityBuilder { - ApptainerBuilder(String name) { + ApptainerBuilder(String name, ApptainerConfig config) { super(name) + applyConfig(config) + } + + ApptainerBuilder(String name) { + this(name, new ApptainerConfig([:])) } @Override protected String getBinaryName() { 'apptainer' } + + protected void applyConfig(ApptainerConfig config) { + + if( config.autoMounts != null ) + this.autoMounts = config.autoMounts + + if( config.engineOptions ) + this.addEngineOptions(config.engineOptions) + + if( config.runOptions ) + this.addRunOptions(config.runOptions) + } } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ApptainerCache.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ApptainerCache.groovy index 6174fd9d31..1b9e545daf 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ApptainerCache.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ApptainerCache.groovy @@ -27,8 +27,8 @@ import groovy.transform.CompileStatic @CompileStatic class ApptainerCache extends SingularityCache { - ApptainerCache(ContainerConfig config, Map env=null) 
{ - super(config, env) + ApptainerCache(ApptainerConfig config, Map env=null) { + super(config.cacheDir, config.libraryDir, config.noHttps, config.pullTimeout, env) } @Override diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ApptainerConfig.java b/modules/nextflow/src/main/groovy/nextflow/container/ApptainerConfig.groovy similarity index 51% rename from modules/nf-lang/src/main/java/nextflow/config/scopes/ApptainerConfig.java rename to modules/nextflow/src/main/groovy/nextflow/container/ApptainerConfig.groovy index 0821a33c33..3129ff8239 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ApptainerConfig.java +++ b/modules/nextflow/src/main/groovy/nextflow/container/ApptainerConfig.groovy @@ -13,80 +13,113 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package nextflow.config.scopes; +package nextflow.container -import groovy.transform.CompileStatic; -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.util.Duration -public class ApptainerConfig implements ConfigScope { +@ScopeName("apptainer") +@Description(""" + The `apptainer` scope controls how [Apptainer](https://apptainer.org) containers are executed by Nextflow. +""") +@CompileStatic +class ApptainerConfig implements ConfigScope, ContainerConfig { @ConfigOption @Description(""" - When `true` Nextflow automatically mounts host paths in the executed container. It requires the `user bind control` feature to be enabled in your Apptainer installation (default: `true`). + Automatically mount host paths in the executed container (default: `true`). 
It requires the `user bind control` feature to be enabled in your Apptainer installation. """) - public boolean autoMounts; + final Boolean autoMounts @ConfigOption @Description(""" The directory where remote Apptainer images are stored. When using a computing cluster it must be a shared folder accessible to all compute nodes. """) - public String cacheDir; + final String cacheDir @ConfigOption @Description(""" - Enable Apptainer execution (default: `false`). + Execute tasks with Apptainer containers (default: `false`). """) - public boolean enabled; + final boolean enabled @ConfigOption @Description(""" - This attribute can be used to provide any option supported by the Apptainer engine i.e. `apptainer [OPTIONS]`. + Specify additional options supported by the Apptainer engine i.e. `apptainer [OPTIONS]`. """) - public String engineOptions; + final String engineOptions @ConfigOption @Description(""" Comma separated list of environment variable names to be included in the container environment. """) - public String envWhitelist; + final List envWhitelist @ConfigOption @Description(""" Directory where remote Apptainer images are retrieved. When using a computing cluster it must be a shared folder accessible to all compute nodes. """) - public String libraryDir; + final String libraryDir @ConfigOption @Description(""" Pull the Apptainer image with http protocol (default: `false`). """) - public boolean noHttps; + final boolean noHttps @ConfigOption @Description(""" When enabled, OCI (and Docker) container images are pulled and converted to the SIF format by the Apptainer run command, instead of Nextflow (default: `false`). """) - public boolean ociAutoPull; + final boolean ociAutoPull @ConfigOption @Description(""" The amount of time the Apptainer pull can last, exceeding which the process is terminated (default: `20 min`). """) - public Duration pullTimeout; + final Duration pullTimeout @ConfigOption @Description(""" The registry from where Docker images are pulled. 
It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. """) - public String registry; + final String registry @ConfigOption @Description(""" - This attribute can be used to provide any extra command line options supported by `apptainer exec`. + Specify extra command line options supported by `apptainer exec`. """) - public String runOptions; + final String runOptions + + /* required by extension point -- do not remove */ + ApptainerConfig() {} + + ApptainerConfig(Map opts) { + autoMounts = opts.autoMounts as Boolean + cacheDir = opts.cacheDir + enabled = opts.enabled as boolean + engineOptions = opts.engineOptions + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + libraryDir = opts.libraryDir + noHttps = opts.noHttps as boolean + ociAutoPull = opts.ociAutoPull as boolean + pullTimeout = opts.pullTimeout as Duration + registry = opts.registry + runOptions = opts.runOptions + } + + @Override + String getEngine() { + return 'apptainer' + } + + @Override + boolean canRunOciImage() { + return ociAutoPull + } } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy index cd4f3f1eb5..3c628699ab 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy @@ -34,30 +34,33 @@ import nextflow.Global @Slf4j class CharliecloudBuilder extends ContainerBuilder { - private boolean writeFake = true + private boolean writeFake - CharliecloudBuilder(String name) { + CharliecloudBuilder(String name, CharliecloudConfig config) { this.image = name + + if( config.runOptions ) + addRunOptions(config.runOptions) + + if( config.temp ) + this.temp = config.temp + + this.writeFake = config.writeFake + } + + CharliecloudBuilder(String name) { + this(name, new 
CharliecloudConfig([:])) } @Override CharliecloudBuilder params(Map params) { - if( params.containsKey('temp') ) - this.temp = params.temp - if( params.containsKey('entry') ) this.entryPoint = params.entry - if( params.containsKey('runOptions') ) - addRunOptions(params.runOptions.toString()) - - if ( params.containsKey('writeFake') ) - this.writeFake = params.writeFake?.toString() != 'false' - if( params.containsKey('readOnlyInputs') ) - this.readOnlyInputs = params.readOnlyInputs?.toString() == 'true' - + this.readOnlyInputs = params.readOnlyInputs + return this } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudCache.groovy b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudCache.groovy index 1206b4e6ed..6a6f997413 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudCache.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudCache.groovy @@ -43,14 +43,12 @@ class CharliecloudCache { static final private Map> localImageNames = new ConcurrentHashMap<>() - private ContainerConfig config + private CharliecloudConfig config private Map env private boolean missingCacheDir - private Duration pullTimeout = Duration.of('20min') - private String registry @TestOnly @@ -60,10 +58,10 @@ class CharliecloudCache { /** * Create a Charliecloud cache object * - * @param config A {@link ContainerConfig} object + * @param config A {@link CharliecloudConfig} object * @param env The environment configuration object. 
Specifying {@code null} the current system environment is used */ - CharliecloudCache(ContainerConfig config, Map env=null) { + CharliecloudCache(CharliecloudConfig config, Map env=null) { this.config = config this.env = env ?: System.getenv() } @@ -129,19 +127,11 @@ class CharliecloudCache { @PackageScope Path getCacheDir() { - if( config.pullTimeout ) - pullTimeout = config.pullTimeout as Duration - - def writeFake = true - - if( config.writeFake ) - writeFake = config.writeFake?.toString() == 'true' + String str = config.cacheDir - def str = config.cacheDir as String + final charliecloudImageStorage = env.get('CH_IMAGE_STORAGE') - def charliecloudImageStorage = env.get('CH_IMAGE_STORAGE') - - if( charliecloudImageStorage && writeFake) { + if( charliecloudImageStorage && config.writeFake) { return checkDir(charliecloudImageStorage) } @@ -162,16 +152,14 @@ class CharliecloudCache { return checkDir(str) } - def workDir = Global.session.workDir + final workDir = Global.session.workDir if( workDir.fileSystem != FileSystems.default ) { throw new IOException("Charliecloud cannot store image in a remote work directory -- Use a POSIX compatible work directory or specify an alternative path with the `NXF_CHARLIECLOUD_CACHEDIR` env variable") } missingCacheDir = true - def result = workDir.resolve('charliecloud') - - return result + return workDir.resolve('charliecloud') } /** @@ -253,10 +241,10 @@ class CharliecloudCache { int runCommand( String cmd, Path storePath ) { log.trace """Charliecloud pull command: $cmd - timeout: $pullTimeout + timeout: $config.pullTimeout folder : $storePath""".stripIndent(true) - final max = pullTimeout.toMillis() + final max = config.pullTimeout.toMillis() final builder = new ProcessBuilder(['bash','-c',cmd]) builder.environment().remove('CH_IMAGE_STORAGE') final proc = builder.start() @@ -266,7 +254,7 @@ class CharliecloudCache { def status = proc.exitValue() if( status != 0 ) { consumer.join() - def msg = "Charliecloud failed to pull 
image\n command: $cmd\n status : $status\n hint : Try and increase charliecloud.pullTimeout in the config (current is \"${pullTimeout}\")\n message:\n" + def msg = "Charliecloud failed to pull image\n command: $cmd\n status : $status\n hint : Try and increase charliecloud.pullTimeout in the config (current is \"${config.pullTimeout}\")\n message:\n" msg += err.toString().trim().indent(' ') throw new IllegalStateException(msg) } diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/CharliecloudConfig.java b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy similarity index 52% rename from modules/nf-lang/src/main/java/nextflow/config/scopes/CharliecloudConfig.java rename to modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy index 36abbea5ff..9f817a462e 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/CharliecloudConfig.java +++ b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy @@ -13,67 +13,87 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package nextflow.config.scopes; +package nextflow.container -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.util.Duration -public class CharliecloudConfig implements ConfigScope { +@ScopeName("charliecloud") +@Description(""" + The `charliecloud` scope controls how [Charliecloud](https://hpc.github.io/charliecloud/) containers are executed by Nextflow. 
+""") +@CompileStatic +class CharliecloudConfig implements ConfigScope, ContainerConfig { @ConfigOption @Description(""" The directory where remote Charliecloud images are stored. When using a computing cluster it must be a shared folder accessible to all compute nodes. """) - public String cacheDir; + final String cacheDir @ConfigOption @Description(""" - Enable Charliecloud execution (default: `false`). + Execute tasks with Charliecloud containers (default: `false`). """) - public boolean enabled; + final boolean enabled @ConfigOption @Description(""" Comma separated list of environment variable names to be included in the container environment. """) - public String envWhitelist; + final List envWhitelist @ConfigOption @Description(""" The amount of time the Charliecloud pull can last, exceeding which the process is terminated (default: `20 min`). """) - public Duration pullTimeout; + final Duration pullTimeout @ConfigOption @Description(""" The registry from where images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. """) - public String registry; + final String registry @ConfigOption @Description(""" - This attribute can be used to provide any extra command line options supported by the `ch-run` command. + Specify extra command line options supported by the `ch-run` command. """) - public String runOptions; + final String runOptions @ConfigOption @Description(""" Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. """) - public String temp; + final String temp @ConfigOption @Description(""" - Create a temporary squashFS container image in the process work directory instead of a folder. + Run containers from storage in writeable mode using overlayfs (default: `true`). 
""") - public boolean useSquash; + final boolean writeFake - @ConfigOption - @Description(""" - Enable `writeFake` with charliecloud. This allows to run containers from storage in writeable mode using overlayfs. - """) - public boolean writeFake; + /* required by extension point -- do not remove */ + CharliecloudConfig() {} + + CharliecloudConfig(Map opts) { + cacheDir = opts.cacheDir + enabled = opts.enabled as boolean + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + pullTimeout = opts.pullTimeout as Duration ?: Duration.of('20min') + registry = opts.registry + runOptions = opts.runOptions + temp = opts.temp + writeFake = opts.writeFake != null ? opts.writeFake as boolean : true + } + + @Override + String getEngine() { + return 'charliecloud' + } } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ContainerBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ContainerBuilder.groovy index e70dc2216b..a858167db8 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ContainerBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ContainerBuilder.groovy @@ -18,6 +18,7 @@ package nextflow.container import java.nio.file.Path +import groovy.transform.CompileStatic import nextflow.executor.BashWrapperBuilder import nextflow.util.Escape import nextflow.util.MemoryUnit @@ -27,30 +28,29 @@ import nextflow.util.PathTrie * * @author Paolo Di Tommaso */ +@CompileStatic abstract class ContainerBuilder { /** * Create a builder instance given the container engine */ - static ContainerBuilder create(String engine, String containerImage) { - if( engine == 'docker' ) - return new DockerBuilder(containerImage) - if( engine == 'podman' ) - return new PodmanBuilder(containerImage) - if( engine == 'singularity' ) - return new SingularityBuilder(containerImage) - if( engine == 'apptainer' ) - return new ApptainerBuilder(containerImage) - if( engine == 'udocker' ) - return new UdockerBuilder(containerImage) 
- if( engine == 'sarus' ) - return new SarusBuilder(containerImage) - if( engine == 'shifter' ) - return new ShifterBuilder(containerImage) - if( engine == 'charliecloud' ) - return new CharliecloudBuilder(containerImage) + static ContainerBuilder create(ContainerConfig config, String containerImage) { + if( config instanceof DockerConfig ) + return new DockerBuilder(containerImage, config) + if( config instanceof PodmanConfig ) + return new PodmanBuilder(containerImage, config) + if( config instanceof SingularityConfig ) + return new SingularityBuilder(containerImage, config) + if( config instanceof ApptainerConfig ) + return new ApptainerBuilder(containerImage, config) + if( config instanceof SarusConfig ) + return new SarusBuilder(containerImage, config) + if( config instanceof ShifterConfig ) + return new ShifterBuilder(containerImage, config) + if( config instanceof CharliecloudConfig ) + return new CharliecloudBuilder(containerImage, config) // - throw new IllegalArgumentException("Unknown container engine: $engine") + throw new IllegalArgumentException("Unknown container engine: $config.engine") } final protected List env = [] diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy index 10819626e6..9d4c3b62ff 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy @@ -17,129 +17,51 @@ package nextflow.container import groovy.transform.CompileStatic -import groovy.transform.Memoized -import groovy.util.logging.Slf4j /** - * Models container engine configuration + * Models generic container configuration * * @author Paolo Di Tommaso */ -@Slf4j @CompileStatic -class ContainerConfig extends LinkedHashMap { +interface ContainerConfig { - private Map sysEnv = System.getenv() - - /* required by Kryo deserialization -- do not remove */ - private 
ContainerConfig() { } - - ContainerConfig(Map config) { - super(config) - } - - ContainerConfig(Map config, Map env) { - super(config) - this.sysEnv = env + default boolean canRunOciImage() { + return false } - boolean isEnabled() { - return get('enabled')?.toString() == 'true' + default boolean entrypointOverride() { + return ContainerHelper.entrypointOverride() } - ContainerConfig setEnabled(boolean value) { - put('enabled', value) - return this + default String getFusionOptions() { + return null } - String getEngine() { - return get('engine') + default Object getKill() { + return null } - String getRegistry() { - return get('registry') + default String getRegistry() { + return null } - boolean getRegistryOverride() { - final val = get('registryOverride') - return val!=null ? Boolean.parseBoolean(val.toString()) : false + default boolean getRegistryOverride() { + return false } - /** - * Whenever Singularity or Apptainer container engine can a OCI (Docker) - * image without requiring a separate OCI to SIF conversion execution (managed by Nextflow via {@link SingularityCache). 
- * - * @return - * {@code true} when the OCI container can be run without an explicit OCI to SIF conversion or {@code false} - * otherwise - * - */ - boolean canRunOciImage() { - if( isSingularityOciMode() ) - return true - if( (getEngine()!='singularity' && getEngine()!='apptainer') ) - return false - return get('ociAutoPull')?.toString()=='true' + default String getTemp() { + return null } - /** - * Whenever the Singularity OCI-mode is enabled - * - * @return {@code true} when Singularity OCI-mode is enabled or {@code false} otherwise - */ - @Memoized - boolean isSingularityOciMode() { - if( getEngine()!='singularity' ) { - return false - } - if( get('oci')?.toString()=='true' ) { - log.warn "The setting `singularity.oci` is deprecated - use `singularity.ociMode` instead" - return true - } - if( get('ociMode')?.toString()=='true' ) - return true - return false + default boolean writableInputMounts() { + return true } - List getEnvWhitelist() { - def result = get('envWhitelist') - if( !result ) - return Collections.emptyList() - - if( result instanceof CharSequence ) - return result.tokenize(',').collect { it.trim() } - - if( result instanceof List ) - return result - - throw new IllegalArgumentException("Not a valid `envWhitelist` argument") - } + String getEngine() - boolean entrypointOverride() { - def result = get('entrypointOverride') - if( result == null ) - result = sysEnv.get('NXF_CONTAINER_ENTRYPOINT_OVERRIDE') - if( result != null ) - return Boolean.parseBoolean(result.toString()) - return false - } + List getEnvWhitelist() - String fusionOptions() { - final result = get('fusionOptions') - return result!=null ? 
result : defaultFusionOptions() - } + boolean isEnabled() - protected String defaultFusionOptions() { - final eng = getEngine() - if( !eng ) - return null - if( eng=='docker' || eng=='podman' ) - return '--rm --privileged' - if( isSingularityOciMode() ) - return '-B /dev/fuse' - if( eng=='singularity' || eng=='apptainer' ) - return null - log.warn "Fusion file system is not supported by '$eng' container engine" - return null - } } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ContainerHandler.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ContainerHandler.groovy index af54482bf9..c6fe2832a3 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ContainerHandler.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ContainerHandler.groovy @@ -43,12 +43,12 @@ class ContainerHandler { private Path baseDir - ContainerHandler(Map containerConfig) { - this(containerConfig, CWD) + ContainerHandler(ContainerConfig config) { + this(config, CWD) } - ContainerHandler(Map containerConfig, Path dir) { - this.config = containerConfig as ContainerConfig + ContainerHandler(ContainerConfig config, Path dir) { + this.config = config this.baseDir = dir } @@ -57,14 +57,10 @@ class ContainerHandler { Path getBaseDir() { baseDir } String normalizeImageName(String imageName) { - final engine = config.getEngine() - if( engine == 'shifter' ) { + if( config instanceof ShifterConfig ) { return normalizeShifterImageName(imageName) } - if( engine == 'udocker' ) { - return normalizeUdockerImageName(imageName) - } - if( engine == 'singularity' ) { + if( config instanceof SingularityConfig ) { final normalizedImageName = normalizeSingularityImageName(imageName) if( !config.isEnabled() || !normalizedImageName ) return normalizedImageName @@ -73,10 +69,10 @@ class ContainerHandler { final requiresCaching = normalizedImageName =~ IMAGE_URL_PREFIX if( ContainerInspectMode.dryRun() && requiresCaching ) return imageName - final result = 
requiresCaching ? createSingularityCache(this.config, normalizedImageName) : normalizedImageName + final result = requiresCaching ? createSingularityCache(config, normalizedImageName) : normalizedImageName return Escape.path(result) } - if( engine == 'apptainer' ) { + if( config instanceof ApptainerConfig ) { final normalizedImageName = normalizeApptainerImageName(imageName) if( !config.isEnabled() || !normalizedImageName ) return normalizedImageName @@ -85,10 +81,10 @@ class ContainerHandler { final requiresCaching = normalizedImageName =~ IMAGE_URL_PREFIX if( ContainerInspectMode.dryRun() && requiresCaching ) return imageName - final result = requiresCaching ? createApptainerCache(this.config, normalizedImageName) : normalizedImageName + final result = requiresCaching ? createApptainerCache(config, normalizedImageName) : normalizedImageName return Escape.path(result) } - if( engine == 'charliecloud' ) { + if( config instanceof CharliecloudConfig ) { final normalizedImageName = normalizeCharliecloudImageName(imageName) if( !config.isEnabled() || !normalizedImageName ) return normalizedImageName @@ -97,7 +93,7 @@ class ContainerHandler { final requiresCaching = !imageName.startsWith('/') if( ContainerInspectMode.dryRun() && requiresCaching ) return imageName - final result = requiresCaching ? createCharliecloudCache(this.config, normalizedImageName) : normalizedImageName + final result = requiresCaching ? 
createCharliecloudCache(config, normalizedImageName) : normalizedImageName return Escape.path(result) } // fallback to docker @@ -105,18 +101,18 @@ class ContainerHandler { } @PackageScope - String createSingularityCache(Map config, String imageName) { - new SingularityCache(new ContainerConfig(config)) .getCachePathFor(imageName) .toString() + String createSingularityCache(SingularityConfig config, String imageName) { + SingularityCache.create(config) .getCachePathFor(imageName) .toString() } @PackageScope - String createApptainerCache(Map config, String imageName) { - new ApptainerCache(new ContainerConfig(config)) .getCachePathFor(imageName) .toString() + String createApptainerCache(ApptainerConfig config, String imageName) { + new ApptainerCache(config) .getCachePathFor(imageName) .toString() } @PackageScope - String createCharliecloudCache(Map config, String imageName) { - new CharliecloudCache(new ContainerConfig(config)) .getCachePathFor(imageName) .toString() + String createCharliecloudCache(CharliecloudConfig config, String imageName) { + new CharliecloudCache(config) .getCachePathFor(imageName) .toString() } /** @@ -158,7 +154,7 @@ class ContainerHandler { if( !imageName ) return null - String reg = this.config?.getRegistry() + String reg = config.getRegistry() if( !reg ) return imageName @@ -183,26 +179,6 @@ class ContainerHandler { image.contains('.') || image.contains(':') } - /** - * Normalize Udocker image name adding `:latest` - * when required - * - * @param imageName The container image name - * @return Image name in Udocker canonical format - */ - @PackageScope - String normalizeUdockerImageName( String imageName ) { - - if( !imageName ) - return null - - if( !imageName.contains(':') ) - imageName += ':latest' - - return imageName - } - - public static final Pattern IMAGE_URL_PREFIX = ~/^[^\/:. 
]+:\/\/(.*)/ /** diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ContainerHelper.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ContainerHelper.groovy new file mode 100644 index 0000000000..f987a6af06 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/container/ContainerHelper.groovy @@ -0,0 +1,42 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.container + +import groovy.transform.CompileStatic +import nextflow.SysEnv + +@CompileStatic +class ContainerHelper { + + static boolean entrypointOverride() { + return SysEnv.getBool('NXF_CONTAINER_ENTRYPOINT_OVERRIDE', false) + } + + static boolean fixOwnership(ContainerConfig config) { + return config instanceof DockerConfig && config.fixOwnership + } + + static List parseEnvWhitelist(Object value) { + if( !value ) + return [] + if( value instanceof CharSequence ) + return value.tokenize(',').collect { it.trim() } + if( value instanceof List ) + return value as List + throw new IllegalArgumentException("Not a valid `envWhitelist` argument: $value") + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy index 1b556cf43b..bb99d10c9e 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy 
@@ -30,7 +30,7 @@ class DockerBuilder extends ContainerBuilder { private boolean sudo - private boolean remove = true + private boolean remove private String registry @@ -52,34 +52,37 @@ class DockerBuilder extends ContainerBuilder { private String capAdd - DockerBuilder( String name ) { + DockerBuilder(String name, DockerConfig config) { this.image = name - } - @Override - DockerBuilder params( Map params ) { - if( !params ) return this + if( config.engineOptions ) + addEngineOptions(config.engineOptions) - if( params.containsKey('temp') ) - this.temp = params.temp + this.legacy = config.legacy - if( params.containsKey('engineOptions') ) - addEngineOptions(params.engineOptions.toString()) + if( config.mountFlags ) + this.mountFlags0 = config.mountFlags - if( params.containsKey('runOptions') ) - addRunOptions(params.runOptions.toString()) + this.remove = config.remove - if ( params.userEmulation?.toString() == 'true' ) - log.warn1("Undocumented setting `docker.userEmulation` is not supported any more - please remove it from your config") + if( config.runOptions ) + addRunOptions(config.runOptions) - if ( params.containsKey('remove') ) - this.remove = params.remove?.toString() == 'true' + this.sudo = config.sudo - if( params.containsKey('sudo') ) - this.sudo = params.sudo?.toString() == 'true' + if( config.temp ) + this.temp = config.temp - if( params.containsKey('tty') ) - this.tty = params.tty?.toString() == 'true' + this.tty = config.tty + } + + DockerBuilder(String name) { + this(name, new DockerConfig([:])) + } + + @Override + DockerBuilder params( Map params ) { + if( !params ) return this if( params.containsKey('entry') ) this.entryPoint = params.entry @@ -87,17 +90,11 @@ class DockerBuilder extends ContainerBuilder { if( params.containsKey('kill') ) this.kill = params.kill - if( params.containsKey('legacy') ) - this.legacy = params.legacy?.toString() == 'true' - if( params.containsKey('readOnlyInputs') ) - this.readOnlyInputs = 
params.readOnlyInputs?.toString() == 'true' - - if( params.containsKey('mountFlags') ) - this.mountFlags0 = params.mountFlags + this.readOnlyInputs = params.readOnlyInputs if( params.containsKey('privileged') ) - this.privileged = params.privileged?.toString() == 'true' + this.privileged = params.privileged if( params.containsKey('device') ) this.device = params.device diff --git a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy new file mode 100644 index 0000000000..7d3a095d62 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy @@ -0,0 +1,151 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.container + +import groovy.transform.CompileStatic +import groovy.transform.EqualsAndHashCode +import groovy.util.logging.Slf4j +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description + +@ScopeName("docker") +@Description(""" + The `docker` scope controls how [Docker](https://www.docker.com) containers are executed by Nextflow. +""") +@Slf4j +@CompileStatic +@EqualsAndHashCode +class DockerConfig implements ConfigScope, ContainerConfig { + + @ConfigOption + @Description(""" + Enable Docker execution (default: `false`). 
+ """) + final boolean enabled + + @ConfigOption + @Description(""" + Specify additional options supported by the Docker engine i.e. `docker [OPTIONS]`. + """) + final String engineOptions + + @ConfigOption + @Description(""" + Comma separated list of environment variable names to be included in the container environment. + """) + final List envWhitelist + + @ConfigOption + @Description(""" + Fix ownership of files created by the Docker container (default: `false`). + """) + final boolean fixOwnership + + @ConfigOption + @Description(""" + """) + final Object kill + + @ConfigOption + @Description(""" + Use command line options removed since Docker 1.10.0 (default: `false`). + """) + final boolean legacy + + @ConfigOption + @Description(""" + Add the specified flags to the volume mounts e.g. `'ro,Z'`. + """) + final String mountFlags + + @ConfigOption + @Description(""" + The registry from where Docker images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. + """) + final String registry + + @ConfigOption + @Description(""" + When `true`, forces the override of the registry name in fully qualified container image names with the registry specified by `docker.registry` (default: `false`). + """) + final boolean registryOverride + + @ConfigOption + @Description(""" + Clean up the container after the execution (default: `true`). See the [Docker documentation](https://docs.docker.com/engine/reference/run/#clean-up---rm) for details. + """) + final boolean remove + + @ConfigOption + @Description(""" + Specify extra command line options supported by the `docker run` command. See the [Docker documentation](https://docs.docker.com/engine/reference/run/) for details. + """) + final String runOptions + + @ConfigOption + @Description(""" + Executes Docker run command as `sudo` (default: `false`). 
+ """) + final boolean sudo + + @ConfigOption + @Description(""" + Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. + """) + final String temp + + @ConfigOption + @Description(""" + Allocates a pseudo-tty (default: `false`). + """) + final boolean tty + + /* required by extension point -- do not remove */ + DockerConfig() {} + + DockerConfig(Map opts) { + enabled = opts.enabled as boolean + engineOptions = opts.engineOptions + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + fixOwnership = opts.fixOwnership as boolean + kill = opts.kill != null ? opts.kill : true + legacy = opts.legacy as boolean + mountFlags = opts.mountFlags + registry = opts.registry + registryOverride = opts.registryOverride as boolean + remove = opts.remove != null ? opts.remove as boolean : true + runOptions = opts.runOptions + sudo = opts.sudo as boolean + temp = opts.temp + tty = opts.tty as boolean + + if( opts.userEmulation ) + log.warn1("Config setting `docker.userEmulation` is not supported anymore") + } + + @Override + String getEngine() { + return 'docker' + } + + @Override + String getFusionOptions() { + return '--rm --privileged' + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/container/PodmanBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/PodmanBuilder.groovy index c6db5591bc..44e1ffd783 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/PodmanBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/PodmanBuilder.groovy @@ -25,7 +25,7 @@ import groovy.transform.CompileStatic @CompileStatic class PodmanBuilder extends ContainerBuilder { - private boolean remove = true + private boolean remove private String registry @@ -43,25 +43,31 @@ class PodmanBuilder extends ContainerBuilder { private String capAdd - PodmanBuilder( String name ) { + PodmanBuilder(String name, PodmanConfig 
config) { this.image = name - } - @Override - PodmanBuilder params( Map params ) { - if( !params ) return this + if( config.engineOptions ) + addEngineOptions(config.engineOptions) + + if( config.mountFlags ) + this.mountFlags0 = config.mountFlags + + this.remove = config.remove - if( params.containsKey('temp') ) - this.temp = params.temp + if( config.runOptions ) + addRunOptions(config.runOptions) - if( params.containsKey('engineOptions') ) - addEngineOptions(params.engineOptions.toString()) + if( config.temp ) + this.temp = config.temp + } - if( params.containsKey('runOptions') ) - addRunOptions(params.runOptions.toString()) + PodmanBuilder(String name) { + this(name, new PodmanConfig([:])) + } - if ( params.containsKey('remove') ) - this.remove = params.remove?.toString() == 'true' + @Override + PodmanBuilder params( Map params ) { + if( !params ) return this if( params.containsKey('entry') ) this.entryPoint = params.entry @@ -72,9 +78,6 @@ class PodmanBuilder extends ContainerBuilder { if( params.containsKey('readOnlyInputs') ) this.readOnlyInputs = params.readOnlyInputs?.toString() == 'true' - if( params.containsKey('mountFlags') ) - this.mountFlags0 = params.mountFlags - if( params.containsKey('privileged') ) this.privileged = params.privileged?.toString() == 'true' diff --git a/modules/nextflow/src/main/groovy/nextflow/container/PodmanConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/PodmanConfig.groovy new file mode 100644 index 0000000000..1fbe09ea55 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/container/PodmanConfig.groovy @@ -0,0 +1,111 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.container + +import groovy.transform.CompileStatic +import groovy.transform.EqualsAndHashCode +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description + +@ScopeName("podman") +@Description(""" + The `podman` scope controls how [Podman](https://podman.io/) containers are executed by Nextflow. +""") +@CompileStatic +@EqualsAndHashCode +class PodmanConfig implements ConfigScope, ContainerConfig { + + @ConfigOption + @Description(""" + Execute tasks with Podman containers (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + Specify additional options supported by the Podman engine i.e. `podman [OPTIONS]`. + """) + final String engineOptions + + @ConfigOption + @Description(""" + Comma separated list of environment variable names to be included in the container environment. + """) + final List envWhitelist + + @ConfigOption + @Description(""" + """) + final Object kill + + @ConfigOption + @Description(""" + Add the specified flags to the volume mounts e.g. `'ro,Z'`. + """) + final String mountFlags + + @ConfigOption + @Description(""" + The registry from where container images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. + """) + final String registry + + @ConfigOption + @Description(""" + Clean-up the container after the execution (default: `true`). 
+ """) + final boolean remove + + @ConfigOption + @Description(""" + Specify extra command line options supported by the `podman run` command. + """) + final String runOptions + + @ConfigOption + @Description(""" + Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. + """) + final String temp + + /* required by extension point -- do not remove */ + PodmanConfig() {} + + PodmanConfig(Map opts) { + enabled = opts.enabled as boolean + engineOptions = opts.engineOptions + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + kill = opts.kill != null ? opts.kill : true + mountFlags = opts.mountFlags + registry = opts.registry + remove = opts.remove != null ? opts.remove as boolean : true + runOptions = opts.runOptions + temp = opts.temp + } + + @Override + String getEngine() { + return 'podman' + } + + @Override + String getFusionOptions() { + return '--rm --privileged' + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/container/SarusBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/SarusBuilder.groovy index f73cb2ae27..6c5e958863 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/SarusBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/SarusBuilder.groovy @@ -28,15 +28,28 @@ class SarusBuilder extends ContainerBuilder { private boolean verbose - SarusBuilder( String image ) { - assert image + SarusBuilder(String image, SarusConfig config) { this.image = image + + if( config.runOptions ) + addRunOptions(config.runOptions) + this.tty = config.tty + this.verbose = config.verbose + } + + SarusBuilder(String image) { + this(image, new SarusConfig([:])) + } + + SarusBuilder params( Map params ) { + if( params.containsKey('entry') ) + this.entryPoint = params.entry + + return this } @Override SarusBuilder build(StringBuilder result) { - assert image - result << 'sarus ' if( 
verbose ) @@ -64,23 +77,6 @@ class SarusBuilder extends ContainerBuilder { return this } - SarusBuilder params( Map params ) { - - if( params.containsKey('verbose') ) - this.verbose = params.verbose.toString() == 'true' - - if( params.containsKey('entry') ) - this.entryPoint = params.entry - - if( params.containsKey('runOptions') ) - addRunOptions(params.runOptions.toString()) - - if( params.containsKey('tty') ) - this.tty = params.tty?.toString() == 'true' - - return this - } - @Override String getRunCommand() { def run = super.getRunCommand() diff --git a/modules/nextflow/src/main/groovy/nextflow/container/SarusConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/SarusConfig.groovy new file mode 100644 index 0000000000..05c4c88391 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/container/SarusConfig.groovy @@ -0,0 +1,78 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.container + +import groovy.transform.CompileStatic +import groovy.transform.EqualsAndHashCode +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description + +@ScopeName("sarus") +@Description(""" + The `sarus` scope controls how [Sarus](https://sarus.readthedocs.io) containers are executed by Nextflow. 
+""") +@CompileStatic +@EqualsAndHashCode +class SarusConfig implements ConfigScope, ContainerConfig { + + @ConfigOption + @Description(""" + Execute tasks with Sarus containers (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + Comma-separated list of environment variable names to be included in the container environment. + """) + final List envWhitelist + + @ConfigOption + @Description(""" + Specify extra command line options supported by the `sarus run` command. + """) + final String runOptions + + @ConfigOption + @Description(""" + Allocates a pseudo-tty (default: `false`). + """) + final String tty + + @ConfigOption + @Description(""" + """) + final boolean verbose + + /* required by extension point -- do not remove */ + SarusConfig() {} + + SarusConfig(Map opts) { + enabled = opts.enabled as boolean + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + runOptions = opts.runOptions + tty = opts.tty + verbose = opts.verbose as boolean + } + + @Override + String getEngine() { + return 'sarus' + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ShifterBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ShifterBuilder.groovy index 3dbfe01707..9d41950986 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ShifterBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ShifterBuilder.groovy @@ -26,14 +26,24 @@ class ShifterBuilder extends ContainerBuilder { private boolean verbose - ShifterBuilder( String image ) { - assert image + ShifterBuilder(String image, ShifterConfig config) { this.image = image + this.verbose = config.verbose + } + + ShifterBuilder(String image) { + this(image, new ShifterConfig([:])) + } + + ShifterBuilder params( Map params ) { + if( params.containsKey('entry') ) + this.entryPoint = params.entry + + return this } @Override ShifterBuilder build(StringBuilder result) { - assert image appendEnv(result) @@ -48,17 +58,6 
@@ class ShifterBuilder extends ContainerBuilder { return this } - ShifterBuilder params( Map params ) { - - if( params.containsKey('verbose') ) - this.verbose = params.verbose.toString() == 'true' - - if( params.containsKey('entry') ) - this.entryPoint = params.entry - - return this - } - @Override String getRunCommand() { def run = super.getRunCommand() diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ShifterConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ShifterConfig.groovy new file mode 100644 index 0000000000..bbb9ba6e9f --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/container/ShifterConfig.groovy @@ -0,0 +1,62 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.container + +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description + +@ScopeName("shifter") +@Description(""" + The `shifter` scope controls how [Shifter](https://docs.nersc.gov/programming/shifter/overview/) containers are executed by Nextflow. +""") +@CompileStatic +class ShifterConfig implements ConfigScope, ContainerConfig { + + @ConfigOption + @Description(""" + Execute tasks with Shifter containers (default: `false`). 
+ """) + final boolean enabled + + @ConfigOption + @Description(""" + Comma-separated list of environment variable names to be included in the container environment. + """) + final List envWhitelist + + @ConfigOption + @Description(""" + """) + final boolean verbose + + /* required by extension point -- do not remove */ + ShifterConfig() {} + + ShifterConfig(Map opts) { + enabled = opts.enabled as boolean + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + verbose = opts.verbose as boolean + } + + @Override + String getEngine() { + return 'shifter' + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/container/SingularityBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/SingularityBuilder.groovy index 78394c06f7..6815e1061e 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/SingularityBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/SingularityBuilder.groovy @@ -31,7 +31,7 @@ import nextflow.SysEnv @Slf4j class SingularityBuilder extends ContainerBuilder { - private boolean autoMounts + protected boolean autoMounts private boolean homeMount @@ -49,6 +49,11 @@ class SingularityBuilder extends ContainerBuilder { this.runCmd0 = defaultRunCommand() } + SingularityBuilder(String name, SingularityConfig config) { + this(name) + applyConfig(config) + } + private boolean defaultHomeMount() { SysEnv.get("NXF_${getBinaryName().toUpperCase()}_HOME_MOUNT", 'false').toString() == 'true' } @@ -70,23 +75,25 @@ class SingularityBuilder extends ContainerBuilder { protected String getBinaryName() { 'singularity' } - @Override - SingularityBuilder params(Map params) { + protected void applyConfig(SingularityConfig config) { - if( params.containsKey('temp') ) - this.temp = params.temp + if( config.autoMounts != null ) + this.autoMounts = config.autoMounts - if( params.containsKey('entry') ) - this.entryPoint = params.entry + if( config.engineOptions ) + this.addEngineOptions(config.engineOptions) - 
if( params.containsKey('engineOptions') ) - addEngineOptions(params.engineOptions.toString()) + this.ociMode = config.ociMode - if( params.containsKey('runOptions') ) - addRunOptions(params.runOptions.toString()) + if( config.runOptions ) + this.addRunOptions(config.runOptions) + } - if( params.autoMounts!=null ) - autoMounts = params.autoMounts.toString() == 'true' + @Override + SingularityBuilder params(Map params) { + + if( params.containsKey('entry') ) + this.entryPoint = params.entry if( params.newPidNamespace!=null ) newPidNamespace = params.newPidNamespace.toString() == 'true' @@ -94,12 +101,6 @@ class SingularityBuilder extends ContainerBuilder { if( params.containsKey('readOnlyInputs') ) this.readOnlyInputs = params.readOnlyInputs?.toString() == 'true' - // note: 'oci' flag should be ignored by Apptainer sub-class - if( params.oci!=null && this.class==SingularityBuilder ) - ociMode = params.oci.toString() == 'true' - else if( params.ociMode!=null && this.class==SingularityBuilder ) - ociMode = params.ociMode.toString() == 'true' - return this } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy b/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy index fbe76511d4..7861b0b62f 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/SingularityCache.groovy @@ -44,28 +44,33 @@ class SingularityCache { static final private Map> localImageNames = new ConcurrentHashMap<>() - private ContainerConfig config + private String cacheDir + + private String libraryDir + + private boolean noHttps + + private Duration pullTimeout private Map env private boolean missingCacheDir - private Duration pullTimeout = Duration.of('20min') - protected String getBinaryName() { return 'singularity' } protected String getAppName() { getBinaryName().capitalize() } protected String getEnvPrefix() { getBinaryName().toUpperCase() } - /** 
- * Create a Singularity cache object - * - * @param config A {@link ContainerConfig} object - * @param env The environment configuration object. Specifying {@code null} the current system environment is used - */ - SingularityCache(ContainerConfig config, Map env=null) { - this.config = config + static SingularityCache create(SingularityConfig config, Map env=null) { + new SingularityCache(config.cacheDir, config.libraryDir, config.noHttps, config.pullTimeout, env) + } + + SingularityCache(String cacheDir, String libraryDir, boolean noHttps, Duration pullTimeout, Map env=null) { + this.cacheDir = cacheDir + this.libraryDir = libraryDir + this.noHttps = noHttps + this.pullTimeout = pullTimeout this.env = env ?: SysEnv.get() } @@ -135,10 +140,7 @@ class SingularityCache { @PackageScope Path getCacheDir() { - if( config.pullTimeout ) - pullTimeout = config.pullTimeout as Duration - - def str = config.cacheDir as String + String str = cacheDir if( str ) return checkDir(str) @@ -150,14 +152,14 @@ class SingularityCache { if( str ) return checkDir(str) - def workDir = Global.session.workDir + Path workDir = Global.session.workDir if( workDir.fileSystem != FileSystems.default ) { // when the work dir is a remote path use the local launch directory to cache image files workDir = Const.appCacheDir.toAbsolutePath() } missingCacheDir = true - def result = workDir.resolve('singularity') + final result = workDir.resolve('singularity') result.mkdirs() return result @@ -177,7 +179,7 @@ class SingularityCache { */ @PackageScope Path getLibraryDir() { - def str = config.libraryDir as String ?: env.get("NXF_${envPrefix}_LIBRARYDIR".toString()) + final str = libraryDir ?: env.get("NXF_${envPrefix}_LIBRARYDIR".toString()) if( str ) return existsDir(str) @@ -264,7 +266,7 @@ class SingularityCache { // Construct a temporary name for the image file final tmpFile = getTempImagePath(targetPath) - final noHttpsOption = (config.noHttps)? '--no-https' : '' + final noHttpsOption = (noHttps)? 
'--no-https' : '' String cmd = "${binaryName} pull ${noHttpsOption} --name ${Escape.path(tmpFile.name)} $imageUrl > /dev/null" try { diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/SingularityConfig.java b/modules/nextflow/src/main/groovy/nextflow/container/SingularityConfig.groovy similarity index 51% rename from modules/nf-lang/src/main/java/nextflow/config/scopes/SingularityConfig.java rename to modules/nextflow/src/main/groovy/nextflow/container/SingularityConfig.groovy index d0c0892a57..caee2d1783 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/SingularityConfig.java +++ b/modules/nextflow/src/main/groovy/nextflow/container/SingularityConfig.groovy @@ -13,85 +13,125 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package nextflow.config.scopes; +package nextflow.container -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.util.Duration -public class SingularityConfig implements ConfigScope { +@ScopeName("singularity") +@Description(""" + The `singularity` scope controls how [Singularity](https://sylabs.io/singularity/) containers are executed by Nextflow. +""") +@CompileStatic +class SingularityConfig implements ConfigScope, ContainerConfig { @ConfigOption @Description(""" - When `true` Nextflow automatically mounts host paths in the executed container. It requires the `user bind control` feature to be enabled in your Singularity installation (default: `true`). + Automatically mount host paths in the executed container (default: `true`). It requires the `user bind control` feature to be enabled in your Singularity installation. 
""") - public boolean autoMounts; + final Boolean autoMounts @ConfigOption @Description(""" The directory where remote Singularity images are stored. When using a compute cluster, it must be a shared folder accessible to all compute nodes. """) - public String cacheDir; + final String cacheDir @ConfigOption @Description(""" - Enable Singularity execution (default: `false`). + Execute tasks with Singularity containers (default: `false`). """) - public boolean enabled; + final boolean enabled @ConfigOption @Description(""" - This attribute can be used to provide any option supported by the Singularity engine i.e. `singularity [OPTIONS]`. + Specify additional options supported by the Singularity engine i.e. `singularity [OPTIONS]`. """) - public String engineOptions; + final String engineOptions @ConfigOption @Description(""" Comma separated list of environment variable names to be included in the container environment. """) - public String envWhitelist; + final List envWhitelist @ConfigOption @Description(""" Directory where remote Singularity images are retrieved. When using a computing cluster it must be a shared folder accessible to all compute nodes. """) - public String libraryDir; + final String libraryDir @ConfigOption @Description(""" Pull the Singularity image with http protocol (default: `false`). """) - public boolean noHttps; + final boolean noHttps @ConfigOption @Description(""" When enabled, OCI (and Docker) container images are pull and converted to a SIF image file format implicitly by the Singularity run command, instead of Nextflow (default: `false`). """) - public boolean ociAutoPull; + final boolean ociAutoPull @ConfigOption @Description(""" Enable OCI-mode, that allows running native OCI compliant container image with Singularity using `crun` or `runc` as low-level runtime (default: `false`). 
""") - public boolean ociMode; + final Boolean ociMode @ConfigOption @Description(""" The amount of time the Singularity pull can last, after which the process is terminated (default: `20 min`). """) - public Duration pullTimeout; + final Duration pullTimeout @ConfigOption @Description(""" The registry from where Docker images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. """) - public String registry; + final String registry @ConfigOption @Description(""" - This attribute can be used to provide any extra command line options supported by `singularity exec`. + Specify extra command line options supported by `singularity exec`. """) - public String runOptions; + final String runOptions + + /* required by extension point -- do not remove */ + SingularityConfig() {} + + SingularityConfig(Map opts) { + autoMounts = opts.autoMounts as Boolean + cacheDir = opts.cacheDir + enabled = opts.enabled as boolean + engineOptions = opts.engineOptions + envWhitelist = ContainerHelper.parseEnvWhitelist(opts.envWhitelist) + libraryDir = opts.libraryDir + noHttps = opts.noHttps as boolean + ociAutoPull = opts.ociAutoPull as boolean + ociMode = opts.ociMode as Boolean + pullTimeout = opts.pullTimeout as Duration ?: Duration.of('20min') + registry = opts.registry + runOptions = opts.runOptions + } + + @Override + String getEngine() { + return 'singularity' + } + + @Override + boolean canRunOciImage() { + return ociMode || ociAutoPull + } + + @Override + String getFusionOptions() { + return ociMode ? 
'-B /dev/fuse' : null + } } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/UdockerBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/UdockerBuilder.groovy deleted file mode 100644 index 919d023ad9..0000000000 --- a/modules/nextflow/src/main/groovy/nextflow/container/UdockerBuilder.groovy +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2013-2024, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package nextflow.container - - -import groovy.transform.PackageScope -/** - * Wrap a task execution in a Udocker container - * - * See https://github.com/indigo-dc/udocker - * - * @author Paolo Di Tommaso - */ -class UdockerBuilder extends ContainerBuilder { - - private String temp - - private boolean remove = true - - UdockerBuilder( String image ) { - this.image = image - if( !this.image.contains(":") ) - this.image += ':latest' - } - - @Override - UdockerBuilder params(Map params) { - if( !params ) return this - - if( params.containsKey('temp') ) - this.temp = params.temp - - if( params.containsKey('runOptions') ) - addRunOptions(params.runOptions.toString()) - - if ( params.containsKey('remove') ) - this.remove = params.remove?.toString() == 'true' - - if( params.containsKey('entry') ) - this.entryPoint = params.entry - - return this - } - - @Override - UdockerBuilder build(StringBuilder result) { - assert image, 'Missing container image' - - result << 'udocker.py ' - result << 'run ' - - if( remove ) { - 
result << '--rm ' - } - - if( cpuset ) { - result << "--cpuset-cpus=$cpuset " - } - - // add the environment - appendEnv(result) - - if( temp ) - result << "-v $temp:/tmp " - - // mount the input folders - result << makeVolumes(mounts) - result << '-w "$NXF_TASK_WORKDIR" --bindhome ' - - - if( runOptions ) - result << runOptions.join(' ') << ' ' - - // the ID of the container to run - result << "\$(udocker.py create \"$image\")" - - this.runCommand = result.toString() - return this - } - - @Override - String getRunCommand() { - def run = super.getRunCommand() - def result = "((udocker.py images | grep -E -o \"^$image\\s\") || udocker.py pull \"$image\")>/dev/null\n" - result += "[[ \$? != 0 ]] && echo \"Udocker failed while pulling container \\`$image\\`\" >&2 && exit 1\n" - result += run - return result - } - - @PackageScope String getRunCommandRaw() { - super.getRunCommand() - } - -} diff --git a/modules/nextflow/src/main/groovy/nextflow/dag/DotRenderer.groovy b/modules/nextflow/src/main/groovy/nextflow/dag/DotRenderer.groovy index 497b0f5f51..7f403621da 100644 --- a/modules/nextflow/src/main/groovy/nextflow/dag/DotRenderer.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/dag/DotRenderer.groovy @@ -21,8 +21,6 @@ import java.nio.file.Path import groovy.transform.CompileStatic import groovy.transform.PackageScope import groovy.util.logging.Slf4j -import nextflow.Global -import nextflow.Session /** * Render the DAG using the Graphviz DOT format @@ -41,15 +39,8 @@ class DotRenderer implements DagRenderer { private final String direction - /** - * Create a render instance - * - * @param name The graph name used in the DOT format - */ - DotRenderer( String name ) { + DotRenderer(String name, String direction) { this.name = normalise(name) - final session = Global.session as Session - final direction = session.config.navigate('dag.direction', 'TB') as String if( direction !in ['TB','LR'] ) { log.warn "Invalid configuration property `dag.direction = '$direction'` - 
use either: 'TB' (top-bottom) or 'LR' (left-right)" this.direction = 'TB' diff --git a/modules/nextflow/src/main/groovy/nextflow/dag/GraphVizRenderer.groovy b/modules/nextflow/src/main/groovy/nextflow/dag/GraphVizRenderer.groovy index 6490bc7b16..06981e7368 100644 --- a/modules/nextflow/src/main/groovy/nextflow/dag/GraphVizRenderer.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/dag/GraphVizRenderer.groovy @@ -18,21 +18,26 @@ package nextflow.dag import java.nio.file.Files import java.nio.file.Path +import groovy.transform.CompileStatic import groovy.util.logging.Slf4j /** * @author Paolo Di Tommaso * @author Mike Smoot */ @Slf4j +@CompileStatic class GraphvizRenderer implements DagRenderer { + private String name + private String format - private String name + private String direction - GraphvizRenderer(String name, String format) { + GraphvizRenderer(String name, String format, String direction) { this.name = name this.format = format + this.direction = direction } /** @@ -45,7 +50,7 @@ class GraphvizRenderer implements DagRenderer { def result = Files.createTempFile('nxf-',".$format") def temp = Files.createTempFile('nxf-','.dot') // save the DAG as `dot` to a temp file - temp.text = new DotRenderer(name).renderNetwork(dag) + temp.text = new DotRenderer(name, direction).renderNetwork(dag) final cmd = "command -v dot &>/dev/null || exit 128 && dot -T${format} ${temp} > ${result}" final process = new ProcessBuilder().command("bash","-c", cmd).redirectErrorStream(true).start() diff --git a/modules/nextflow/src/main/groovy/nextflow/dag/MermaidHtmlRenderer.groovy b/modules/nextflow/src/main/groovy/nextflow/dag/MermaidHtmlRenderer.groovy index 08aae0e3b3..7b2667f629 100644 --- a/modules/nextflow/src/main/groovy/nextflow/dag/MermaidHtmlRenderer.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/dag/MermaidHtmlRenderer.groovy @@ -18,18 +18,28 @@ package nextflow.dag import java.nio.file.Path +import groovy.transform.CompileStatic +import 
nextflow.trace.config.DagConfig + /** * Render the DAG as a Mermaid diagram embedded in an HTML document. * See https://mermaid.js.org/ for more info. * * @author Ben Sherman */ +@CompileStatic class MermaidHtmlRenderer implements DagRenderer { + DagConfig config + + MermaidHtmlRenderer(DagConfig config) { + this.config = config + } + @Override void renderDocument(DAG dag, Path file) { final template = readTemplate() - final network = new MermaidRenderer().renderNetwork(dag) + final network = new MermaidRenderer(config).renderNetwork(dag) file.text = template.replace('REPLACE_WITH_NETWORK_DATA', network) } diff --git a/modules/nextflow/src/main/groovy/nextflow/dag/MermaidRenderer.groovy b/modules/nextflow/src/main/groovy/nextflow/dag/MermaidRenderer.groovy index 436bd540df..f805791f2d 100644 --- a/modules/nextflow/src/main/groovy/nextflow/dag/MermaidRenderer.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/dag/MermaidRenderer.groovy @@ -23,8 +23,7 @@ import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.TupleConstructor import groovy.util.logging.Slf4j -import nextflow.Global -import nextflow.Session +import nextflow.trace.config.DagConfig /** * Render the DAG using the Mermaid format to the specified file. * See https://mermaid-js.github.io/mermaid/#/ for more info. 
@@ -39,15 +38,17 @@ class MermaidRenderer implements DagRenderer { static private final String OUTPUTS = 'outputs' - private Session session = Global.session as Session + private int depth - private int depth = session.config.navigate('dag.depth', -1) as int + private String direction - private String direction = session.config.navigate('dag.direction', 'TB') + private boolean verbose - private boolean verbose = session.config.navigate('dag.verbose', false); + MermaidRenderer(DagConfig config) { + this.depth = config.depth + this.direction = config.direction + this.verbose = config.verbose - { if( direction !in ['TB','LR'] ) { log.warn "Invalid configuration property `dag.direction = '$direction'` - use either: 'TB' (top-bottom) or 'LR' (left-right)" this.direction = 'TB' diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/AbstractGridExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/AbstractGridExecutor.groovy index 306352bbb8..dc4bd013c6 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/AbstractGridExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/AbstractGridExecutor.groovy @@ -51,7 +51,7 @@ abstract class AbstractGridExecutor extends Executor { @Override protected void register () { super.register() - queueInterval = session.getQueueStatInterval(name) + queueInterval = config.getQueueStatInterval(name) log.debug "Creating executor '$name' > queue-stat-interval: ${queueInterval}" } @@ -60,7 +60,7 @@ abstract class AbstractGridExecutor extends Executor { * @return */ TaskMonitor createTaskMonitor() { - return TaskPollingMonitor.create(session, name, 100, Duration.of('5 sec')) + return TaskPollingMonitor.create(session, config, name, 100, Duration.of('5 sec')) } /* @@ -191,11 +191,11 @@ abstract class AbstractGridExecutor extends Executor { @PackageScope String resolveCustomJobName(TaskRun task) { try { - def custom = (Closure)session?.getExecConfigProp(name, 'jobName', null) + final custom = 
(Closure)config.getExecConfigProp(name, 'jobName', null) if( !custom ) return null - def ctx = [ (TaskProcessor.TASK_CONTEXT_PROPERTY_NAME): task.config ] + final ctx = [ (TaskProcessor.TASK_CONTEXT_PROPERTY_NAME): task.config ] custom.cloneWith(ctx).call()?.toString() } catch( Exception e ) { @@ -321,7 +321,7 @@ abstract class AbstractGridExecutor extends Executor { } Map getQueueStatus(queue) { - final global = session.getExecConfigProp(name, 'queueGlobalStatus',false) + final global = config.getExecConfigProp(name, 'queueGlobalStatus', false) if( global ) { log.debug1("Executor '$name' fetching queue global status") queue = null diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy index 793560a61d..d6e9a8bb7d 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy @@ -28,6 +28,7 @@ import groovy.transform.PackageScope import groovy.util.logging.Slf4j import nextflow.SysEnv import nextflow.container.ContainerBuilder +import nextflow.container.ContainerHelper import nextflow.container.DockerBuilder import nextflow.container.SingularityBuilder import nextflow.exception.ProcessException @@ -160,7 +161,12 @@ class BashWrapperBuilder { } protected boolean fixOwnership() { - systemOsName == 'Linux' && containerConfig?.fixOwnership && runWithContainer && containerConfig.engine == 'docker' // <-- note: only for docker (other container runtimes are not affected) + // note: only for docker (other container runtimes are not affected) + ContainerHelper.fixOwnership(containerConfig) && isLinuxOS() && runWithContainer + } + + protected isLinuxOS() { + systemOsName == 'Linux' } protected isMacOS() { @@ -654,8 +660,8 @@ class BashWrapperBuilder { } @PackageScope - ContainerBuilder createContainerBuilder0(String engine) { - 
ContainerBuilder.create(engine, containerImage) + ContainerBuilder createContainerBuilder0() { + ContainerBuilder.create(containerConfig, containerImage) } protected boolean getAllowContainerMounts() { @@ -672,8 +678,7 @@ class BashWrapperBuilder { @PackageScope ContainerBuilder createContainerBuilder(String changeDir) { - final engine = containerConfig.getEngine() - ContainerBuilder builder = createContainerBuilder0(engine) + final builder = createContainerBuilder0() /* * initialise the builder @@ -728,22 +733,17 @@ class BashWrapperBuilder { builder.addEnv(var) } - // set up run docker params - builder.params(containerConfig) - - // extra rule for the 'auto' temp dir temp dir - def temp = containerConfig.temp?.toString() - if( temp == 'auto' || temp == 'true' ) { + // extra rule for the 'auto' temp dir + if( containerConfig.getTemp() == 'auto' ) builder.setTemp( changeDir ? '$NXF_SCRATCH' : '$(nxf_mktemp)' ) - } - if( containerConfig.containsKey('kill') ) - builder.params(kill: containerConfig.kill) + if( containerConfig.getKill() != null ) + builder.params(kill: containerConfig.getKill()) - if( containerConfig.writableInputMounts==false ) + if( containerConfig.writableInputMounts()==false ) builder.params(readOnlyInputs: true) - if( this.containerConfig.entrypointOverride() ) + if( containerConfig.entrypointOverride() ) builder.params(entry: '/bin/bash') // give a chance to override any option with process specific `containerOptions` diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/BatchCleanup.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/BatchCleanup.groovy index fa5ccd0931..b27ac2feba 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/BatchCleanup.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/BatchCleanup.groovy @@ -28,16 +28,6 @@ import nextflow.Session @CompileStatic class BatchCleanup { - /** - * Default kill batch size - */ - int size = 100 - - /** - * The current session object - */ - Session 
session - /* * helper class */ @@ -67,9 +57,6 @@ class BatchCleanup { def pair = aggregate.get(name) if( !pair ) { aggregate.put(name, pair = new ExecutorJobsPair(executor)) - if( !session ) { - session = executor.session - } } pair.jobIds.add(jobId) } @@ -80,10 +67,9 @@ class BatchCleanup { */ void kill() { aggregate.values().each { pair -> - - int batchSize = session ? session.getExecConfigProp(pair.executor.name, 'killBatchSize', size) as int: size + final executor = pair.executor + final batchSize = executor.config.getExecConfigProp(executor.name, 'killBatchSize', 100) as int kill0(pair.executor, pair.jobIds, batchSize) - } } diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/CrgExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/CrgExecutor.groovy index 4040d6a970..33c06da77c 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/CrgExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/CrgExecutor.groovy @@ -18,6 +18,7 @@ package nextflow.executor import groovy.transform.CompileStatic import groovy.util.logging.Slf4j +import nextflow.container.DockerConfig import nextflow.processor.TaskArrayRun import nextflow.processor.TaskRun /** @@ -118,7 +119,7 @@ class CrgExecutor extends SgeExecutor { // The Univa scheduler must allocate the required cores for the job execution // The variable '$SGE_BINDING' must contain the cores to be used if( task.container && task.isDockerEnabled() ) { - def opt = task.containerConfig.legacy ? '--cpuset' : '--cpuset-cpus' + def opt = legacyCpuOpts(task) ? 
'--cpuset' : '--cpuset-cpus' def str = "\n" str += "cpuset=\${cpuset:=''}\n" str += "[[ \$SGE_BINDING ]] && cpuset=\"$opt \$(echo \$SGE_BINDING | sed 's/ /,/g')\"\n" @@ -129,4 +130,8 @@ class CrgExecutor extends SgeExecutor { return builder } + private boolean legacyCpuOpts(TaskRun task) { + return task.containerConfig instanceof DockerConfig && ((DockerConfig) task.containerConfig).legacy + } + } diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/Executor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/Executor.groovy index 4af74bfe1f..c79426288c 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/Executor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/Executor.groovy @@ -43,6 +43,11 @@ abstract class Executor { */ Session session + /** + * The `executor` configuration settings + */ + ExecutorConfig config + /** * The executor simple name */ diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorConfig.groovy new file mode 100644 index 0000000000..8d40eb5567 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorConfig.groovy @@ -0,0 +1,232 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package nextflow.executor + +import groovy.transform.CompileStatic +import groovy.transform.Memoized +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.util.Duration +import nextflow.util.MemoryUnit + +@ScopeName("executor") +@Description(""" + The `executor` scope controls various executor behaviors. +""") +@CompileStatic +class ExecutorConfig implements ConfigScope { + + @ConfigOption + @Description(""" + *Used only by the SLURM, LSF, PBS and PBS Pro executors.* + + The project or organization account that should be charged for running the pipeline jobs. + """) + final String account + + @ConfigOption + @Description(""" + *Used only by the local executor.* + + The maximum number of CPUs made available by the underlying system. + """) + final Integer cpus + + @ConfigOption + @Description(""" + Determines how often to log the executor status (default: `5 min`). + """) + final Duration dumpInterval + + @ConfigOption + @Description(""" + *Used only by grid executors.* + + Determines how long to wait for the `.exitcode` file to be created after the task has completed, before returning an error status (default: `270 sec`). + """) + final Duration exitReadTimeout + + @ConfigOption + @Description(''' + *Used only by grid executors and Google Batch.* + + Determines the name of jobs submitted to the underlying cluster executor: + ```groovy + executor.jobName = { "$task.name - $task.hash" } + ``` + ''') + final Closure jobName + + @ConfigOption + @Description(""" + Determines the number of jobs that can be killed in a single command execution (default: `100`). + """) + final int killBatchSize + + @ConfigOption + @Description(""" + *Used only by the local executor.* + + The maximum amount of memory made available by the underlying system. 
+ """) + final MemoryUnit memory + + @ConfigOption + @Description(""" + The name of the executor to be used (default: `local`). + """) + final String name + + @ConfigOption + @Description(""" + *Used only by the SLURM executor.* + + When `true`, memory allocations for SLURM jobs are specified as `--mem-per-cpu ` instead of `--mem `. + """) + final boolean perCpuMemAllocation + + @ConfigOption + @Description(""" + *Used only by the LSF executor.* + + Enables the *per-job* memory limit mode for LSF jobs. + """) + final boolean perJobMemLimit + + @ConfigOption + @Description(""" + *Used only by the LSF executor.* + + Enables the *per-task* memory reserve mode for LSF jobs. + """) + final boolean perTaskReserve + + @ConfigOption + @Description(""" + Determines how often to check for process termination. Default varies for each executor. + """) + final Duration pollInterval + + @ConfigOption + @Description(""" + Determines how job status is retrieved. When `false` only the queue associated with the job execution is queried. When `true` the job status is queried globally i.e. irrespective of the submission queue (default: `false`). + """) + final boolean queueGlobalStatus + + @ConfigOption + @Description(""" + The number of tasks the executor will handle in a parallel manner. A queue size of zero corresponds to no limit. Default varies for each executor. + """) + final Integer queueSize + + @ConfigOption + @Description(""" + *Used only by grid executors.* + + Determines how often to fetch the queue status from the scheduler (default: `1 min`). + """) + final Duration queueStatInterval + + @Description(""" + The `executor.retry` scope controls the behavior of retrying failed job submissions. 
+ + [Read more](https://nextflow.io/docs/latest/reference/config.html#executor) + """) + final ExecutorRetryConfig retry + + @ConfigOption + @Description(""" + Determines the max rate of job submission per time unit, for example `'10sec'` (10 jobs per second) or `'50/2min'` (50 jobs every 2 minutes) (default: unlimited). + """) + final String submitRateLimit + + private Map opts + + /* required by extension point -- do not remove */ + ExecutorConfig() {} + + ExecutorConfig(Map opts) { + account = opts.account + cpus = opts.cpus as Integer + dumpInterval = opts.dumpInterval as Duration ?: Duration.of('5min') + exitReadTimeout = opts.exitReadTimeout as Duration ?: Duration.of('270sec') + jobName = opts.jobName as Closure + killBatchSize = opts.killBatchSize != null ? opts.killBatchSize as int : 100 + memory = opts.memory as MemoryUnit + name = opts.name + perCpuMemAllocation = opts.perCpuMemAllocation as boolean + perJobMemLimit = opts.perJobMemLimit as boolean + perTaskReserve = opts.perTaskReserve as boolean + pollInterval = opts.pollInterval as Duration + queueGlobalStatus = opts.queueGlobalStatus as boolean + queueSize = opts.queueSize as Integer + queueStatInterval = opts.queueStatInterval as Duration ?: Duration.of('1min') + retry = opts.retry as ExecutorRetryConfig + submitRateLimit = opts.submitRateLimit + + // preserve executor-specific opts + this.opts = opts + } + + Duration getExitReadTimeout(String execName) { + getExecConfigProp(execName, 'exitReadTimeout', null) as Duration + } + + Duration getMonitorDumpInterval(String execName) { + getExecConfigProp(execName, 'dumpInterval', null) as Duration + } + + Duration getPollInterval(String execName, Duration defValue = null) { + getExecConfigProp(execName, 'pollInterval', defValue ?: Duration.of('1sec')) as Duration + } + + int getQueueSize(String execName, int defValue) { + getExecConfigProp(execName, 'queueSize', defValue) as int + } + + Duration getQueueStatInterval(String execName) { + 
getExecConfigProp(execName, 'queueStatInterval', null) as Duration + } + + @Memoized + Object getExecConfigProp(String execName, String name, Object defValue, Map env = null) { + // -- check `executor.$.` + final execProp = execProp(execName, name) + if( execProp != null ) + return execProp + + // -- check `executor.` + final prop = this.hasProperty(name) ? this.getProperty(name) : null + if( prop != null ) + return prop + + // -- check environment variable + final key = "NXF_EXECUTOR_${name.toUpperCase().replaceAll(/\./,'_')}".toString() + if( env == null ) + env = System.getenv() + return env.containsKey(key) ? env.get(key) : defValue + } + + private Object execProp(String execName, String name) { + if( !execName ) + return null + final result = opts['$' + execName] + return result instanceof Map ? result[name] : null + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorFactory.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorFactory.groovy index 0609d0245f..92effd68d9 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorFactory.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorFactory.groovy @@ -204,6 +204,7 @@ class ExecutorFactory { protected Executor createExecutor( Class clazz, String name, Session session) { def result = clazz.newInstance() result.session = session + result.config = new ExecutorConfig(session.config.executor as Map ?: Collections.emptyMap()) result.name = name result.init() return result @@ -219,13 +220,8 @@ class ExecutorFactory { // create the processor object def result = taskConfig.executor?.toString() - if( !result ) { - if( session.config.executor instanceof String ) { - result = session.config.executor - } - else if( session.config.executor?.name instanceof String ) { - result = session.config.executor.name - } + if( !result && session.config.executor?.name instanceof String ) { + result = session.config.executor.name } log.debug "<< taskConfig 
executor: $result" diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorRetryConfig.java b/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorRetryConfig.groovy similarity index 61% rename from modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorRetryConfig.java rename to modules/nextflow/src/main/groovy/nextflow/executor/ExecutorRetryConfig.groovy index baff55367d..c8436fecff 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorRetryConfig.java +++ b/modules/nextflow/src/main/groovy/nextflow/executor/ExecutorRetryConfig.groovy @@ -13,54 +13,68 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package nextflow.config.scopes; +package nextflow.executor -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description +import nextflow.util.Duration -public class ExecutorRetryConfig implements ConfigScope { +@CompileStatic +class ExecutorRetryConfig implements ConfigScope { @ConfigOption @Description(""" *Used only by grid executors.* Delay when retrying failed job submissions (default: `500ms`). - """) - public Duration delay; + """) + Duration delay = Duration.of('500ms') @ConfigOption @Description(""" *Used only by grid executors.* Jitter value when retrying failed job submissions (default: `0.25`). - """) - public double jitter; + """) + double jitter = 0.25 @ConfigOption @Description(""" *Used only by grid executors.* Max attempts when retrying failed job submissions (default: `3`). - """) - public int maxAttempts; + """) + int maxAttempts = 3 @ConfigOption @Description(""" *Used only by grid executors.* Max delay when retrying failed job submissions (default: `30s`). 
- """) - public Duration maxDelay; + """) + Duration maxDelay = Duration.of('30s') @ConfigOption @Description(""" *Used only by grid executors.* Regex pattern that when verified causes a failed submit operation to be re-tried (default: `Socket timed out`). - """) - public String reason; + """) + String reason = 'Socket timed out' + ExecutorRetryConfig(Map opts) { + if( opts.delay ) + delay = opts.delay as Duration + if( opts.jitter ) + jitter = opts.jitter as double + if( opts.maxAttempts ) + maxAttempts = opts.maxAttempts as int + if( opts.maxDelay ) + maxDelay = opts.maxDelay as Duration + if( opts.reason ) + reason = opts.reason + } } diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/GridTaskHandler.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/GridTaskHandler.groovy index b91363f23c..63776de50d 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/GridTaskHandler.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/GridTaskHandler.groovy @@ -80,8 +80,6 @@ class GridTaskHandler extends TaskHandler implements FusionAwareTask { private Duration sanityCheckInterval - static private final Duration READ_TIMEOUT = Duration.of('270sec') // 4.5 minutes - BatchCleanup batch @TestOnly @@ -96,7 +94,7 @@ class GridTaskHandler extends TaskHandler implements FusionAwareTask { this.outputFile = task.workDir.resolve(TaskRun.CMD_OUTFILE) this.errorFile = task.workDir.resolve(TaskRun.CMD_ERRFILE) this.wrapperFile = task.workDir.resolve(TaskRun.CMD_RUN) - final duration = executor.session?.getExitReadTimeout(executor.name, READ_TIMEOUT) ?: READ_TIMEOUT + final duration = executor.config.getExitReadTimeout(executor.name) this.exitStatusReadTimeoutMillis = duration.toMillis() this.queue = task.config?.queue this.sanityCheckInterval = duration @@ -143,12 +141,7 @@ class GridTaskHandler extends TaskHandler implements FusionAwareTask { protected RetryPolicy retryPolicy() { - final delay = 
executor.session.getConfigAttribute("executor.retry.delay", '500ms') as Duration - final maxDelay = executor.session.getConfigAttribute("executor.retry.maxDelay", '30s') as Duration - final jitter = executor.session.getConfigAttribute("executor.retry.jitter", '0.25') as double - final maxAttempts = executor.session.getConfigAttribute("executor.retry.maxAttempts", '3') as int - final reason = executor.session.getConfigAttribute("executor.submit.retry.reason", 'Socket timed out') as String - + final retry = executor.config.retry final listener = new EventListener() { @Override void accept(ExecutionAttemptedEvent event) throws Throwable { @@ -170,10 +163,10 @@ class GridTaskHandler extends TaskHandler implements FusionAwareTask { } return RetryPolicy.builder() - .handleIf(retryCondition(reason)) - .withBackoff(delay.toMillis(), maxDelay.toMillis(), ChronoUnit.MILLIS) - .withMaxAttempts(maxAttempts) - .withJitter(jitter) + .handleIf(retryCondition(retry.reason)) + .withBackoff(retry.delay.toMillis(), retry.maxDelay.toMillis(), ChronoUnit.MILLIS) + .withMaxAttempts(retry.maxAttempts) + .withJitter(retry.jitter) .onFailedAttempt(listener) .build() } diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/LsfExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/LsfExecutor.groovy index e5f425a2ac..d7e705a5e5 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/LsfExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/LsfExecutor.groovy @@ -118,7 +118,7 @@ class LsfExecutor extends AbstractGridExecutor implements TaskArrayExecutor { addClusterOptionsDirective(task.config, result) // add account from config - final account = session.getExecConfigProp(getName(), 'account', null) as String + final account = config.getExecConfigProp(name, 'account', null) as String if( account ) { result << '-G' << account } @@ -325,7 +325,7 @@ class LsfExecutor extends AbstractGridExecutor implements TaskArrayExecutor { log.debug "[LSF] 
Detected lsf.conf LSB_JOB_MEMLIMIT=$str ($perJobMemLimit)" } - perJobMemLimit = session.getExecConfigProp(name, 'perJobMemLimit', perJobMemLimit) + perJobMemLimit = config.getExecConfigProp(name, 'perJobMemLimit', perJobMemLimit) // per task reserve https://github.com/nextflow-io/nextflow/issues/1071#issuecomment-481412239 if( conf.get('RESOURCE_RESERVE_PER_TASK') ) { @@ -334,7 +334,7 @@ class LsfExecutor extends AbstractGridExecutor implements TaskArrayExecutor { log.debug "[LSF] Detected lsf.conf RESOURCE_RESERVE_PER_TASK=$str ($perTaskReserve)" } - perTaskReserve = session.getExecConfigProp(name, 'perTaskReserve', perTaskReserve) + perTaskReserve = config.getExecConfigProp(name, 'perTaskReserve', perTaskReserve) } @Override diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/NopeExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/NopeExecutor.groovy index 9b9fbddbf9..96b1b5b3f9 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/NopeExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/NopeExecutor.groovy @@ -38,7 +38,7 @@ class NopeExecutor extends Executor { @Override protected TaskMonitor createTaskMonitor() { - return TaskPollingMonitor.create(session, name, 5, Duration.of('50ms')) + return TaskPollingMonitor.create(session, config, name, 5, Duration.of('50ms')) } @Override @@ -50,6 +50,7 @@ class NopeExecutor extends Executor { @Slf4j +@CompileStatic class NopeTaskHandler extends TaskHandler { protected NopeTaskHandler(TaskRun task) { diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/PbsExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/PbsExecutor.groovy index b25f020648..7905576b3e 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/PbsExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/PbsExecutor.groovy @@ -82,7 +82,7 @@ class PbsExecutor extends AbstractGridExecutor implements TaskArrayExecutor { } // add account from config - 
final account = session.getExecConfigProp(getName(), 'account', null) as String + final account = config.getExecConfigProp(name, 'account', null) as String if( account ) { result << '-P' << account } diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/PbsProExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/PbsProExecutor.groovy index bcb0cbfa84..9df2982568 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/PbsProExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/PbsProExecutor.groovy @@ -90,7 +90,7 @@ class PbsProExecutor extends PbsExecutor { } // add account from config - final account = session.getExecConfigProp(getName(), 'account', null) as String + final account = config.getExecConfigProp(name, 'account', null) as String if( account ) { result << '-P' << account } diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/SlurmExecutor.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/SlurmExecutor.groovy index 08d7e08576..851a22394e 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/SlurmExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/SlurmExecutor.groovy @@ -103,7 +103,7 @@ class SlurmExecutor extends AbstractGridExecutor implements TaskArrayExecutor { addClusterOptionsDirective(task.config, result) // add slurm account from config - final account = session.getExecConfigProp(getName(), 'account', null) as String + final account = config.getExecConfigProp(name, 'account', null) as String if( account ) { result << '-A' << account } @@ -212,7 +212,7 @@ class SlurmExecutor extends AbstractGridExecutor implements TaskArrayExecutor { @Override void register() { super.register() - perCpuMemAllocation = session.getExecConfigProp(name, 'perCpuMemAllocation', false) + perCpuMemAllocation = config.getExecConfigProp(name, 'perCpuMemAllocation', false) } @Override diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/local/LocalExecutor.groovy 
b/modules/nextflow/src/main/groovy/nextflow/executor/local/LocalExecutor.groovy index 1e98eab1a7..b6db10f7e1 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/local/LocalExecutor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/local/LocalExecutor.groovy @@ -39,7 +39,7 @@ class LocalExecutor extends Executor { @Override protected TaskMonitor createTaskMonitor() { - return LocalPollingMonitor.create(session, name) + return LocalPollingMonitor.create(session, config, name) } @Override diff --git a/modules/nextflow/src/main/groovy/nextflow/fusion/FusionConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/fusion/FusionConfig.groovy index dad6b4df16..c0a2389d59 100644 --- a/modules/nextflow/src/main/groovy/nextflow/fusion/FusionConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/fusion/FusionConfig.groovy @@ -25,14 +25,22 @@ import groovy.transform.Memoized import nextflow.Global import nextflow.Session import nextflow.SysEnv +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description import nextflow.util.MemoryUnit /** * Model Fusion config options * * @author Paolo Di Tommaso */ +@ScopeName("fusion") +@Description(""" + The `fusion` scope provides advanced configuration for the use of the [Fusion file system](https://docs.seqera.io/fusion). 
+""") @CompileStatic -class FusionConfig { +class FusionConfig implements ConfigScope { final static public String DEFAULT_FUSION_AMD64_URL = 'https://fusionfs.seqera.io/releases/v2.4-amd64.json' final static public String DEFAULT_FUSION_ARM64_URL = 'https://fusionfs.seqera.io/releases/v2.4-arm64.json' @@ -47,35 +55,73 @@ class FusionConfig { final static private Pattern VERSION_JSON = ~/https:\/\/.*\/releases\/v(\d+(?:\.\w+)*)-(\w*)\.json$/ - final private Boolean enabled - final private String containerConfigUrl - @Deprecated final private Boolean exportAwsAccessKeys - final private Boolean exportStorageCredentials - final private String logOutput - final private String logLevel - final private boolean tagsEnabled - final private String tagsPattern - final private boolean privileged - final private MemoryUnit cacheSize - final private boolean snapshots + @ConfigOption + @Description(""" + Enable the Fusion file system (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + The maximum size of the local cache used by the Fusion client. + """) + final MemoryUnit cacheSize + + @ConfigOption + @Description(""" + The URL of the container layer that provides the Fusion client. + """) + final String containerConfigUrl + + @ConfigOption + @Description(""" + Export the access credentials required by the underlying object storage to the task execution environment (default: `false`). + """) + final boolean exportStorageCredentials + + @ConfigOption + @Description(""" + The log level of the Fusion client. + """) + final String logLevel + + @ConfigOption + @Description(""" + The output location of the Fusion log. + """) + final String logOutput + + @ConfigOption + @Description(""" + Enable privileged containers for Fusion (default: `true`). + """) + final boolean privileged + + @ConfigOption + @Description(""" + Enable Fusion snapshotting (preview, default: `false`). 
This feature allows Fusion to automatically restore a job when it is interrupted by a spot reclamation. + """) + final boolean snapshots + + @ConfigOption(types=[Boolean]) + @Description(""" + The pattern that determines how tags are applied to files created via the Fusion client (default: `[.command.*|.exitcode|.fusion.*](nextflow.io/metadata=true),[*](nextflow.io/temporary=true)`). Set to `false` to disable tags. + """) + final String tags boolean enabled() { enabled } - @Deprecated boolean exportAwsAccessKeys() { exportAwsAccessKeys } - boolean exportStorageCredentials() { - return exportStorageCredentials!=null - ? exportStorageCredentials - : exportAwsAccessKeys + return exportStorageCredentials } String logLevel() { logLevel } String logOutput() { logOutput } - boolean tagsEnabled() { tagsEnabled } + boolean tagsEnabled() { tags != null } - String tagsPattern() { tagsPattern } + String tagsPattern() { tags } MemoryUnit cacheSize() { cacheSize } @@ -89,22 +135,34 @@ class FusionConfig { return privileged } + /* required by extension point -- do not remove */ + FusionConfig() {} + FusionConfig(Map opts, Map env=System.getenv()) { - this.enabled = opts.enabled - this.exportAwsAccessKeys = opts.exportAwsAccessKeys - this.exportStorageCredentials = opts.exportStorageCredentials - this.containerConfigUrl = opts.containerConfigUrl?.toString() ?: env.get('FUSION_CONTAINER_CONFIG_URL') + this.enabled = opts.enabled as boolean + this.exportStorageCredentials = (opts.exportStorageCredentials ?: opts.exportAwsAccessKeys) as boolean + this.containerConfigUrl = opts.containerConfigUrl ?: env.get('FUSION_CONTAINER_CONFIG_URL') this.logLevel = opts.logLevel this.logOutput = opts.logOutput - this.tagsEnabled = opts.tags==null || opts.tags.toString()!='false' - this.tagsPattern = (opts.tags==null || (opts.tags instanceof Boolean && opts.tags)) ? DEFAULT_TAGS : ( opts.tags !instanceof Boolean ? 
opts.tags as String : null ) - this.privileged = opts.privileged==null || opts.privileged.toString()=='true' + this.tags = parseTags(opts.tags) + this.privileged = opts.privileged == null || opts.privileged as boolean this.cacheSize = opts.cacheSize as MemoryUnit - this.snapshots = opts.snapshots as Boolean + this.snapshots = opts.snapshots as boolean + if( containerConfigUrl && !validProtocol(containerConfigUrl)) throw new IllegalArgumentException("Fusion container config URL should start with 'http:' or 'https:' protocol prefix - offending value: $containerConfigUrl") } + static private String parseTags(Object value) { + if( value == null ) + return DEFAULT_TAGS + if( value instanceof Boolean && value ) + return DEFAULT_TAGS + if( value instanceof CharSequence ) + return value + return null + } + protected boolean validProtocol(String url) { url.startsWith('http://') || url.startsWith('https://') || url.startsWith('file:/') } diff --git a/modules/nextflow/src/main/groovy/nextflow/fusion/FusionHelper.groovy b/modules/nextflow/src/main/groovy/nextflow/fusion/FusionHelper.groovy index 33c6f812ed..8119ab9d80 100644 --- a/modules/nextflow/src/main/groovy/nextflow/fusion/FusionHelper.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/fusion/FusionHelper.groovy @@ -47,13 +47,10 @@ class FusionHelper { static String runWithContainer(FusionScriptLauncher launcher, ContainerConfig containerConfig, String containerName, String containerOpts, List runCmd) { if( !containerName ) throw new IllegalArgumentException("Missing task container -- Fusion requires the task to be executed by a container process") - final engine = containerConfig.getEngine() - final containerBuilder = ContainerBuilder.create(engine, containerName) + final containerBuilder = ContainerBuilder.create(containerConfig, containerName) .addMountWorkDir(false) - .addRunOptions(containerConfig.runOptions as String) .addRunOptions(containerOpts) - .addRunOptions(containerConfig.fusionOptions()) - 
.params(containerConfig) + .addRunOptions(containerConfig.getFusionOptions()) // add fusion env vars for(Map.Entry it : launcher.fusionEnv()) { diff --git a/modules/nextflow/src/main/groovy/nextflow/mail/BaseMailProvider.groovy b/modules/nextflow/src/main/groovy/nextflow/mail/BaseMailProvider.groovy index ee72e12d0c..c17832bcb9 100644 --- a/modules/nextflow/src/main/groovy/nextflow/mail/BaseMailProvider.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/mail/BaseMailProvider.groovy @@ -60,7 +60,7 @@ abstract class BaseMailProvider implements MailProvider { } private long sendTimeout(Mailer mailer) { - final timeout = mailer.config.sendMailTimeout as Duration + final timeout = mailer.config.sendMailTimeout return timeout ? timeout.toMillis() : SEND_MAIL_TIMEOUT } diff --git a/modules/nextflow/src/main/groovy/nextflow/mail/MailConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/mail/MailConfig.groovy new file mode 100644 index 0000000000..315856e33d --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/mail/MailConfig.groovy @@ -0,0 +1,111 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package nextflow.mail + +import groovy.transform.CompileStatic +import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.util.Duration + +@ScopeName("mail") +@Description(""" + The `mail` scope controls the mail server used to send email notifications. +""") +@CompileStatic +@ToString +class MailConfig implements ConfigScope { + + @ConfigOption + @Description(""" + Enable Java Mail logging for debugging purposes (default: `false`). + """) + final boolean debug + + @ConfigOption + @Description(""" + Default email sender address. + """) + final String from + + @ConfigOption + @Description(""" + """) + final Duration sendMailTimeout + + @Description(""" + The `mail.smtp` scope supports any SMTP configuration property in the [Java Mail API](https://javaee.github.io/javamail/). + + [Read more](https://javaee.github.io/javamail/docs/api/com/sun/mail/smtp/package-summary.html#properties) + """) + final SmtpOpts smtp + + /* required by extension point -- do not remove */ + MailConfig() {} + + MailConfig(Map opts) { + debug = opts.debug as boolean + from = opts.from + sendMailTimeout = opts.sendMailTimeout as Duration + smtp = opts.smtp != null ? new SmtpOpts(opts.smtp as Map) : null + } + +} + +@CompileStatic +class SmtpOpts implements ConfigScope { + + @ConfigOption + @Description(""" + Host name of the mail server. + """) + final String host + + @ConfigOption + @Description(""" + User password to connect to the mail server. + """) + final String password + + @ConfigOption + @Description(""" + Port number of the mail server. + """) + final Integer port + + @ConfigOption + @Description(""" + User name to connect to the mail server. 
+ """) + final String user + + private Map opts + + SmtpOpts(Map opts) { + host = opts.host + password = opts.password + port = opts.port as Integer + user = opts.user + this.opts = opts + } + + Map toMap() { + return opts + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/mail/Mailer.groovy b/modules/nextflow/src/main/groovy/nextflow/mail/Mailer.groovy index 9d4264fce1..d85ff6e112 100644 --- a/modules/nextflow/src/main/groovy/nextflow/mail/Mailer.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/mail/Mailer.groovy @@ -34,6 +34,7 @@ import groovy.transform.CompileDynamic import groovy.transform.CompileStatic import groovy.transform.Memoized import groovy.util.logging.Slf4j +import nextflow.SysEnv import nextflow.io.LogOutputStream import nextflow.plugin.Plugins import org.jsoup.Jsoup @@ -72,21 +73,21 @@ class Mailer { /** * Holds mail settings and configuration attributes */ - private Map config = [:] + private MailConfig config - private Map env = System.getenv() + private Map env = SysEnv.get() private String fMailer - Mailer setConfig( Map params ) { - if( params ) { - if( config == null ) config = [:] - config.putAll(params) - } - return this + Mailer() { + this(Collections.emptyMap()) + } + + Mailer(Map opts) { + config = new MailConfig(opts) } - Map getConfig() { config } + MailConfig getConfig() { config } protected String getSysMailer() { if( !fMailer ) @@ -119,10 +120,10 @@ class Mailer { @CompileDynamic protected Properties createProps() { - if( config.smtp instanceof Map ) { - def cfg = [mail: [smtp: config.smtp]] as ConfigObject - def props = cfg.toProperties() - props.setProperty('mail.transport.protocol', config.transport?.protocol ?: 'smtp') + if( config.smtp != null ) { + final cfg = [mail: [smtp: config.smtp.toMap()]] as ConfigObject + final props = cfg.toProperties() + props.setProperty('mail.transport.protocol', 'smtp') // -- check proxy configuration if( !props.contains('mail.smtp.proxy.host') && 
System.getProperty('http.proxyHost') ) { props['mail.smtp.proxy.host'] = System.getProperty('http.proxyHost') @@ -130,9 +131,8 @@ class Mailer { } // -- debug for debugging - if( config.debug == true ) { + if( config.debug ) log.debug "Mail session properties:\n${dumpProps(props)}" - } else log.trace "Mail session properties:\n${dumpProps(props)}" return props @@ -159,7 +159,8 @@ class Mailer { protected Session getSession() { if( !session ) { session = Session.getInstance(createProps()) - if( config.debug != true ) return session + if( !config.debug ) + return session session.setDebugOut(new PrintStream( new LogOutputStream() { @Override protected void processLine(String line, int logLevel) { @@ -183,8 +184,8 @@ class Mailer { * @return The SMTP host port */ protected int getPort() { - def port = getConfig('port') - port ? port as int : -1 + final port = getConfig('port') + port != null ? port as int : -1 } /** @@ -202,12 +203,16 @@ class Mailer { } protected getConfig(String name) { - def key = "smtp.${name}" - def value = config.navigate(key) - if( !value ) { - // fallback on env properties - value = env.get("NXF_${key.toUpperCase().replace('.','_')}".toString()) - } + def value + + // check `mail.smtp` config + if( config.smtp != null ) + value = config.smtp.toMap().navigate(name) + + // check nvironment variable + if( !value ) + value = env.get("NXF_SMTP_${name.toUpperCase().replace('.','_')}".toString()) + return value } @@ -224,7 +229,7 @@ class Mailer { if( mail.from ) msg.addFrom(InternetAddress.parse(mail.from)) else if( config.from ) - msg.addFrom(InternetAddress.parse(config.from.toString())) + msg.addFrom(InternetAddress.parse(config.from)) if( mail.to ) msg.setRecipients(Message.RecipientType.TO, mail.to) @@ -375,7 +380,7 @@ class Mailer { log.warn "Unable to load AWS Simple Email Service (SES) client" } - if( config.containsKey('smtp') ) { + if( config.smtp != null ) { return providers.find(it -> it.name()=='javamail') } diff --git 
a/modules/nextflow/src/main/groovy/nextflow/mail/Notification.groovy b/modules/nextflow/src/main/groovy/nextflow/mail/Notification.groovy new file mode 100644 index 0000000000..8f9c3cbdb2 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/mail/Notification.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.mail + +import groovy.transform.CompileStatic +import groovy.transform.EqualsAndHashCode +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description + +@ScopeName("notification") +@Description(""" + The `notification` scope controls the automatic sending of an email notification on workflow completion. +""") +@CompileStatic +@EqualsAndHashCode +class Notification implements ConfigScope { + + @ConfigOption + @Description(""" + Map of variables that can be used in the template file. + """) + final Map attributes + + @ConfigOption + @Description(""" + Send an email notification when the workflow execution completes (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + Sender address for the email notification. + """) + final String from + + @ConfigOption + @Description(""" + Path of a template file containing the contents of the email notification. 
+ """) + final Object template + + @ConfigOption + @Description(""" + Recipient address for the email notification. Multiple addresses can be specified as a comma-separated list. + """) + final String to + + /* required by extension point -- do not remove */ + Notification() {} + + Notification(Map opts) { + attributes = opts.attributes as Map + enabled = opts.enabled as boolean + from = opts.from + template = opts.template + to = opts.to + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/processor/LocalPollingMonitor.groovy b/modules/nextflow/src/main/groovy/nextflow/processor/LocalPollingMonitor.groovy index d9dbda638a..c569b38a8f 100644 --- a/modules/nextflow/src/main/groovy/nextflow/processor/LocalPollingMonitor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/processor/LocalPollingMonitor.groovy @@ -22,6 +22,7 @@ import groovy.transform.CompileStatic import groovy.transform.PackageScope import groovy.util.logging.Slf4j import nextflow.Session +import nextflow.executor.ExecutorConfig import nextflow.exception.ProcessUnrecoverableException import nextflow.util.Duration import nextflow.util.MemoryUnit @@ -64,6 +65,7 @@ class LocalPollingMonitor extends TaskPollingMonitor { * Valid parameters are: *
  • name: The name of the executor for which the polling monitor is created *
  • session: The current {@code Session} + *
  • config: The `executor` configuration settings *
  • capacity: The maximum number of this monitoring queue *
  • pollInterval: Determines how often a poll occurs to check for a process termination *
  • dumpInterval: Determines how often the executor status is written in the application log file @@ -83,22 +85,23 @@ class LocalPollingMonitor extends TaskPollingMonitor { * * @param session * The current {@link Session} object + * @param config + * The `executor` configuration settings * @param name * The name of the executor that created this tasks monitor * @return * An instance of {@link LocalPollingMonitor} */ - static LocalPollingMonitor create(Session session, String name) { + static LocalPollingMonitor create(Session session, ExecutorConfig config, String name) { assert session + assert config assert name - final defPollInterval = Duration.of('100ms') - final pollInterval = session.getPollInterval(name, defPollInterval) - final dumpInterval = session.getMonitorDumpInterval(name) - - final int cpus = configCpus(session,name) - final long memory = configMem(session,name) - final int size = session.getQueueSize(name, OS.getAvailableProcessors()) + final pollInterval = config.getPollInterval(name, Duration.of('100ms')) + final dumpInterval = config.getMonitorDumpInterval(name) + final cpus = configCpus(config, name) + final memory = configMem(config, name) + final size = config.getQueueSize(name, OS.getAvailableProcessors()) log.debug "Creating local task monitor for executor '$name' > cpus=$cpus; memory=${new MemoryUnit(memory)}; capacity=$size; pollInterval=$pollInterval; dumpInterval=$dumpInterval" @@ -107,6 +110,7 @@ class LocalPollingMonitor extends TaskPollingMonitor { cpus: cpus, memory: memory, session: session, + config: config, capacity: size, pollInterval: pollInterval, dumpInterval: dumpInterval, @@ -114,8 +118,8 @@ class LocalPollingMonitor extends TaskPollingMonitor { } @PackageScope - static int configCpus(Session session, String name) { - int cpus = session.getExecConfigProp(name, 'cpus', 0) as int + static int configCpus(ExecutorConfig config, String name) { + int cpus = config.getExecConfigProp(name, 'cpus', 0) as int if( !cpus ) cpus = 
OS.getAvailableProcessors() @@ -124,8 +128,9 @@ class LocalPollingMonitor extends TaskPollingMonitor { } @PackageScope - static long configMem(Session session, String name) { - (session.getExecConfigProp(name, 'memory', OS.getTotalPhysicalMemorySize()) as MemoryUnit).toBytes() + static long configMem(ExecutorConfig config, String name) { + final memory = config.getExecConfigProp(name, 'memory', OS.getTotalPhysicalMemorySize()) as MemoryUnit + return memory.toBytes() } /** diff --git a/modules/nextflow/src/main/groovy/nextflow/processor/ParallelPollingMonitor.groovy b/modules/nextflow/src/main/groovy/nextflow/processor/ParallelPollingMonitor.groovy index 9c15029d7b..78c7cfd380 100644 --- a/modules/nextflow/src/main/groovy/nextflow/processor/ParallelPollingMonitor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/processor/ParallelPollingMonitor.groovy @@ -41,6 +41,7 @@ class ParallelPollingMonitor extends TaskPollingMonitor { * Valid parameters are: *
  • name: The name of the executor for which the polling monitor is created *
  • session: The current {@code Session} + *
  • config: The `executor` configuration settings *
  • capacity: The maximum number of this monitoring queue *
  • pollInterval: Determines how often a poll occurs to check for a process termination *
  • dumpInterval: Determines how often the executor status is written in the application log file diff --git a/modules/nextflow/src/main/groovy/nextflow/processor/TaskArrayRun.groovy b/modules/nextflow/src/main/groovy/nextflow/processor/TaskArrayRun.groovy index 9621386d1c..9640c9c718 100644 --- a/modules/nextflow/src/main/groovy/nextflow/processor/TaskArrayRun.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/processor/TaskArrayRun.groovy @@ -40,10 +40,11 @@ class TaskArrayRun extends TaskRun { @Override ContainerConfig getContainerConfig() { final config = super.getContainerConfig() - final envWhitelist = config.getEnvWhitelist() ?: [] - final executor = (TaskArrayExecutor)processor.getExecutor() - envWhitelist << executor.getArrayIndexName() - config.put('envWhitelist', envWhitelist) + final envWhitelist = config.getEnvWhitelist() + if( envWhitelist != null ) { + final executor = (TaskArrayExecutor)processor.getExecutor() + envWhitelist.add(executor.getArrayIndexName()) + } return config } diff --git a/modules/nextflow/src/main/groovy/nextflow/processor/TaskPollingMonitor.groovy b/modules/nextflow/src/main/groovy/nextflow/processor/TaskPollingMonitor.groovy index b36055bea9..e4bb18cbed 100644 --- a/modules/nextflow/src/main/groovy/nextflow/processor/TaskPollingMonitor.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/processor/TaskPollingMonitor.groovy @@ -16,12 +16,6 @@ package nextflow.processor -import nextflow.cloud.CloudSpotTerminationException -import nextflow.exception.FailedGuardException -import nextflow.exception.ProcessEvalException -import nextflow.exception.ProcessException -import nextflow.exception.ProcessRetryableException - import static nextflow.processor.TaskProcessor.* import java.util.concurrent.ExecutorService @@ -36,8 +30,14 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Session import nextflow.SysEnv +import nextflow.cloud.CloudSpotTerminationException +import 
nextflow.exception.FailedGuardException +import nextflow.exception.ProcessEvalException +import nextflow.exception.ProcessException +import nextflow.exception.ProcessRetryableException import nextflow.exception.ProcessSubmitTimeoutException import nextflow.executor.BatchCleanup +import nextflow.executor.ExecutorConfig import nextflow.executor.GridTaskHandler import nextflow.util.Duration import nextflow.util.SysHelper @@ -60,6 +60,11 @@ class TaskPollingMonitor implements TaskMonitor { */ final Session session + /** + * The `executor` configuration settings + */ + final ExecutorConfig config + /** * The time interval (in milliseconds) elapsed which execute a new poll */ @@ -133,6 +138,7 @@ class TaskPollingMonitor implements TaskMonitor { * Valid parameters are: *
  • name: The name of the executor for which the polling monitor is created *
  • session: The current {@code Session} + *
  • config: The `executor` configuration settings *
  • capacity: The maximum number of this monitoring queue *
  • pollInterval: Determines how often a poll occurs to check for a process termination *
  • dumpInterval: Determines how often the executor status is written in the application log file @@ -147,34 +153,37 @@ class TaskPollingMonitor implements TaskMonitor { this.name = params.name this.session = params.session as Session + this.config = params.config as ExecutorConfig this.pollIntervalMillis = ( params.pollInterval as Duration ).toMillis() - this.dumpInterval = (params.dumpInterval as Duration) ?: Duration.of('5min') + this.dumpInterval = params.dumpInterval as Duration this.capacity = (params.capacity ?: 0) as int this.pendingQueue = new LinkedBlockingQueue() this.runningQueue = new LinkedBlockingQueue() } - static TaskPollingMonitor create( Session session, String name, int defQueueSize, Duration defPollInterval ) { + static TaskPollingMonitor create( Session session, ExecutorConfig config, String name, int defQueueSize, Duration defPollInterval ) { assert session + assert config assert name - final capacity = session.getQueueSize(name, defQueueSize) - final pollInterval = session.getPollInterval(name, defPollInterval) - final dumpInterval = session.getMonitorDumpInterval(name) + final capacity = config.getQueueSize(name, defQueueSize) + final pollInterval = config.getPollInterval(name, defPollInterval) + final dumpInterval = config.getMonitorDumpInterval(name) log.debug "Creating task monitor for executor '$name' > capacity: $capacity; pollInterval: $pollInterval; dumpInterval: $dumpInterval " - new TaskPollingMonitor(name: name, session: session, capacity: capacity, pollInterval: pollInterval, dumpInterval: dumpInterval) + new TaskPollingMonitor(name: name, session: session, config: config, capacity: capacity, pollInterval: pollInterval, dumpInterval: dumpInterval) } - static TaskPollingMonitor create( Session session, String name, Duration defPollInterval ) { + static TaskPollingMonitor create( Session session, ExecutorConfig config, String name, Duration defPollInterval ) { assert session + assert config assert name - final pollInterval = 
session.getPollInterval(name, defPollInterval) - final dumpInterval = session.getMonitorDumpInterval(name) + final pollInterval = config.getPollInterval(name, defPollInterval) + final dumpInterval = config.getMonitorDumpInterval(name) log.debug "Creating task monitor for executor '$name' > pollInterval: $pollInterval; dumpInterval: $dumpInterval " - new TaskPollingMonitor(name: name, session: session, pollInterval: pollInterval, dumpInterval: dumpInterval) + new TaskPollingMonitor(name: name, session: session, config: config, pollInterval: pollInterval, dumpInterval: dumpInterval) } /** @@ -343,11 +352,11 @@ class TaskPollingMonitor implements TaskMonitor { } protected RateLimiter createSubmitRateLimit() { - def limit = session.getExecConfigProp(name,'submitRateLimit',null) as String + final limit = config.getExecConfigProp(name, 'submitRateLimit', null) as String if( !limit ) return null - def tokens = limit.tokenize('/') + final tokens = limit.tokenize('/') if( tokens.size() == 2 ) { /* * the rate limit is provide num of task over a duration diff --git a/modules/nextflow/src/main/groovy/nextflow/processor/TaskRun.groovy b/modules/nextflow/src/main/groovy/nextflow/processor/TaskRun.groovy index 20ab76ec36..543e06b80d 100644 --- a/modules/nextflow/src/main/groovy/nextflow/processor/TaskRun.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/processor/TaskRun.groovy @@ -29,6 +29,7 @@ import nextflow.Session import nextflow.conda.CondaCache import nextflow.conda.CondaConfig import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.container.resolver.ContainerInfo import nextflow.container.resolver.ContainerMeta import nextflow.container.resolver.ContainerResolver @@ -640,10 +641,10 @@ class TaskRun implements Cloneable { } private Path getCondaEnv0() { - if( !config.conda || !processor.session.getCondaConfig().isEnabled() ) + if( !config.conda || !getCondaConfig().isEnabled() ) return null - final cache = new 
CondaCache(processor.session.getCondaConfig()) + final cache = new CondaCache(getCondaConfig()) cache.getCachePathFor(config.conda as String) } @@ -747,7 +748,7 @@ class TaskRun implements Cloneable { // when 'eng' is null the setting for the current engine marked as 'enabled' will be used final result = sess.getContainerConfig(eng) - ?: new ContainerConfig(engine:'docker') + ?: new DockerConfig([:]) // if a configuration is found is expected to enabled by default if( exe.isContainerNative() ) { result.setEnabled(true) diff --git a/modules/nextflow/src/main/groovy/nextflow/script/WorkflowMetadata.groovy b/modules/nextflow/src/main/groovy/nextflow/script/WorkflowMetadata.groovy index 6f0bbfbfec..394d39dbf9 100644 --- a/modules/nextflow/src/main/groovy/nextflow/script/WorkflowMetadata.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/script/WorkflowMetadata.groovy @@ -485,11 +485,8 @@ class WorkflowMetadata { */ protected void safeMailNotification() { try { - final notifier = new WorkflowNotifier( - workflow: this, - config: session.config, - variables: NF.binding.variables ) - notifier.sendNotification() + final notifier = new WorkflowNotifier(NF.binding.variables, this) + notifier.sendNotification(session.config) } catch (Exception e) { log.warn "Failed to deliver notification email -- See the log file for details", e diff --git a/modules/nextflow/src/main/groovy/nextflow/script/WorkflowNotifier.groovy b/modules/nextflow/src/main/groovy/nextflow/script/WorkflowNotifier.groovy index 364a3af1e8..d14df3f7af 100644 --- a/modules/nextflow/src/main/groovy/nextflow/script/WorkflowNotifier.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/script/WorkflowNotifier.groovy @@ -24,6 +24,7 @@ import groovy.util.logging.Slf4j import nextflow.mail.Attachment import nextflow.mail.Mail import nextflow.mail.Mailer +import nextflow.mail.Notification /** * Send workflow completion notification * @@ -34,12 +35,7 @@ import nextflow.mail.Mailer class WorkflowNotifier { /** - * A 
map representing the nextflow configuration - */ - private Map config - - /** - * A map representing the variables defined in the script global scope + * A map representing the variables defined in the script global scope */ private Map variables @@ -48,16 +44,20 @@ class WorkflowNotifier { */ private WorkflowMetadata workflow + WorkflowNotifier(Map variables, WorkflowMetadata workflow) { + this.variables = variables + this.workflow = workflow + } + /** * Send notification email * * @param config A {@link Map} representing the nextflow configuration object */ - void sendNotification() { + void sendNotification(Map config) { + final notification = new Notification( config.notification as Map ?: Collections.emptyMap() ) - // fetch the `notification` configuration map defined in the config file - def notification = (Map)config.notification - if (!notification || !notification.enabled) { + if (!notification.enabled) { return } @@ -67,7 +67,7 @@ class WorkflowNotifier { } def mail = createMail(notification) - def mailer = createMailer( (Map)config.mail ) + def mailer = createMailer( config.mail as Map ?: Collections.emptyMap() ) mailer.send(mail) } @@ -78,23 +78,15 @@ class WorkflowNotifier { * @return A {@link Mailer} object */ protected Mailer createMailer(Map config) { - def mailer = new Mailer() - mailer.config = config - return mailer + return new Mailer(config) } /** * Create notification {@link nextflow.mail.Mail} object given the user parameters * * @param notification - * The user provided notification parameters - * - to: one or more comma separate notification recipient email address - * - from: the sender email address - * - template: template file path, multiple templates can be provided by using a list object - * - binding: user provided map representing the variables used in the template - * @return */ - protected Mail createMail(Map notification) { + protected Mail createMail(Notification notification) { def mail = new Mail() // -- the subject diff --git 
a/modules/nextflow/src/main/groovy/nextflow/spack/SpackConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/spack/SpackConfig.groovy index 6465f87156..4de5961a41 100644 --- a/modules/nextflow/src/main/groovy/nextflow/spack/SpackConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/spack/SpackConfig.groovy @@ -17,44 +17,64 @@ package nextflow.spack import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.util.Duration /** * Model Spack configuration * * @author Marco De La Pierre */ +@ScopeName("spack") +@Description(""" + The `spack` scope controls the creation of a Spack environment by the Spack package manager. +""") @CompileStatic -class SpackConfig extends LinkedHashMap { +class SpackConfig implements ConfigScope { - private Map env + @ConfigOption + @Description(""" + Execute tasks with Spack environments (default: `false`). + """) + final boolean enabled - /* required by Kryo deserialization -- do not remove */ - private SpackConfig() { } + @ConfigOption + @Description(""" + The path where Spack environments are stored. It should be accessible from all compute nodes when using a shared file system. + """) + final String cacheDir - SpackConfig(Map config, Map env) { - super(config) - this.env = env - } + @ConfigOption + @Description(""" + Enable checksum verification of source tarballs (default: `true`). + """) + final boolean checksum - boolean isEnabled() { - def enabled = get('enabled') - if( enabled == null ) - enabled = env.get('NXF_SPACK_ENABLED') - return enabled?.toString() == 'true' - } + @ConfigOption + @Description(""" + The amount of time to wait for the Spack environment to be created before failing (default: `60 min`). 
+ """) + final Duration createTimeout + + @ConfigOption + @Description(""" + The maximum number of parallel package builds (default: the number of available CPUs). + """) + final Integer parallelBuilds + + /* required by extension point -- do not remove */ + SpackConfig() {} - List getChannels() { - final value = get('channels') - if( !value ) { - return Collections.emptyList() - } - if( value instanceof List ) { - return value - } - if( value instanceof CharSequence ) { - return value.tokenize(',').collect(it -> it.trim()) - } - - throw new IllegalArgumentException("Unexpected spack.channels value: $value") + SpackConfig(Map opts, Map env) { + enabled = opts.enabled != null + ? opts.enabled as boolean + : (env.NXF_SPACK_ENABLED?.toString() == 'true') + cacheDir = opts.cacheDir + checksum = opts.checksum as boolean + createTimeout = opts.createTimeout as Duration + parallelBuilds = opts.parallelBuilds as Integer } } diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/DefaultObserverFactory.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/DefaultObserverFactory.groovy index ea7d16ab42..c28b266b28 100644 --- a/modules/nextflow/src/main/groovy/nextflow/trace/DefaultObserverFactory.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/trace/DefaultObserverFactory.groovy @@ -16,11 +16,12 @@ package nextflow.trace -import java.nio.file.Path - import groovy.transform.CompileStatic import nextflow.Session -import nextflow.file.FileHelper +import nextflow.trace.config.DagConfig +import nextflow.trace.config.ReportConfig +import nextflow.trace.config.TimelineConfig +import nextflow.trace.config.TraceConfig /** * Creates Nextflow observes object @@ -30,14 +31,14 @@ import nextflow.file.FileHelper @CompileStatic class DefaultObserverFactory implements TraceObserverFactoryV2 { - private Map config + private Session session @Override Collection create(Session session) { - this.config = session.config + this.session = session final result = new ArrayList(5) - 
createAnsiLogObserver(result, session) + createAnsiLogObserver(result) createGraphObserver(result) createReportObserver(result) createTimelineObserver(result) @@ -45,7 +46,7 @@ class DefaultObserverFactory implements TraceObserverFactoryV2 { return result } - protected void createAnsiLogObserver(Collection result, Session session) { + protected void createAnsiLogObserver(Collection result) { if( session.ansiLog ) { session.ansiLogObserver = new AnsiLogObserver() result << session.ansiLogObserver @@ -53,56 +54,31 @@ class DefaultObserverFactory implements TraceObserverFactoryV2 { } protected void createReportObserver(Collection result) { - final isEnabled = config.navigate('report.enabled') as Boolean - if( !isEnabled ) - return - - final fileName = config.navigate('report.file', ReportObserver.DEF_FILE_NAME) as String - final maxTasks = config.navigate('report.maxTasks', ReportObserver.DEF_MAX_TASKS) as int - final overwrite = config.navigate('report.overwrite') as Boolean - - final observer = new ReportObserver(FileHelper.asPath(fileName), overwrite) - observer.maxTasks = maxTasks - result << observer + final opts = session.config.report as Map ?: Collections.emptyMap() + final config = new ReportConfig(opts) + if( config.enabled ) + result << new ReportObserver(config) } protected void createTimelineObserver(Collection result) { - final isEnabled = config.navigate('timeline.enabled') as Boolean - if( !isEnabled ) - return - - final fileName = config.navigate('timeline.file', TimelineObserver.DEF_FILE_NAME) as String - final overwrite = config.navigate('timeline.overwrite') as Boolean - - result << new TimelineObserver(FileHelper.asPath(fileName), overwrite) + final opts = session.config.timeline as Map ?: Collections.emptyMap() + final config = new TimelineConfig(opts) + if( config.enabled ) + result << new TimelineObserver(config) } protected void createGraphObserver(Collection result) { - final isEnabled = config.navigate('dag.enabled') as Boolean - if( 
!isEnabled ) - return - - final fileName = config.navigate('dag.file', GraphObserver.DEF_FILE_NAME) as String - final overwrite = config.navigate('dag.overwrite') as Boolean - - result << new GraphObserver(FileHelper.asPath(fileName), overwrite) + final opts = session.config.dag as Map ?: Collections.emptyMap() + final config = new DagConfig(opts) + if( config.enabled ) + result << new GraphObserver(config) } protected void createTraceFileObserver(Collection result) { - final isEnabled = config.navigate('trace.enabled') as Boolean - if( !isEnabled ) - return - - final fields = config.navigate('trace.fields', '') as String - final fileName = config.navigate('trace.file', TraceFileObserver.DEF_FILE_NAME) as String - final overwrite = config.navigate('trace.overwrite') as Boolean - final raw = config.navigate('trace.raw') as Boolean - final separator = config.navigate('trace.sep', '\t') as String - - final observer = new TraceFileObserver(FileHelper.asPath(fileName), overwrite, separator) - observer.useRawNumbers(raw) - observer.setFieldsAndFormats(fields) - result << observer + final opts = session.config.trace as Map ?: Collections.emptyMap() + final config = new TraceConfig(opts) + if( config.enabled ) + result << new TraceFileObserver(config) } } diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/GraphObserver.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/GraphObserver.groovy index 5f3dd84846..51c3a8a431 100644 --- a/modules/nextflow/src/main/groovy/nextflow/trace/GraphObserver.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/trace/GraphObserver.groovy @@ -31,6 +31,7 @@ import nextflow.dag.MermaidRenderer import nextflow.dag.MermaidHtmlRenderer import nextflow.exception.AbortOperationException import nextflow.file.FileHelper +import nextflow.trace.config.DagConfig /** * Render the DAG document on pipeline completion using the * format specified by the user @@ -41,7 +42,7 @@ import nextflow.file.FileHelper @CompileStatic class 
GraphObserver implements TraceObserverV2 { - static public final String DEF_FILE_NAME = "dag-${TraceHelper.launchTimestampFmt()}.html" + private DagConfig config private Path file @@ -51,18 +52,15 @@ class GraphObserver implements TraceObserverV2 { private String format - private boolean overwrite - String getFormat() { format } String getName() { name } - GraphObserver(Path file, Boolean overwrite=false) { - assert file - this.file = file + GraphObserver(DagConfig config) { + this.config = config + this.file = FileHelper.asPath(config.file) this.name = file.baseName this.format = file.getExtension().toLowerCase() ?: 'html' - this.overwrite = overwrite } @Override @@ -71,9 +69,9 @@ class GraphObserver implements TraceObserverV2 { // check file existence final attrs = FileHelper.readAttributes(file) if( attrs ) { - if( overwrite && (attrs.isDirectory() || !file.delete()) ) + if( config.overwrite && (attrs.isDirectory() || !file.delete()) ) throw new AbortOperationException("Unable to overwrite existing DAG file: ${file.toUriString()}") - else if( !overwrite ) + else if( !config.overwrite ) throw new AbortOperationException("DAG file already exists: ${file.toUriString()} -- enable `dag.overwrite` in your config file to overwrite existing DAG files") } } @@ -93,19 +91,19 @@ class GraphObserver implements TraceObserverV2 { @PackageScope DagRenderer createRender() { if( format == 'dot' ) - new DotRenderer(name) + new DotRenderer(name, config.direction) else if( format == 'html' ) - new MermaidHtmlRenderer() + new MermaidHtmlRenderer(config) else if( format == 'gexf' ) new GexfRenderer(name) else if( format == 'mmd' ) - new MermaidRenderer() + new MermaidRenderer(config) else - new GraphvizRenderer(name, format) + new GraphvizRenderer(name, format, config.direction) } } diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/ReportObserver.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/ReportObserver.groovy index 0cbb5c1cd5..a496d24b24 100644 --- 
a/modules/nextflow/src/main/groovy/nextflow/trace/ReportObserver.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/trace/ReportObserver.groovy @@ -24,8 +24,10 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Session import nextflow.exception.AbortOperationException +import nextflow.file.FileHelper import nextflow.processor.TaskId import nextflow.script.WorkflowMetadata +import nextflow.trace.config.ReportConfig import nextflow.trace.event.TaskEvent import nextflow.util.TestOnly /** @@ -39,10 +41,6 @@ import nextflow.util.TestOnly @CompileStatic class ReportObserver implements TraceObserverV2 { - static final public String DEF_FILE_NAME = "report-${TraceHelper.launchTimestampFmt()}.html" - - static final public int DEF_MAX_TASKS = 10_000 - /** * Holds the the start time for tasks started/submitted but not yet completed */ @@ -62,7 +60,7 @@ class ReportObserver implements TraceObserverV2 { * Max number of tasks allowed in the report, when they exceed this * number the tasks table is omitted */ - private int maxTasks = DEF_MAX_TASKS + private int maxTasks /** * Compute resources usage stats @@ -74,14 +72,10 @@ class ReportObserver implements TraceObserverV2 { */ private boolean overwrite - /** - * Creates a report observer - * - * @param file The file path where to store the resulting HTML report document - */ - ReportObserver(Path file, Boolean overwrite=false) { - this.reportFile = file - this.overwrite = overwrite + ReportObserver(ReportConfig config) { + this.reportFile = FileHelper.asPath(config.file) + this.maxTasks = config.maxTasks + this.overwrite = config.overwrite } @TestOnly @@ -111,18 +105,6 @@ class ReportObserver implements TraceObserverV2 { records } - /** - * Set the number max allowed tasks. 
If this number is exceed the the tasks - * json in not included in the final report - * - * @param value The number of max task record allowed to be included in the HTML report - * @return The {@link ReportObserver} itself - */ - ReportObserver setMaxTasks( int value ) { - this.maxTasks = value - return this - } - /** * Create the trace file, in file already existing with the same name it is * "rolled" to a new file diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/TimelineObserver.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/TimelineObserver.groovy index f5420487ab..dda26c1e5f 100644 --- a/modules/nextflow/src/main/groovy/nextflow/trace/TimelineObserver.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/trace/TimelineObserver.groovy @@ -25,7 +25,9 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Session import nextflow.exception.AbortOperationException +import nextflow.file.FileHelper import nextflow.processor.TaskId +import nextflow.trace.config.TimelineConfig import nextflow.trace.event.TaskEvent import nextflow.util.Duration import nextflow.util.TestOnly @@ -39,8 +41,6 @@ import org.apache.commons.lang.StringEscapeUtils @CompileStatic class TimelineObserver implements TraceObserverV2 { - public static final String DEF_FILE_NAME = "timeline-${TraceHelper.launchTimestampFmt()}.html" - /** * Holds the the start time for tasks started/submitted but not yet completed */ @@ -61,9 +61,9 @@ class TimelineObserver implements TraceObserverV2 { private boolean overwrite - TimelineObserver(Path file, Boolean overwrite=false) { - this.reportFile = file - this.overwrite = overwrite + TimelineObserver(TimelineConfig config) { + this.reportFile = FileHelper.asPath(config.file) + this.overwrite = config.overwrite } @TestOnly diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/TraceFileObserver.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/TraceFileObserver.groovy index 1aa74e6a6f..f34dbd4558 
100644 --- a/modules/nextflow/src/main/groovy/nextflow/trace/TraceFileObserver.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/trace/TraceFileObserver.groovy @@ -25,8 +25,10 @@ import groovy.transform.PackageScope import groovy.util.logging.Slf4j import groovyx.gpars.agent.Agent import nextflow.Session +import nextflow.file.FileHelper import nextflow.processor.TaskHandler import nextflow.processor.TaskId +import nextflow.trace.config.TraceConfig import nextflow.trace.event.TaskEvent import nextflow.util.TestOnly /** @@ -38,8 +40,6 @@ import nextflow.util.TestOnly @CompileStatic class TraceFileObserver implements TraceObserverV2 { - public static final String DEF_FILE_NAME = "trace-${TraceHelper.launchTimestampFmt()}.txt" - /** * The list of fields included in the trace report */ @@ -91,6 +91,17 @@ class TraceFileObserver implements TraceObserverV2 { private boolean useRawNumber + TraceFileObserver(TraceConfig config) { + tracePath = FileHelper.asPath(config.file) + overwrite = config.overwrite + separator = config.sep + useRawNumbers(config.raw) + setFieldsAndFormats(config.fields) + } + + @TestOnly + protected TraceFileObserver() {} + void setFields( List entries ) { final names = TraceRecord.FIELDS.keySet() @@ -175,20 +186,6 @@ class TraceFileObserver implements TraceObserverV2 { return this } - /** - * Create the trace observer - * - * @param traceFile A path to the file where save the tracing data - */ - TraceFileObserver(Path traceFile, Boolean overwrite=false, String separator='\t') { - this.tracePath = traceFile - this.overwrite = overwrite - this.separator = separator - } - - @TestOnly - protected TraceFileObserver() {} - /** * Create the trace file, in file already existing with the same name it is * "rolled" to a new file diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/DagConfig.java b/modules/nextflow/src/main/groovy/nextflow/trace/config/DagConfig.groovy similarity index 55% rename from 
modules/nf-lang/src/main/java/nextflow/config/scopes/DagConfig.java rename to modules/nextflow/src/main/groovy/nextflow/trace/config/DagConfig.groovy index 93f0bce1bd..ff998000be 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/DagConfig.java +++ b/modules/nextflow/src/main/groovy/nextflow/trace/config/DagConfig.groovy @@ -13,47 +13,55 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package nextflow.config.scopes; +package nextflow.trace.config -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.trace.TraceHelper -public class DagConfig implements ConfigScope { +@ScopeName("dag") +@Description(""" + The `dag` scope controls the workflow diagram generated by Nextflow. +""") +@CompileStatic +class DagConfig implements ConfigScope { @ConfigOption @Description(""" When `true` enables the generation of the DAG file (default: `false`). """) - public boolean enabled; + final boolean enabled @ConfigOption @Description(""" - *Only supported by the HTML and Mermaid renderers.* + *Supported by the HTML and Mermaid renderers.* Controls the maximum depth at which to render sub-workflows (default: no limit). """) - public int depth; + final int depth @ConfigOption @Description(""" - *Only supported by the HTML and Mermaid renderers.* + *Supported by the Graphviz, DOT, HTML and Mermaid renderers.* Controls the direction of the DAG, can be `'LR'` (left-to-right) or `'TB'` (top-to-bottom) (default: `'TB'`). """) - public String direction; + final String direction @ConfigOption @Description(""" Graph file name (default: `'dag-.html'`). 
""") - public String file; + final String file @ConfigOption @Description(""" When `true` overwrites any existing DAG file with the same name (default: `false`). """) - public boolean overwrite; + final boolean overwrite @ConfigOption @Description(""" @@ -61,6 +69,22 @@ Graph file name (default: `'dag-.html'`). When `false`, channel names are omitted, operators are collapsed, and empty workflow inputs are removed (default: `false`). """) - public boolean verbose; + final boolean verbose + + /* required by extension point -- do not remove */ + DagConfig() {} + + DagConfig(Map opts) { + enabled = opts.enabled as boolean + depth = opts.depth != null ? opts.depth as int : -1 + direction = opts.direction ?: 'TB' + file = opts.file ?: defaultFileName() + overwrite = opts.overwrite as boolean + verbose = opts.verbose as boolean + } + + static final String defaultFileName() { + return "dag-${TraceHelper.launchTimestampFmt()}.html" + } } diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/config/ReportConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/config/ReportConfig.groovy new file mode 100644 index 0000000000..e3acd3944d --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/trace/config/ReportConfig.groovy @@ -0,0 +1,71 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package nextflow.trace.config + +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.trace.TraceHelper + +@ScopeName("report") +@Description(""" + The `report` scope allows you to configure the workflow [execution report](https://nextflow.io/docs/latest/tracing.html#execution-report). +""") +@CompileStatic +class ReportConfig implements ConfigScope { + + static final int DEF_MAX_TASKS = 10_000 + + @ConfigOption + @Description(""" + Create the execution report on workflow completion (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + The path of the created execution report file (default: `'report-<timestamp>.html'`). + """) + final String file + + @ConfigOption + @Description(""" Maximum number of tasks included in the execution report; when exceeded, the tasks table is omitted (default: `10000`). + """) + final int maxTasks + + @ConfigOption + @Description(""" + Overwrite any existing report file with the same name (default: `false`). + """) + final boolean overwrite + + /* required by extension point -- do not remove */ + ReportConfig() {} + + ReportConfig(Map opts) { + enabled = opts.enabled as boolean + file = opts.file ?: defaultFileName() + maxTasks = opts.maxTasks != null ? opts.maxTasks as int : DEF_MAX_TASKS + overwrite = opts.overwrite as boolean + } + + static final String defaultFileName() { + return "report-${TraceHelper.launchTimestampFmt()}.html" + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/config/TimelineConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/config/TimelineConfig.groovy new file mode 100644 index 0000000000..7cd0a828bd --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/trace/config/TimelineConfig.groovy @@ -0,0 +1,63 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.trace.config + +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.trace.TraceHelper + +@ScopeName("timeline") +@Description(""" + The `timeline` scope controls the execution timeline report generated by Nextflow. +""") +@CompileStatic +class TimelineConfig implements ConfigScope { + + @ConfigOption + @Description(""" + Create the timeline report on workflow completion (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + Timeline file name (default: `'timeline-<timestamp>.html'`). + """) + final String file + + @ConfigOption + @Description(""" + Overwrite any existing timeline file with the same name (default: `false`).
+ """) + final boolean overwrite + + /* required by extension point -- do not remove */ + TimelineConfig() {} + + TimelineConfig(Map opts) { + enabled = opts.enabled as boolean + file = opts.file ?: defaultFileName() + overwrite = opts.overwrite as boolean + } + + static final String defaultFileName() { + return "timeline-${TraceHelper.launchTimestampFmt()}.html" + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/trace/config/TraceConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/trace/config/TraceConfig.groovy new file mode 100644 index 0000000000..51bab16363 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/trace/config/TraceConfig.groovy @@ -0,0 +1,84 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package nextflow.trace.config + +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.trace.TraceHelper + +@ScopeName("trace") +@Description(""" + The `trace` scope controls the layout of the execution trace file generated by Nextflow. +""") +@CompileStatic +class TraceConfig implements ConfigScope { + + @ConfigOption + @Description(""" + Create the execution trace file on workflow completion (default: `false`). 
+ """) + final boolean enabled + + @ConfigOption + @Description(""" + Comma-separated list of [trace fields](https://nextflow.io/docs/latest/tracing.html#trace-report) to include in the report. + """) + final String fields + + @ConfigOption + @Description(""" + Trace file name (default: `'trace-.txt'`). + """) + final String file + + @ConfigOption + @Description(""" + Overwrite any existing trace file with the same name (default: `false`). + """) + final boolean overwrite + + @ConfigOption + @Description(""" + Report trace metrics as raw numbers where applicable, i.e. report duration values in milliseconds and memory values in bytes (default: `false`). + """) + final boolean raw + + @ConfigOption + @Description(""" + Character used to separate values in each row (default: `\\t`). + """) + final String sep + + /* required by extension point -- do not remove */ + TraceConfig() {} + + TraceConfig(Map opts) { + enabled = opts.enabled as boolean + fields = opts.fields ?: '' + file = opts.file ?: defaultFileName() + overwrite = opts.overwrite as boolean + raw = opts.raw as boolean + sep = opts.sep ?: '\t' + } + + static final String defaultFileName() { + return "trace-${TraceHelper.launchTimestampFmt()}.txt" + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/util/ConfigHelper.groovy b/modules/nextflow/src/main/groovy/nextflow/util/ConfigHelper.groovy index 50db0093ad..6c211673af 100644 --- a/modules/nextflow/src/main/groovy/nextflow/util/ConfigHelper.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/util/ConfigHelper.groovy @@ -38,23 +38,6 @@ import org.yaml.snakeyaml.Yaml @CompileStatic class ConfigHelper { - - def static getConfigProperty( def config, String execName, String propName ) { - def result = null - - // make sure that the *executor* is a map object - // it could also be a plain string (when it specifies just the its name) - if( execName && config instanceof Map && config['$'+execName] instanceof Map ) { - result = 
config['$'+execName][propName] - } - - if( result==null && config instanceof Map && config[propName] != null ) { - result = config[propName] - } - - return result - } - /** * Given a string value converts to its native object representation. * diff --git a/modules/nextflow/src/main/resources/META-INF/extensions.idx b/modules/nextflow/src/main/resources/META-INF/extensions.idx index 7fb037c37d..7250b10d76 100644 --- a/modules/nextflow/src/main/resources/META-INF/extensions.idx +++ b/modules/nextflow/src/main/resources/META-INF/extensions.idx @@ -14,15 +14,34 @@ # limitations under the License. # -nextflow.trace.DefaultObserverFactory -nextflow.util.DefaultSerializers -nextflow.secret.LocalSecretsProvider nextflow.cache.DefaultCacheFactory -nextflow.scm.RepositoryFactory +nextflow.conda.CondaConfig +nextflow.config.ConfigMap +nextflow.config.Manifest +nextflow.config.WorkflowConfig +nextflow.container.ApptainerConfig +nextflow.container.CharliecloudConfig +nextflow.container.DockerConfig +nextflow.container.PodmanConfig +nextflow.container.SarusConfig +nextflow.container.ShifterConfig +nextflow.container.SingularityConfig nextflow.container.resolver.DefaultContainerResolver +nextflow.executor.ExecutorConfig +nextflow.fusion.FusionConfig +nextflow.fusion.FusionTokenDefault +nextflow.mail.JavaMailProvider +nextflow.mail.MailConfig +nextflow.mail.Notification nextflow.mail.SendMailProvider nextflow.mail.SimpleMailProvider -nextflow.mail.JavaMailProvider nextflow.processor.tip.DefaultTaskTipProvider -nextflow.fusion.FusionTokenDefault - +nextflow.scm.RepositoryFactory +nextflow.secret.LocalSecretsProvider +nextflow.spack.SpackConfig +nextflow.trace.DefaultObserverFactory +nextflow.trace.config.DagConfig +nextflow.trace.config.ReportConfig +nextflow.trace.config.TimelineConfig +nextflow.trace.config.TraceConfig +nextflow.util.DefaultSerializers diff --git a/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy index f317cc265c..18710b064a 100644 --- a/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/SessionTest.groovy @@ -22,6 +22,9 @@ import java.nio.file.attribute.PosixFilePermission import nextflow.config.Manifest import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig +import nextflow.container.PodmanConfig +import nextflow.container.SarusConfig import nextflow.exception.AbortOperationException import nextflow.file.FileHelper import nextflow.script.ScriptFile @@ -72,120 +75,6 @@ class SessionTest extends Specification { } - - def 'test get queue size'() { - - when: - def session = [:] as Session - session.config = [ executor:['$sge':[queueSize: 123] ] ] - then: - session.getQueueSize('sge', 1) == 123 - session.getQueueSize('xxx', 1) == 1 - session.getQueueSize(null, 1) == 1 - - when: - def session2 = [:] as Session - session2.config = [ executor:[ queueSize: 321, '$sge':[queueSize:789] ] ] - then: - session2.getQueueSize('sge', 2) == 789 - session2.getQueueSize('xxx', 2) == 321 - session2.getQueueSize(null, 2) == 321 - - - when: - def session3 = [:] as Session - session3.config = [ executor: 'sge' ] - then: - session3.getQueueSize('sge', 1) == 1 - session3.getQueueSize('xxx', 2) == 2 - session3.getQueueSize(null, 3) == 3 - - - } - - def 'test get poll interval'() { - - when: - def session1 = [:] as Session - session1.config = [ executor:['$sge':[pollInterval: 345] ] ] - then: - session1.getPollInterval('sge').toMillis() == 345 - session1.getPollInterval('xxx').toMillis() == 1_000 - session1.getPollInterval(null).toMillis() == 1_000 - session1.getPollInterval(null, 2_000 as Duration).toMillis() == 2_000 - - when: - def session2 = [:] as Session - session2.config = [ executor:[ pollInterval: 321, '$sge':[pollInterval:789] ] ] - then: - session2.getPollInterval('sge').toMillis() == 789 - 
session2.getPollInterval('xxx').toMillis() == 321 - session2.getPollInterval(null).toMillis() == 321 - - when: - def session3 = [:] as Session - session3.config = [ executor: 'lsf' ] - then: - session3.getPollInterval('sge', 33 as Duration ).toMillis() == 33 - session3.getPollInterval('xxx', 44 as Duration ).toMillis() == 44 - session3.getPollInterval(null, 55 as Duration).toMillis() == 55 - - } - - def 'test get exit read timeout'() { - - setup: - def session1 = [:] as Session - session1.config = [ executor:['$sge':[exitReadTimeout: '5s'] ] ] - - expect: - session1.getExitReadTimeout('sge') == '5sec' as Duration - session1.getExitReadTimeout('lsf', '3sec' as Duration) == '3sec' as Duration - - } - - def 'test get queue stat interval'() { - - setup: - def session1 = [:] as Session - session1.config = [ executor:['$sge':[queueStatInterval: '4sec'] ] ] - - expect: - session1.getQueueStatInterval('sge') == '4sec' as Duration - session1.getQueueStatInterval('lsf', '1sec' as Duration) == '1sec' as Duration - - } - - def 'test monitor dump interval'() { - - setup: - def session1 = [:] as Session - session1.config = [ executor:['$sge':[dumpInterval: '6sec'] ] ] - - expect: - session1.getMonitorDumpInterval('sge') == '6sec' as Duration - session1.getMonitorDumpInterval('lsf', '2sec' as Duration) == '2sec' as Duration - - } - - def 'test get exec config prop'() { - - when: - def session = [:] as Session - session.config = [ executor: [x:123, y:222, '$hazelcast': [y:333] ] ] - then: - session.getExecConfigProp( 'hazelcast', 'x', null ) == 123 - session.getExecConfigProp( 'hazelcast', 'y', null ) == 333 - session.getExecConfigProp( 'local', 'y', null ) == 222 - session.getExecConfigProp( 'local', 'y', 'beta') == 222 - session.getExecConfigProp( 'hazelcast', 'z', null ) == null - session.getExecConfigProp( 'hazelcast', 'z', 'alpha') == 'alpha' - session.getExecConfigProp( 'hazelcast', 'z', 'alpha', [NXF_EXECUTOR_Z:'hola']) == 'hola' - session.getExecConfigProp( 'hazelcast', 
'p.q.z', null, [NXF_EXECUTOR_P_Q_Z:'hello']) == 'hello' - } - - - def 'test add lib path'() { setup: @@ -305,7 +194,7 @@ class SessionTest extends Specification { session.init(script) then: - session.binding != null + session.binding != null session.baseDir == folder session.workDir.isAbsolute() !session.workDir.toString().contains('..') @@ -314,7 +203,7 @@ class SessionTest extends Specification { session.observersV1 != null session.observersV2 != null session.workflowMetadata != null - + cleanup: session.classesDir?.deleteDir() @@ -359,7 +248,7 @@ class SessionTest extends Specification { def session = new Session([(ENGINE): CONFIG]) expect: - session.containerConfig == new ContainerConfig(CONFIG + [engine:ENGINE]) + session.containerConfig instanceof ContainerConfig session.containerConfig.enabled session.containerConfig.engine == ENGINE @@ -369,7 +258,6 @@ class SessionTest extends Specification { 'docker' | [enabled: true, x:'alpha', y: 'beta', registry: 'd.reg'] 'podman' | [enabled: true, x:'alpha', y: 'beta'] 'podman' | [enabled: true, x:'alpha', y: 'beta', registry: 'd.reg'] - 'udocker' | [enabled: true, x:'alpha', y: 'beta'] 'sarus' | [enabled: true, x:'delta', y: 'gamma'] 'shifter' | [enabled: true, x:'delta', y: 'gamma'] 'singularity' | [enabled: true, x:'delta', y: 'gamma'] @@ -382,13 +270,13 @@ class SessionTest extends Specification { def session = new Session(config) expect: - session.getContainerConfig(null) == new ContainerConfig(engine:'docker', registry:'docker.io') + session.getContainerConfig(null) == new DockerConfig(registry:'docker.io') and: - session.getContainerConfig('docker') == new ContainerConfig(engine:'docker', registry:'docker.io') + session.getContainerConfig('docker') == new DockerConfig(registry:'docker.io') and: - session.getContainerConfig('podman') == new ContainerConfig(engine:'podman', registry:'quay.io') + session.getContainerConfig('podman') == new PodmanConfig(registry:'quay.io') and: - session.getContainerConfig('sarus') 
== new ContainerConfig(engine:'sarus') + session.getContainerConfig('sarus') == new SarusConfig([:]) } @Unroll @@ -397,7 +285,7 @@ class SessionTest extends Specification { def session = Spy(new Session([conda: CONFIG])) expect: session.condaConfig.isEnabled() == EXPECTED - + where: EXPECTED | CONFIG | ENV false | [:] | [:] @@ -412,7 +300,7 @@ class SessionTest extends Specification { def session = Spy(new Session([spack: CONFIG])) expect: session.spackConfig.isEnabled() == EXPECTED - + where: EXPECTED | CONFIG | ENV false | [:] | [:] @@ -437,53 +325,6 @@ class SessionTest extends Specification { } } - def 'should get config attribute' () { - - given: - def session = Spy(Session) - - when: - def result = session.getConfigAttribute('alpha', 'hello') - then: - result == 'hello' - - when: - result = session.getConfigAttribute('delta', 'hello') - then: - session.getConfig() >> [delta: '1234'] - result == '1234' - - when: - result = session.getConfigAttribute('omega', 'hello') - then: - session.getSystemEnv() >> [NXF_OMEGA: '6789'] - result == '6789' - } - - def 'should get config nested attribute' () { - - given: - def session = Spy(Session) - - when: - def result = session.getConfigAttribute('alpha.beta.delta', 'hello') - then: - result == 'hello' - - when: - result = session.getConfigAttribute('alpha.beta.gamma', 'hello') - then: - session.getConfig() >> [alpha: [beta: [gamma: 'abc']]] - result == 'abc' - - when: - result = session.getConfigAttribute('alpha.beta.omega', 'hello') - then: - session.getSystemEnv() >> [NXF_ALPHA_BETA_OMEGA: 'OK'] - result == 'OK' - - } - @Unroll def 'should check valid process name with selector=#SELECTOR' () { @@ -495,7 +336,7 @@ class SessionTest extends Specification { session.checkValidProcessName(NAMES, SELECTOR, error) then: error[0] == MSG - + where: SELECTOR | NAMES | MSG 'foo' | ['foo','bar'] | null diff --git a/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy index 8bed3a6ab1..d8a0286056 100644 --- a/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/conda/CondaCacheTest.groovy @@ -19,6 +19,7 @@ package nextflow.conda import java.nio.file.Files import java.nio.file.Paths +import nextflow.SysEnv import spock.lang.Specification /** * @@ -26,6 +27,14 @@ import spock.lang.Specification */ class CondaCacheTest extends Specification { + def setupSpec() { + SysEnv.push([:]) + } + + def cleanupSpec() { + SysEnv.pop() + } + def 'should env file' () { given: @@ -359,7 +368,7 @@ class CondaCacheTest extends Specification { def ENV = 'bwa=1.1.1' def PREFIX = Paths.get('/foo/bar') and: - def cache = Spy(new CondaCache(new CondaConfig([channels:['bioconda','defaults']]))) + def cache = Spy(new CondaCache(new CondaConfig([channels:['bioconda','defaults']], [:]))) when: def result = cache.createLocalCondaEnv0(ENV, PREFIX) @@ -448,7 +457,8 @@ class CondaCacheTest extends Specification { def 'should get options from the config' () { when: - def cache = new CondaCache(new CondaConfig()) + def config = new CondaConfig([:], [:]) + def cache = new CondaCache(config) then: cache.createTimeout.minutes == 20 cache.createOptions == null @@ -458,7 +468,8 @@ class CondaCacheTest extends Specification { cache.binaryName == "conda" when: - cache = new CondaCache(new CondaConfig(createTimeout: '5 min', createOptions: '--foo --bar', cacheDir: '/conda/cache', useMamba: true)) + config = new CondaConfig([createTimeout: '5 min', createOptions: '--foo --bar', cacheDir: '/conda/cache', useMamba: true], [:]) + cache = new CondaCache(config) then: cache.createTimeout.minutes == 5 cache.createOptions == '--foo --bar' @@ -468,7 +479,8 @@ class CondaCacheTest extends Specification { cache.binaryName == "mamba" when: - cache = new CondaCache(new CondaConfig(createTimeout: '5 min', createOptions: '--foo --bar', cacheDir: 
'/conda/cache', useMicromamba: true)) + config = new CondaConfig([createTimeout: '5 min', createOptions: '--foo --bar', cacheDir: '/conda/cache', useMicromamba: true], [:]) + cache = new CondaCache(config) then: cache.createTimeout.minutes == 5 cache.createOptions == '--foo --bar' @@ -482,7 +494,7 @@ class CondaCacheTest extends Specification { given: def folder = Files.createTempDirectory('test'); folder.deleteDir() - def config = new CondaConfig(cacheDir: folder.toString()) + def config = new CondaConfig([cacheDir: folder.toString()], [:]) CondaCache cache = Spy(CondaCache, constructorArgs: [config]) when: @@ -500,7 +512,7 @@ class CondaCacheTest extends Specification { given: def folder = Paths.get('.test-conda-cache-' + Math.random()) - def config = new CondaConfig(cacheDir: folder.toString()) + def config = new CondaConfig([cacheDir: folder.toString()], [:]) CondaCache cache = Spy(CondaCache, constructorArgs: [config]) when: diff --git a/modules/nextflow/src/test/groovy/nextflow/conda/CondaConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/conda/CondaConfigTest.groovy index 80a5857fc8..e8309ed4fb 100644 --- a/modules/nextflow/src/test/groovy/nextflow/conda/CondaConfigTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/conda/CondaConfigTest.groovy @@ -39,10 +39,10 @@ class CondaConfigTest extends Specification { false | [enabled: false] | [:] true | [enabled: true] | [:] and: - false | [:] | [NXF_CONDA_ENABLED: false] - true | [:] | [NXF_CONDA_ENABLED: true] - false | [enabled: false] | [NXF_CONDA_ENABLED: true] // <-- config has priority - true | [enabled: true] | [NXF_CONDA_ENABLED: true] + false | [:] | [NXF_CONDA_ENABLED: 'false'] + true | [:] | [NXF_CONDA_ENABLED: 'true'] + false | [enabled: false] | [NXF_CONDA_ENABLED: 'true'] // <-- config has priority + true | [enabled: true] | [NXF_CONDA_ENABLED: 'true'] } diff --git a/modules/nextflow/src/test/groovy/nextflow/config/ConfigBuilderTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/config/ConfigBuilderTest.groovy index ead6b64d59..fff17bbe92 100644 --- a/modules/nextflow/src/test/groovy/nextflow/config/ConfigBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/config/ConfigBuilderTest.groovy @@ -29,7 +29,6 @@ import nextflow.exception.AbortOperationException import nextflow.exception.ConfigParseException import nextflow.extension.FilesEx import nextflow.secret.SecretsLoader -import nextflow.trace.TraceHelper import nextflow.util.ConfigHelper import spock.lang.Ignore import spock.lang.Specification @@ -40,8 +39,12 @@ import spock.lang.Unroll */ class ConfigBuilderTest extends Specification { - def setup() { - TraceHelper.testTimestampFmt = '20221001' + def setupSpec() { + SysEnv.push([:]) + } + + def cleanupSpec() { + SysEnv.pop() } def 'build config object' () { @@ -833,7 +836,6 @@ class ConfigBuilderTest extends Specification { then: // command line should override the config file config.trace instanceof Map config.trace.enabled - config.trace.file == 'trace-20221001.txt' } def 'should set session report options' () { @@ -889,7 +891,6 @@ class ConfigBuilderTest extends Specification { then: config.report instanceof Map config.report.enabled - config.report.file == 'report-20221001.html' } @@ -946,7 +947,6 @@ class ConfigBuilderTest extends Specification { then: config.dag instanceof Map config.dag.enabled - config.dag.file == 'dag-20221001.html' } def 'should set session weblog options' () { @@ -1065,7 +1065,6 @@ class ConfigBuilderTest extends Specification { then: config.timeline instanceof Map config.timeline.enabled - config.timeline.file == 'timeline-20221001.html' } def 'should set tower options' () { @@ -1714,10 +1713,12 @@ class ConfigBuilderTest extends Specification { when: + SysEnv.push(HOME: '/home/user') def opt = new CliOptions(config: [file.toFile().canonicalPath] ) def cfg = new ConfigBuilder().setOptions(opt).build() + SysEnv.pop() then: - cfg.params.foo == 
System.getenv('HOME') + cfg.params.foo == '/home/user' when: file.text = diff --git a/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy b/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy index 2860cb3c3e..6c48b8b2fe 100644 --- a/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/config/ManifestTest.groovy @@ -61,12 +61,12 @@ class ManifestTest extends Specification { manifest.contributors[0].name == 'Alice' manifest.contributors[0].affiliation == 'University' manifest.contributors[0].email == 'alice@university.edu' - manifest.contributors[0].contribution == [ContributionType.AUTHOR, ContributionType.MAINTAINER] as Set + manifest.contributors[0].contribution == [ContributionType.AUTHOR, ContributionType.MAINTAINER] manifest.contributors[0].orcid == 'https://orcid.org/0000-0000-0000-0000' manifest.contributors[1].name == 'Bob' manifest.contributors[1].affiliation == 'Company' manifest.contributors[1].email == 'bob@company.com' - manifest.contributors[1].contribution == [ContributionType.CONTRIBUTOR] as Set + manifest.contributors[1].contribution == [ContributionType.CONTRIBUTOR] manifest.nextflowVersion == '1.2.3' manifest.name == 'foo' manifest.organization == 'My Organization' diff --git a/modules/nextflow/src/test/groovy/nextflow/container/ApptainerBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/ApptainerBuilderTest.groovy index 45277067dd..4428b0a069 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/ApptainerBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/ApptainerBuilderTest.groovy @@ -40,13 +40,11 @@ class ApptainerBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid busybox' - new ApptainerBuilder('busybox') - .params(engineOptions: '-q -v') + new 
ApptainerBuilder('busybox', new ApptainerConfig(engineOptions: '-q -v')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer -q -v run --no-home --pid busybox' - new ApptainerBuilder('busybox') - .params(runOptions: '--contain --writable') + new ApptainerBuilder('busybox', new ApptainerConfig(runOptions: '--contain --writable')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid --contain --writable busybox' @@ -55,31 +53,27 @@ class ApptainerBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path1) .addMount(path2) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid -B /foo/data/file1 -B /bar/data/file2 -B "$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid -B /foo/data/file1 -B "$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .params(readOnlyInputs: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid -B /foo/data/file1:/foo/data/file1:ro -B 
"$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path3) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer run --no-home --pid -B /bar/data\\ file -B "$NXF_TASK_WORKDIR" ubuntu' @@ -104,47 +98,40 @@ class ApptainerBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid -B "$NXF_TASK_WORKDIR" busybox' - new ApptainerBuilder('busybox') - .params(engineOptions: '-q -v') + new ApptainerBuilder('busybox', new ApptainerConfig(engineOptions: '-q -v')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer -q -v exec --no-home --pid -B "$NXF_TASK_WORKDIR" busybox' - new ApptainerBuilder('busybox') - .params(runOptions: '--contain --writable') + new ApptainerBuilder('busybox', new ApptainerConfig(runOptions: '--contain --writable')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid -B "$NXF_TASK_WORKDIR" --contain --writable busybox' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: false)) .addMount(path1) - .params(autoMounts: false) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path1) .addMount(path2) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home 
--pid -B /foo/data/file1 -B /bar/data/file2 -B "$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid -B /foo/data/file1 -B "$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .params(readOnlyInputs: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid -B /foo/data/file1:/foo/data/file1:ro -B "$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') + new ApptainerBuilder('ubuntu', new ApptainerConfig(autoMounts: true)) .addMount(path3) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid -B /bar/data\\ file -B "$NXF_TASK_WORKDIR" ubuntu' @@ -153,8 +140,7 @@ class ApptainerBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home -B "$NXF_TASK_WORKDIR" ubuntu' - new ApptainerBuilder('ubuntu') - .params(oci: true) + new ApptainerBuilder('ubuntu', new ApptainerConfig(ociMode: true)) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+APPTAINERENV_TMP="$TMP"} ${TMPDIR:+APPTAINERENV_TMPDIR="$TMPDIR"} apptainer exec --no-home --pid -B "$NXF_TASK_WORKDIR" ubuntu' @@ -238,7 +224,7 @@ class ApptainerBuilderTest extends Specification { def 'test apptainer env'() { given: - def builder = Spy(ApptainerBuilder) + def builder = new ApptainerBuilder('busybox') expect: 
builder.makeEnv(ENV).toString() == RESULT diff --git a/modules/nextflow/src/test/groovy/nextflow/container/ApptainerCacheTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/ApptainerCacheTest.groovy index 6eb851f487..8e350be7c9 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/ApptainerCacheTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/ApptainerCacheTest.groovy @@ -33,7 +33,7 @@ class ApptainerCacheTest extends Specification { def 'should return a simple name given an image url'() { given: - def helper = new ApptainerCache(Mock(ContainerConfig)) + def helper = new ApptainerCache(new ApptainerConfig([:])) expect: helper.simpleName(url) == expected @@ -57,7 +57,7 @@ class ApptainerCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new ApptainerCache([libraryDir: "$dir"] as ContainerConfig) + def cache = new ApptainerCache([libraryDir: "$dir"] as ApptainerConfig) then: cache.getLibraryDir() == dir @@ -71,7 +71,7 @@ class ApptainerCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new ApptainerCache(GroovyMock(ContainerConfig), [NXF_APPTAINER_LIBRARYDIR: "$dir"]) + def cache = new ApptainerCache(new ApptainerConfig([:]), [NXF_APPTAINER_LIBRARYDIR: "$dir"]) then: cache.getLibraryDir() == dir @@ -85,7 +85,7 @@ class ApptainerCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new ApptainerCache([cacheDir: "$dir"] as ContainerConfig) + def cache = new ApptainerCache([cacheDir: "$dir"] as ApptainerConfig) then: cache.getCacheDir() == dir @@ -99,7 +99,7 @@ class ApptainerCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new ApptainerCache(GroovyMock(ContainerConfig), [NXF_APPTAINER_CACHEDIR: "$dir"]) + def cache = new ApptainerCache(new ApptainerConfig([:]), [NXF_APPTAINER_CACHEDIR: "$dir"]) then: cache.getCacheDir() == dir @@ -116,7 
+116,7 @@ class ApptainerCacheTest extends Specification { def LOCAL = 'foo-latest.img' def TARGET_FILE = dir.resolve(LOCAL) def TEMP_FILE = dir.resolve('foo-latest.pulling'); TEMP_FILE.text = 'foo' - ContainerConfig config = [noHttps: true] + ApptainerConfig config = [noHttps: true] and: def cache = Spy(new ApptainerCache(config)) @@ -148,7 +148,7 @@ class ApptainerCacheTest extends Specification { def container = dir.resolve(LOCAL) container.text = 'dummy' and: - def cache = Spy(new ApptainerCache([:] as ContainerConfig)) + def cache = Spy(new ApptainerCache([:] as ApptainerConfig)) when: def result = cache.downloadContainerImage(IMAGE) @@ -172,7 +172,7 @@ class ApptainerCacheTest extends Specification { def container = dir.resolve(LOCAL) container.text = 'dummy' and: - def cache = Spy(new ApptainerCache([:] as ContainerConfig)) + def cache = Spy(new ApptainerCache([:] as ApptainerConfig)) when: def result = cache.downloadContainerImage(IMAGE) @@ -197,7 +197,7 @@ class ApptainerCacheTest extends Specification { def dir = Paths.get('/test/path') def container = dir.resolve(LOCAL) and: - def cache = Spy(new ApptainerCache([:] as ContainerConfig)) + def cache = Spy(new ApptainerCache([:] as ApptainerConfig)) when: def file = cache.getCachePathFor(IMAGE) diff --git a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy index ba8e1b77bb..d948021405 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy @@ -40,24 +40,20 @@ class CharliecloudBuilderTest extends Specification { .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/busybox') - .params(writeFake: false) + new 
CharliecloudBuilder('/cacheDir/busybox', new CharliecloudConfig(writeFake: false)) .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/busybox') - .params(writeFake: false) + new CharliecloudBuilder('/cacheDir/busybox', new CharliecloudConfig(writeFake: false)) .params(readOnlyInputs: true) .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/busybox') - .params(runOptions: '-j --no-home') + new CharliecloudBuilder('/cacheDir/busybox', new CharliecloudConfig(runOptions: '-j --no-home')) .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" -j --no-home /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/busybox') - .params(temp: '/foo') + new CharliecloudBuilder('/cacheDir/busybox', new CharliecloudConfig(temp: '/foo')) .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b /foo:/tmp -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' @@ -90,16 +86,16 @@ class CharliecloudBuilderTest extends Specification { cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' when: - cmd = new CharliecloudBuilder('/cacheDir/ubuntu') - .params(writeFake: 'true') + def config = new CharliecloudConfig(writeFake: true) + cmd = new CharliecloudBuilder('/cacheDir/ubuntu', config) .build() .getRunCommand() then: cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu --' when: - cmd = new CharliecloudBuilder('/cacheDir/ubuntu') - .params(writeFake: 'false') + config = new CharliecloudConfig(writeFake: false) + cmd = new CharliecloudBuilder('/cacheDir/ubuntu', config) .build() .getRunCommand() then: @@ -116,7 +112,7 @@ class 
CharliecloudBuilderTest extends Specification { when: cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') - .params(readOnlyInputs: 'true') + .params(readOnlyInputs: true) .build() .getRunCommand('bwa --this --that file.fastq') then: @@ -125,27 +121,27 @@ class CharliecloudBuilderTest extends Specification { when: cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') - .params(readOnlyInputs: 'false') + .params(readOnlyInputs: false) .build() .getRunCommand('bwa --this --that file.fastq') then: cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + config = new CharliecloudConfig(writeFake: false) + cmd = new CharliecloudBuilder('/cacheDir/ubuntu', config) .params(entry:'/bin/sh') - .params(readOnlyInputs: 'false') - .params(writeFake: 'false') + .params(readOnlyInputs: false) .build() .getRunCommand('bwa --this --that file.fastq') then: cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + config = new CharliecloudConfig(writeFake: false) + cmd = new CharliecloudBuilder('/cacheDir/ubuntu', config) .params(entry:'/bin/sh') - .params(readOnlyInputs: 'true') - .params(writeFake: 'false') + .params(readOnlyInputs: true) .addMount(db_file) .addMount(db_file) .build().getRunCommand('bwa --this --that file.fastq') @@ -155,9 +151,9 @@ class CharliecloudBuilderTest extends Specification { when: cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') + .params(readOnlyInputs: false) .addMount(db_file) .addMount(db_file) - .params(readOnlyInputs: 'false') .build() .getRunCommand('bwa --this --that file.fastq') then: @@ -168,7 +164,7 @@ class CharliecloudBuilderTest extends Specification { def 
'test charliecloud env'() { given: - def builder = Spy(CharliecloudBuilder) + def builder = new CharliecloudBuilder('/cacheDir/ubuntu') expect: builder.makeEnv(ENV).toString() == RESULT diff --git a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy index cbb4f236d1..21c463e1c6 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudCacheTest.groovy @@ -33,7 +33,7 @@ class CharliecloudCacheTest extends Specification { def 'should return a simple name given an image url'() { given: - def helper = new CharliecloudCache(Mock(ContainerConfig)) + def helper = new CharliecloudCache(Mock(CharliecloudConfig)) expect: helper.simpleName(url) == expected @@ -70,7 +70,7 @@ class CharliecloudCacheTest extends Specification { def cacheDir = dir.resolve('nxf.ch') when: - def cache = new CharliecloudCache([cacheDir: "$cacheDir"] as ContainerConfig) + def cache = new CharliecloudCache([cacheDir: "$cacheDir"] as CharliecloudConfig) then: cache.getCacheDir() == cacheDir @@ -86,7 +86,7 @@ class CharliecloudCacheTest extends Specification { def cacheDir = dir.resolve('nxf.ch') when: - def cache = new CharliecloudCache(GroovyMock(ContainerConfig), [NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir"]) + def cache = new CharliecloudCache(new CharliecloudConfig([:]), [NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir"]) then: cache.getCacheDir() == cacheDir @@ -104,7 +104,7 @@ class CharliecloudCacheTest extends Specification { def charliecloudCacheDir = dir.resolve('charliecloud') when: - def cache = new CharliecloudCache([cacheDir: "$cacheDir"] as ContainerConfig, [CH_IMAGE_STORAGE: "$charliecloudCacheDir"]) + def cache = new CharliecloudCache([cacheDir: "$cacheDir"] as CharliecloudConfig, [CH_IMAGE_STORAGE: "$charliecloudCacheDir"]) then: cache.getCacheDir() == charliecloudCacheDir @@ -123,7 
+123,7 @@ class CharliecloudCacheTest extends Specification { def charliecloudCacheDir = dir.resolve('charliecloud') when: - def cache = new CharliecloudCache(GroovyMock(ContainerConfig), [NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir", CH_IMAGE_STORAGE: "$charliecloudCacheDir"]) + def cache = new CharliecloudCache(new CharliecloudConfig([:]), [NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir", CH_IMAGE_STORAGE: "$charliecloudCacheDir"]) then: cache.getCacheDir() == charliecloudCacheDir @@ -140,7 +140,7 @@ class CharliecloudCacheTest extends Specification { def cacheDir = dir.resolve('nxf.ch') when: - def cache = new CharliecloudCache([cacheDir: "$cacheDir", writeFake: 'false'] as ContainerConfig, [CH_IMAGE_STORAGE: "$cacheDir"]) + def cache = new CharliecloudCache([cacheDir: "$cacheDir", writeFake: false] as CharliecloudConfig, [CH_IMAGE_STORAGE: "$cacheDir"]) and: cache.getCacheDir() @@ -160,7 +160,7 @@ class CharliecloudCacheTest extends Specification { def cacheDir = dir.resolve('nxf.ch') when: - def cache = new CharliecloudCache([writeFake: 'false'] as ContainerConfig, [ NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir", CH_IMAGE_STORAGE: "$cacheDir" ]) + def cache = new CharliecloudCache([writeFake: false] as CharliecloudConfig, [ NXF_CHARLIECLOUD_CACHEDIR: "$cacheDir", CH_IMAGE_STORAGE: "$cacheDir" ]) and: cache.getCacheDir() @@ -181,7 +181,7 @@ class CharliecloudCacheTest extends Specification { def CACHE_PATH = dir.resolve('charliecloud') def TARGET_PATH = CACHE_PATH.resolve(LOCAL) and: - def cache = Spy(new CharliecloudCache([:] as ContainerConfig)) + def cache = Spy(new CharliecloudCache([:] as CharliecloudConfig)) when: def result = cache.downloadCharliecloudImage(IMAGE) @@ -207,7 +207,7 @@ class CharliecloudCacheTest extends Specification { def CACHE_PATH = dir.resolve('charliecloud') def TARGET_PATH = CACHE_PATH.resolve(LOCAL) and: - def cache = Spy(new CharliecloudCache([:] as ContainerConfig)) + def cache = Spy(new CharliecloudCache([:] as CharliecloudConfig)) 
TARGET_PATH.mkdirs() when: @@ -232,7 +232,7 @@ class CharliecloudCacheTest extends Specification { def dir = Paths.get('/test/path') def container = dir.resolve(LOCAL) and: - def cache = Spy(new CharliecloudCache([:] as ContainerConfig)) + def cache = Spy(new CharliecloudCache([:] as CharliecloudConfig)) when: def file = cache.getCachePathFor(IMAGE) diff --git a/modules/nextflow/src/test/groovy/nextflow/container/ContainerBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/ContainerBuilderTest.groovy index 14d5403fdb..8358b3dc4f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/ContainerBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/ContainerBuilderTest.groovy @@ -71,32 +71,20 @@ class ContainerBuilderTest extends Specification { def IMAGE = 'foo:latest' when: - def builder = ContainerBuilder.create(ENGINE,IMAGE) + def builder = ContainerBuilder.create(CONFIG,IMAGE) then: builder.class == CLAZZ builder.getImage() == IMAGE where: - ENGINE | CLAZZ - 'docker' | DockerBuilder - 'podman' | PodmanBuilder - 'singularity' | SingularityBuilder - 'apptainer' | ApptainerBuilder - 'sarus' | SarusBuilder - 'shifter' | ShifterBuilder - 'charliecloud' | CharliecloudBuilder - 'udocker' | UdockerBuilder - - } - - def 'should throw illegal arg' () { - - when: - ContainerBuilder.create('foo','image:any') - - then: - def e = thrown(IllegalArgumentException) - e.message == 'Unknown container engine: foo' + CONFIG | CLAZZ + new DockerConfig() | DockerBuilder + new PodmanConfig() | PodmanBuilder + new SingularityConfig() | SingularityBuilder + new ApptainerConfig() | ApptainerBuilder + new SarusConfig() | SarusBuilder + new ShifterConfig() | ShifterBuilder + new CharliecloudConfig() | CharliecloudBuilder } diff --git a/modules/nextflow/src/test/groovy/nextflow/container/ContainerConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/ContainerConfigTest.groovy index 30651ff32e..4ab13906c6 100644 --- 
a/modules/nextflow/src/test/groovy/nextflow/container/ContainerConfigTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/ContainerConfigTest.groovy @@ -16,6 +16,7 @@ package nextflow.container +import nextflow.SysEnv import spock.lang.Specification import spock.lang.Unroll @@ -28,7 +29,7 @@ class ContainerConfigTest extends Specification { @Unroll def 'should return env whitelist for=#VAL' () { when: - def cfg = new ContainerConfig(envWhitelist: VAL) + def cfg = new DockerConfig(envWhitelist: VAL) then: cfg.getEnvWhitelist() == EXPECTED @@ -46,74 +47,53 @@ class ContainerConfigTest extends Specification { def 'should validate legacy entry point' () { when: - def cfg = new ContainerConfig(OPTS, ENV) + SysEnv.push(ENV) + def cfg = new DockerConfig(OPTS) + def result = cfg.entrypointOverride() + SysEnv.pop() then: - cfg.entrypointOverride() == EXPECTED - + result == EXPECTED + where: - OPTS | ENV | EXPECTED - [:] | [:] | false - [entrypointOverride: false] | [:] | false - [entrypointOverride: true] | [:] | true + OPTS | ENV | EXPECTED + [:] | [:] | false and: - [:] | [NXF_CONTAINER_ENTRYPOINT_OVERRIDE: 'true'] | true - [entrypointOverride: false] | [NXF_CONTAINER_ENTRYPOINT_OVERRIDE: 'true'] | false + [:] | [NXF_CONTAINER_ENTRYPOINT_OVERRIDE: 'true'] | true } @Unroll - def 'should validate oci mode and direct mode' () { + def 'should validate oci auto-pull mode' () { - when: - def cfg = new ContainerConfig(OPTS) - then: - cfg.isSingularityOciMode() == OCI_MODE - cfg.canRunOciImage() == AUTO_PULL + expect: + CONFIG.canRunOciImage() == OCI_AUTO_PULL where: - OPTS | OCI_MODE | AUTO_PULL - [:] | false | false - [oci:true] | false | false - [oci:false] | false | false - [ociMode:true] | false | false - and: - [engine:'docker', oci:true] | false | false - [engine:'singularity'] | false | false - [engine:'singularity', oci:false] | false | false - [engine:'singularity', ociAutoPull:false] | false | false + CONFIG | OCI_AUTO_PULL + new SingularityConfig([:]) 
| false + new SingularityConfig(ociAutoPull:false) | false and: - [engine:'singularity', oci:true] | true | true - [engine:'singularity', ociMode:true] | true | true - [engine:'apptainer', oci:true] | false | false - [engine:'apptainer', ociMode:true] | false | false + new SingularityConfig(ociMode:true) | true + new ApptainerConfig(ociMode:true) | false and: - [engine:'singularity', ociAutoPull:true] | false | true - [engine:'apptainer', ociAutoPull:true] | false | true + new SingularityConfig(ociAutoPull:true) | true + new ApptainerConfig(ociAutoPull:true) | true } def 'should get fusion options' () { - when: - def cfg = new ContainerConfig(OPTS) + expect: + CONFIG.getFusionOptions() == EXPECTED - then: - cfg.fusionOptions() == EXPECTED - where: - OPTS | EXPECTED - [:] | null - [engine:'docker'] | '--rm --privileged' - [engine:'podman'] | '--rm --privileged' - and: - [engine: 'singularity'] | null - [engine: 'singularity', ociMode:true] | '-B /dev/fuse' - [engine: 'singularity', ociAutoPull: true] | null - [engine: 'apptainer', oci:true] | null - and: - [engine:'docker', fusionOptions:'--cap-add foo']| '--cap-add foo' - [engine:'podman', fusionOptions:'--cap-add bar']| '--cap-add bar' + CONFIG | EXPECTED + new DockerConfig([:]) | '--rm --privileged' + new PodmanConfig([:]) | '--rm --privileged' and: - [engine:'sarus', fusionOptions:'--other'] | '--other' + new SingularityConfig([:]) | null + new SingularityConfig(ociMode:true) | '-B /dev/fuse' + new SingularityConfig(ociAutoPull:true) | null + new ApptainerConfig(oci:true) | null } } diff --git a/modules/nextflow/src/test/groovy/nextflow/container/ContainerHandlerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/ContainerHandlerTest.groovy index 0483a11447..c2684b75dc 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/ContainerHandlerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/ContainerHandlerTest.groovy @@ -44,7 +44,8 @@ class ContainerHandlerTest extends 
Specification { def 'test normalize docker image name' () { given: - def n = new ContainerHandler([registry: REGISTRY, registryOverride: OVERRIDE]) + def config = new DockerConfig(registry: REGISTRY, registryOverride: OVERRIDE) + def n = new ContainerHandler(config) expect: n.normalizeDockerImageName(IMAGE) == EXPECTED @@ -66,7 +67,7 @@ class ContainerHandlerTest extends Specification { def 'test normalize shifter image name' () { given: - def n = new ContainerHandler([:]) + def n = new ContainerHandler(new ShifterConfig([:])) expect: n.normalizeShifterImageName(image) == expected @@ -87,7 +88,8 @@ class ContainerHandlerTest extends Specification { def 'test normalize singularity image #image' () { given: - def n = new ContainerHandler([registry: registry], Paths.get('/root/dir')) + def config = new SingularityConfig(registry: registry) + def n = new ContainerHandler(config, Paths.get('/root/dir')) expect: n.normalizeSingularityImageName(image) == expected @@ -124,7 +126,7 @@ class ContainerHandlerTest extends Specification { def foo = base.resolve('foo'); foo.mkdir() def bar = Files.createFile(foo.resolve('bar')) def img = Files.createFile(base.resolve('bar.img')) - def n = new ContainerHandler([:], base) + def n = new ContainerHandler(new SingularityConfig([:]), base) expect: n.normalizeSingularityImageName('foo/bar') == bar.toAbsolutePath().toString() @@ -139,7 +141,8 @@ class ContainerHandlerTest extends Specification { @Unroll def 'test normalize method for docker' () { given: - def n = Spy(new ContainerHandler([engine: 'docker', enabled: true, registry: registry])) + def config = new DockerConfig(enabled: true, registry: registry) + def n = Spy(new ContainerHandler(config)) when: def result = n.normalizeImageName(image) @@ -163,7 +166,8 @@ class ContainerHandlerTest extends Specification { def 'test normalize method for shifter' () { given: - def n = Spy(new ContainerHandler([engine: 'shifter', enabled: true])) + def config = new ShifterConfig(enabled: true) 
+ def n = Spy(new ContainerHandler(config)) when: def result = n.normalizeImageName(image) @@ -188,7 +192,8 @@ class ContainerHandlerTest extends Specification { given: def EXECUTOR = Mock(Executor) def IMAGE = 'foo:latest' - def handler = Spy(new ContainerHandler([engine: 'shifter', enabled: true])) + def config = new ShifterConfig(enabled: true) + def handler = Spy(new ContainerHandler(config)) when: def result = handler.normalizeImageName(IMAGE) @@ -203,8 +208,9 @@ class ContainerHandlerTest extends Specification { @Unroll def 'test normalize method for charliecloud' () { - given: - def n = new ContainerHandler([registry: registry]) + given: + def config = new CharliecloudConfig(registry: registry) + def n = new ContainerHandler(config) expect: n.normalizeCharliecloudImageName(image) == expected @@ -233,7 +239,8 @@ class ContainerHandlerTest extends Specification { def 'test normalize method for singularity' () { given: def BASE = Paths.get('/abs/path/') - def handler = Spy(new ContainerHandler(engine: 'singularity', enabled: true, ociMode: OCI, baseDir: BASE)) + def config = new SingularityConfig(enabled: true, ociMode: OCI) + def handler = Spy(new ContainerHandler(config, BASE)) when: def result = handler.normalizeImageName(IMAGE) @@ -265,7 +272,8 @@ class ContainerHandlerTest extends Specification { def 'test normalize method for OCI direct mode' () { given: def BASE = Paths.get('/abs/path/') - def handler = Spy(new ContainerHandler(engine: 'apptainer', enabled: true, ociAutoPull:AUTO, baseDir: BASE)) + def config = new ApptainerConfig(enabled: true, ociAutoPull: AUTO) + def handler = Spy(new ContainerHandler(config, BASE)) when: def result = handler.normalizeImageName(IMAGE) @@ -311,19 +319,21 @@ class ContainerHandlerTest extends Specification { def 'should not invoke caching when engine is disabled' () { given: - final handler = Spy(new ContainerHandler([engine: 'singularity'])) final IMAGE = 'docker://foo.img' + final config = Spy(SingularityConfig) + 
final handler = Spy(new ContainerHandler(config)) + def result when: - handler.config.enabled = false - def result = handler.normalizeImageName(IMAGE) + config.enabled >> false + result = handler.normalizeImageName(IMAGE) then: 1 * handler.normalizeSingularityImageName(IMAGE) >> IMAGE 0 * handler.createSingularityCache(_,_) >> null result == IMAGE when: - handler.config.enabled = true + config.enabled >> true result = handler.normalizeImageName(IMAGE) then: 1 * handler.normalizeSingularityImageName(IMAGE) >> IMAGE @@ -333,7 +343,8 @@ class ContainerHandlerTest extends Specification { def 'should invoke singularity cache' () { given: - def handler = Spy(ContainerHandler,constructorArgs:[[engine: 'singularity', enabled: true]]) + def config = new SingularityConfig(enabled: true) + def handler = Spy(new ContainerHandler(config)) when: def result = handler.normalizeImageName(IMG) diff --git a/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy index e71995a842..eaf2742443 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy @@ -32,7 +32,7 @@ class DockerBuilderTest extends Specification { def 'test docker mounts'() { given: - def builder = Spy(DockerBuilder) + def builder = new DockerBuilder('busybox') def files = [Paths.get('/folder/data'), Paths.get('/folder/db'), Paths.get('/folder/db') ] def real = [ Paths.get('/user/yo/nextflow/bin'), Paths.get('/user/yo/nextflow/work'), Paths.get('/db/pdb/local/data') ] def quotes = [ Paths.get('/folder with blanks/A'), Paths.get('/folder with blanks/B') ] @@ -49,7 +49,7 @@ class DockerBuilderTest extends Specification { def 'test docker env'() { given: - def builder = Spy(DockerBuilder) + def builder = new DockerBuilder('busybox') expect: builder.makeEnv(ENV).toString() == EXPECT @@ -77,13 +77,11 @@ class 
DockerBuilderTest extends Specification { .build() .runCommand == 'docker run -i -e "FOO=1" -e "BAR=hello world" -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new DockerBuilder('ubuntu') - .params(temp:'/hola') + new DockerBuilder('ubuntu', new DockerConfig(temp:'/hola')) .build() .runCommand == 'docker run -i -v /hola:/tmp -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" ubuntu' - new DockerBuilder('busybox') - .params(sudo: true) + new DockerBuilder('busybox', new DockerConfig(sudo: true)) .build() .runCommand == 'sudo docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" busybox' @@ -92,8 +90,7 @@ class DockerBuilderTest extends Specification { .build() .runCommand == 'docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --entrypoint /bin/bash busybox' - new DockerBuilder('busybox') - .params(runOptions: '-x --zeta') + new DockerBuilder('busybox', new DockerConfig(runOptions: '-x --zeta')) .build() .runCommand == 'docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" -x --zeta busybox' @@ -102,8 +99,7 @@ class DockerBuilderTest extends Specification { .build() .runCommand == 'docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name hola busybox' - new DockerBuilder('busybox') - .params(engineOptions: '--tlsverify --tlscert="/path/to/my/cert"') + new DockerBuilder('busybox', new DockerConfig(engineOptions: '--tlsverify --tlscert="/path/to/my/cert"')) .build() .runCommand == 'docker --tlsverify --tlscert="/path/to/my/cert" run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" busybox' @@ -121,8 +117,7 @@ class DockerBuilderTest extends Specification { .runCommand == 'docker run -i -v /home/db:/home/db:ro -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new DockerBuilder('fedora') - .params(mountFlags: 'Z') + new DockerBuilder('fedora', new DockerConfig(mountFlags: 
'Z')) .addMount(db_file) .build() .runCommand == 'docker run -i -v /home/db:/home/db:Z -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR":Z -w "$NXF_TASK_WORKDIR" fedora' @@ -147,15 +142,13 @@ class DockerBuilderTest extends Specification { .build() .runCommand == 'docker run -i --cpu-shares 8192 --cpuset-cpus 1,2 -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new DockerBuilder('fedora') - .params(legacy: true) + new DockerBuilder('fedora', new DockerConfig(legacy: true)) .setCpuset('1,2') .build() .runCommand == 'docker run -i --cpuset 1,2 -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new DockerBuilder('fedora') - .params(legacy: true) + new DockerBuilder('fedora', new DockerConfig(legacy: true)) .setCpus(1) .build() .runCommand == 'docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' @@ -224,6 +217,7 @@ class DockerBuilderTest extends Specification { def 'test get commands'() { when: + def config def docker = new DockerBuilder('busybox').setName('c1').build() then: docker.runCommand == 'docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name c1 busybox' @@ -231,7 +225,8 @@ class DockerBuilderTest extends Specification { docker.killCommand == 'docker stop c1' when: - docker = new DockerBuilder('busybox').setName('c2').params(sudo: true, remove: true).build() + config = new DockerConfig(sudo: true, remove: true) + docker = new DockerBuilder('busybox', config).setName('c2').build() then: docker.runCommand == 'sudo docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name c2 busybox' docker.removeCommand == 'sudo docker rm c2' @@ -239,7 +234,8 @@ class DockerBuilderTest extends Specification { when: - docker = new DockerBuilder('busybox').setName('c3').params(remove: true).build() + config = new DockerConfig(remove: true) + docker = new DockerBuilder('busybox', config).setName('c3').build() then: docker.runCommand == 
'docker run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name c3 busybox' docker.removeCommand == 'docker rm c3' @@ -251,7 +247,8 @@ class DockerBuilderTest extends Specification { docker.killCommand == 'docker kill -s SIGKILL c4' when: - docker = new DockerBuilder('busybox').setName('c5').params(kill: false,remove: false).build() + config = new DockerConfig(remove: false) + docker = new DockerBuilder('busybox', config).setName('c5').params(kill: false).build() then: docker.killCommand == null docker.removeCommand == null @@ -281,7 +278,8 @@ class DockerBuilderTest extends Specification { def 'should return mount flags'() { given: - def builder = new DockerBuilder().params(mountFlags: flags) + def config = new DockerConfig(mountFlags: flags) + def builder = new DockerBuilder('busybox', config) expect: builder.mountFlags(readOnly) == expected diff --git a/modules/nextflow/src/test/groovy/nextflow/container/PodmanBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/PodmanBuilderTest.groovy index 17689753b0..75dbf79dc8 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/PodmanBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/PodmanBuilderTest.groovy @@ -34,7 +34,7 @@ class PodmanBuilderTest extends Specification { def 'test podman mounts'() { given: - def builder = Spy(PodmanBuilder) + def builder = new PodmanBuilder('busybox') def files = [Paths.get('/folder/data'), Paths.get('/folder/db'), Paths.get('/folder/db') ] def real = [ Paths.get('/user/yo/nextflow/bin'), Paths.get('/user/yo/nextflow/work'), Paths.get('/db/pdb/local/data') ] def quotes = [ Paths.get('/folder with blanks/A'), Paths.get('/folder with blanks/B') ] @@ -53,7 +53,7 @@ class PodmanBuilderTest extends Specification { def 'test podman env'() { given: - def builder = Spy(PodmanBuilder) + def builder = new PodmanBuilder('busybox') expect: builder.makeEnv(ENV).toString() == EXPECT @@ -81,8 +81,7 @@ class 
PodmanBuilderTest extends Specification { .build() .runCommand == 'podman run -i -e "FOO=1" -e "BAR=hello world" -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new PodmanBuilder('ubuntu') - .params(temp:'/hola') + new PodmanBuilder('ubuntu', new PodmanConfig(temp: '/hola')) .build() .runCommand == 'podman run -i -v /hola:/tmp -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" ubuntu' @@ -91,8 +90,7 @@ class PodmanBuilderTest extends Specification { .build() .runCommand == 'podman run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --entrypoint /bin/bash busybox' - new PodmanBuilder('busybox') - .params(runOptions: '-x --zeta') + new PodmanBuilder('busybox', new PodmanConfig(runOptions: '-x --zeta')) .build() .runCommand == 'podman run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" -x --zeta busybox' @@ -101,8 +99,7 @@ class PodmanBuilderTest extends Specification { .build() .runCommand == 'podman run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name hola busybox' - new PodmanBuilder('busybox') - .params(engineOptions: '--tls-verify=false --cert-dir "/path/to/my/cert-dir"') + new PodmanBuilder('busybox', new PodmanConfig(engineOptions: '--tls-verify=false --cert-dir "/path/to/my/cert-dir"')) .build() .runCommand == 'podman --tls-verify=false --cert-dir "/path/to/my/cert-dir" run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" busybox' @@ -119,8 +116,7 @@ class PodmanBuilderTest extends Specification { .build() .runCommand == 'podman run -i -v /home/db:/home/db:ro -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new PodmanBuilder('fedora') - .params(mountFlags: 'Z') + new PodmanBuilder('fedora', new PodmanConfig(mountFlags: 'Z')) .addMount(db_file) .build() .runCommand == 'podman run -i -v /home/db:/home/db:Z -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR":Z -w "$NXF_TASK_WORKDIR" fedora' @@ -156,6 +152,7 @@ class 
PodmanBuilderTest extends Specification { def 'test get commands'() { when: + def config def podman = new PodmanBuilder('busybox').setName('c1').build() then: podman.runCommand == 'podman run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name c1 busybox' @@ -163,7 +160,8 @@ class PodmanBuilderTest extends Specification { podman.killCommand == 'podman stop c1' when: - podman = new PodmanBuilder('busybox').setName('c3').params(remove: true).build() + config = new PodmanConfig(remove: true) + podman = new PodmanBuilder('busybox', config).setName('c3').build() then: podman.runCommand == 'podman run -i -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name c3 busybox' podman.removeCommand == 'podman rm c3' @@ -175,7 +173,8 @@ class PodmanBuilderTest extends Specification { podman.killCommand == 'podman kill -s SIGKILL c4' when: - podman = new PodmanBuilder('busybox').setName('c5').params(kill: false,remove: false).build() + config = new PodmanConfig(remove: false) + podman = new PodmanBuilder('busybox', config).setName('c5').params(kill: false).build() then: podman.killCommand == null podman.removeCommand == null @@ -205,7 +204,8 @@ class PodmanBuilderTest extends Specification { def 'should return mount flags'() { given: - def builder = new PodmanBuilder().params(mountFlags: flags) + def config = new PodmanConfig(mountFlags: flags) + def builder = new PodmanBuilder('busybox', config) expect: builder.mountFlags(readOnly) == expected diff --git a/modules/nextflow/src/test/groovy/nextflow/container/SarusBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/SarusBuilderTest.groovy index 1f9037b582..075f0d9475 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/SarusBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/SarusBuilderTest.groovy @@ -21,7 +21,7 @@ import java.nio.file.Paths * * @author Marco De La Pierre */ -class SarusrBuilderTest extends Specification { +class 
SarusBuilderTest extends Specification { def 'test sarus env'() { @@ -40,8 +40,7 @@ class SarusrBuilderTest extends Specification { .build() .@runCommand == 'sarus run --mount=type=bind,source="$NXF_TASK_WORKDIR",destination="$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" busybox' - new SarusBuilder('busybox') - .params(verbose: true) + new SarusBuilder('busybox', new SarusConfig(verbose: true)) .build() .@runCommand == 'sarus --verbose run --mount=type=bind,source="$NXF_TASK_WORKDIR",destination="$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" busybox' @@ -51,8 +50,7 @@ class SarusrBuilderTest extends Specification { .build() .@runCommand == 'sarus run -e "VAR_X=1" -e "VAR_Y=2" -e "VAR_Z=3" --mount=type=bind,source="$NXF_TASK_WORKDIR",destination="$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new SarusBuilder('busybox') - .params(runOptions: '-x --zeta') + new SarusBuilder('busybox', new SarusConfig(runOptions: '-x --zeta')) .build() .@runCommand == 'sarus run --mount=type=bind,source="$NXF_TASK_WORKDIR",destination="$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" -x --zeta busybox' diff --git a/modules/nextflow/src/test/groovy/nextflow/container/ShifterBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/ShifterBuilderTest.groovy index 07c72be684..bc2c7f00e5 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/ShifterBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/ShifterBuilderTest.groovy @@ -39,8 +39,7 @@ class ShifterBuilderTest extends Specification { .build() .@runCommand == 'shifter --image busybox' - new ShifterBuilder('busybox') - .params(verbose: true) + new ShifterBuilder('busybox', new ShifterConfig(verbose: true)) .build() .@runCommand == 'shifter --verbose --image busybox' diff --git a/modules/nextflow/src/test/groovy/nextflow/container/SingularityBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/SingularityBuilderTest.groovy index f4b8013669..f7949946be 100644 --- 
a/modules/nextflow/src/test/groovy/nextflow/container/SingularityBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/SingularityBuilderTest.groovy @@ -41,13 +41,11 @@ class SingularityBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid busybox' - new SingularityBuilder('busybox') - .params(engineOptions: '-q -v') + new SingularityBuilder('busybox', new SingularityConfig(engineOptions: '-q -v')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity -q -v run --no-home --pid busybox' - new SingularityBuilder('busybox') - .params(runOptions: '--contain --writable') + new SingularityBuilder('busybox', new SingularityConfig(runOptions: '--contain --writable')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid --contain --writable busybox' @@ -56,31 +54,27 @@ class SingularityBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path1) .addMount(path2) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid -B /foo/data/file1 -B /bar/data/file2 -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" 
${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid -B /foo/data/file1 -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .params(readOnlyInputs: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid -B /foo/data/file1:/foo/data/file1:ro -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path3) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity run --no-home --pid -B /bar/data\\ file -B "$NXF_TASK_WORKDIR" ubuntu' @@ -105,62 +99,48 @@ class SingularityBuilderTest extends Specification { .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid -B "$NXF_TASK_WORKDIR" busybox' - new SingularityBuilder('busybox') - .params(engineOptions: '-q -v') + new SingularityBuilder('busybox', new SingularityConfig(engineOptions: '-q -v')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity -q -v exec --no-home --pid -B "$NXF_TASK_WORKDIR" busybox' - new SingularityBuilder('busybox') - .params(runOptions: '--contain --writable') + new SingularityBuilder('busybox', new SingularityConfig(runOptions: '--contain --writable')) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid -B "$NXF_TASK_WORKDIR" --contain --writable busybox' - new 
SingularityBuilder('ubuntu') - .params(autoMounts: false) + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: false)) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path1) .addMount(path2) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid -B /foo/data/file1 -B /bar/data/file2 -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid -B /foo/data/file1 -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path1) .addMount(path1) - .params(autoMounts: true) .params(readOnlyInputs: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid -B /foo/data/file1:/foo/data/file1:ro -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new SingularityConfig(autoMounts: true)) .addMount(path3) - .params(autoMounts: true) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home --pid -B /bar/data\\ file -B "$NXF_TASK_WORKDIR" ubuntu' - new SingularityBuilder('ubuntu') + new SingularityBuilder('ubuntu', new 
SingularityConfig(autoMounts: false)) .params(newPidNamespace: false) - .params(autoMounts: false) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} singularity exec --no-home ubuntu' - new SingularityBuilder('ubuntu') - .params(oci: true) - .build() - .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} ${XDG_RUNTIME_DIR:+XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"} ${DBUS_SESSION_BUS_ADDRESS:+DBUS_SESSION_BUS_ADDRESS="$DBUS_SESSION_BUS_ADDRESS"} singularity exec --no-home --oci -B "$NXF_TASK_WORKDIR" ubuntu' - - new SingularityBuilder('ubuntu') - .params(ociMode: true) + new SingularityBuilder('ubuntu', new SingularityConfig(ociMode: true)) .build() .runCommand == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} ${XDG_RUNTIME_DIR:+XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"} ${DBUS_SESSION_BUS_ADDRESS:+DBUS_SESSION_BUS_ADDRESS="$DBUS_SESSION_BUS_ADDRESS"} singularity exec --no-home --oci -B "$NXF_TASK_WORKDIR" ubuntu' @@ -245,7 +225,7 @@ class SingularityBuilderTest extends Specification { def 'test singularity env'() { given: - def builder = Spy(SingularityBuilder) + def builder = new SingularityBuilder('busybox') expect: builder.makeEnv(ENV).toString() == RESULT @@ -264,7 +244,7 @@ class SingularityBuilderTest extends Specification { @Unroll def 'should quote env value' () { given: - def builder = Spy(SingularityBuilder) + def builder = new SingularityBuilder('busybox') expect: builder.quoteValue(STR) == EXPECTED diff --git a/modules/nextflow/src/test/groovy/nextflow/container/SingularityCacheTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/SingularityCacheTest.groovy index 33b16a1c12..e80ffe3e44 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/SingularityCacheTest.groovy +++ 
b/modules/nextflow/src/test/groovy/nextflow/container/SingularityCacheTest.groovy @@ -33,7 +33,7 @@ class SingularityCacheTest extends Specification { def 'should return a simple name given an image url'() { given: - def helper = new SingularityCache(Mock(ContainerConfig)) + def helper = SingularityCache.create(new SingularityConfig([:])) expect: helper.simpleName(url) == expected @@ -57,7 +57,7 @@ class SingularityCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new SingularityCache([libraryDir: "$dir"] as ContainerConfig) + def cache = SingularityCache.create(new SingularityConfig(libraryDir: "$dir")) then: cache.getLibraryDir() == dir @@ -71,7 +71,7 @@ class SingularityCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new SingularityCache(GroovyMock(ContainerConfig), [NXF_SINGULARITY_LIBRARYDIR: "$dir"]) + def cache = SingularityCache.create(new SingularityConfig([:]), [NXF_SINGULARITY_LIBRARYDIR: "$dir"]) then: cache.getLibraryDir() == dir @@ -85,7 +85,7 @@ class SingularityCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new SingularityCache([cacheDir: "$dir"] as ContainerConfig) + def cache = SingularityCache.create(new SingularityConfig(cacheDir: "$dir")) then: cache.getCacheDir() == dir @@ -99,7 +99,7 @@ class SingularityCacheTest extends Specification { def dir = Files.createTempDirectory('test') when: - def cache = new SingularityCache(GroovyMock(ContainerConfig), [NXF_SINGULARITY_CACHEDIR: "$dir"]) + def cache = SingularityCache.create(new SingularityConfig([:]), [NXF_SINGULARITY_CACHEDIR: "$dir"]) then: cache.getCacheDir() == dir @@ -116,9 +116,9 @@ class SingularityCacheTest extends Specification { def LOCAL = 'foo-latest.img' def TARGET_FILE = dir.resolve(LOCAL) def TEMP_FILE = dir.resolve('foo-latest.pulling'); TEMP_FILE.text = 'foo' - ContainerConfig config = [noHttps: true] + def config = new 
SingularityConfig(noHttps: true) and: - def cache = Spy(new SingularityCache(config)) + def cache = Spy(SingularityCache.create(config)) when: def result = cache.downloadContainerImage(IMAGE) @@ -148,7 +148,7 @@ class SingularityCacheTest extends Specification { def container = dir.resolve(LOCAL) container.text = 'dummy' and: - def cache = Spy(new SingularityCache([:] as ContainerConfig)) + def cache = Spy(SingularityCache.create(new SingularityConfig([:]))) when: def result = cache.downloadContainerImage(IMAGE) @@ -172,7 +172,7 @@ class SingularityCacheTest extends Specification { def container = dir.resolve(LOCAL) container.text = 'dummy' and: - def cache = Spy(new SingularityCache([:] as ContainerConfig)) + def cache = Spy(SingularityCache.create(new SingularityConfig([:]))) when: def result = cache.downloadContainerImage(IMAGE) @@ -197,7 +197,7 @@ class SingularityCacheTest extends Specification { def dir = Paths.get('/test/path') def container = dir.resolve(LOCAL) and: - def cache = Spy(new SingularityCache([:] as ContainerConfig)) + def cache = Spy(SingularityCache.create(new SingularityConfig([:]))) when: def file = cache.getCachePathFor(IMAGE) diff --git a/modules/nextflow/src/test/groovy/nextflow/container/UdockerBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/UdockerBuilderTest.groovy deleted file mode 100644 index 147f7ed3db..0000000000 --- a/modules/nextflow/src/test/groovy/nextflow/container/UdockerBuilderTest.groovy +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2013-2024, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package nextflow.container - -import java.nio.file.Paths - -import spock.lang.Specification - -/** - * - * @author Paolo Di Tommaso - */ -class UdockerBuilderTest extends Specification { - - - def 'test udocker env'() { - - given: - def builder = new UdockerBuilder('x') - - expect: - builder.makeEnv('X=1').toString() == '-e "X=1"' - builder.makeEnv([VAR_X:1, VAR_Y: 2]).toString() == '-e "VAR_X=1" -e "VAR_Y=2"' - } - - - def 'test udocker mounts'() { - - given: - def builder = new UdockerBuilder('x') - def files = [Paths.get('/folder/data'), Paths.get('/folder/db'), Paths.get('/folder/db') ] - def real = [ Paths.get('/user/yo/nextflow/bin'), Paths.get('/user/yo/nextflow/work'), Paths.get('/db/pdb/local/data') ] - def quotes = [ Paths.get('/folder with blanks/A'), Paths.get('/folder with blanks/B') ] - - expect: - builder.makeVolumes([]).toString() == '-v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" ' - builder.makeVolumes(files).toString() == '-v /folder:/folder -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" ' - builder.makeVolumes(real).toString() == '-v /user/yo/nextflow:/user/yo/nextflow -v /db/pdb/local/data:/db/pdb/local/data -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" ' - builder.makeVolumes(quotes).toString() == '-v /folder\\ with\\ blanks:/folder\\ with\\ blanks -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" ' - - } - - def 'should get run cmd line' () { - - given: - def env = [FOO:1, BAR:'hello world'] - def db_file = Paths.get('/home/db') - - expect: - new UdockerBuilder('fedora') - .build() - .runCommandRaw == 'udocker.py run --rm -v 
"$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "fedora:latest")' - - new UdockerBuilder('fedora') - .addEnv(env) - .build() - .runCommandRaw == 'udocker.py run --rm -e "FOO=1" -e "BAR=hello world" -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "fedora:latest")' - - new UdockerBuilder('fedora') - .setCpuset('1,2') - .build() - .runCommandRaw == 'udocker.py run --rm --cpuset-cpus=1,2 -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "fedora:latest")' - - new UdockerBuilder('fedora') - .addMount(db_file) - .addEnv(env) - .build() - .runCommandRaw == 'udocker.py run --rm -e "FOO=1" -e "BAR=hello world" -v /home/db:/home/db -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "fedora:latest")' - - new UdockerBuilder('busybox') - .params(remove: false) - .build() - .runCommandRaw == 'udocker.py run -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "busybox:latest")' - - new UdockerBuilder('busybox') - .params(runOptions: '-x --zeta') - .build() - .runCommandRaw == 'udocker.py run --rm -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome -x --zeta $(udocker.py create "busybox:latest")' - - new UdockerBuilder('busybox') - .params(entry: '/bin/blah') - .build() - .runCommandRaw == 'udocker.py run --rm -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "busybox:latest")' - - } - - def 'should append the run command line' () { - - given: - def builder = new UdockerBuilder('ubuntu:latest') - - when: - def result = builder.build().getRunCommand() - then: - result == ''' - ((udocker.py images | grep -E -o "^ubuntu:latest\\s") || udocker.py pull "ubuntu:latest")>/dev/null - [[ $? 
!= 0 ]] && echo "Udocker failed while pulling container \\`ubuntu:latest\\`" >&2 && exit 1 - udocker.py run --rm -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "ubuntu:latest") - ''' - .stripIndent().trim() - - builder.getRemoveCommand() == null - builder.getKillCommand() == '[[ "$pid" ]] && nxf_kill $pid' - } - - def 'should append the run command line with launcher' () { - - when: - def builder = new UdockerBuilder('ubuntu:latest') - def result = builder.build().getRunCommand('bwa --this --that') - then: - result == ''' - ((udocker.py images | grep -E -o "^ubuntu:latest\\s") || udocker.py pull "ubuntu:latest")>/dev/null - [[ $? != 0 ]] && echo "Udocker failed while pulling container \\`ubuntu:latest\\`" >&2 && exit 1 - udocker.py run --rm -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "ubuntu:latest") bwa --this --that - ''' - .stripIndent().trim() - - builder.getRemoveCommand() == null - builder.getKillCommand() == '[[ "$pid" ]] && nxf_kill $pid' - - - when: - builder = new UdockerBuilder('ubuntu:latest').params(entry:'/bin/bash') - result = builder.build().getRunCommand('bwa --this --that') - then: - result == ''' - ((udocker.py images | grep -E -o "^ubuntu:latest\\s") || udocker.py pull "ubuntu:latest")>/dev/null - [[ $? 
!= 0 ]] && echo "Udocker failed while pulling container \\`ubuntu:latest\\`" >&2 && exit 1 - udocker.py run --rm -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --bindhome $(udocker.py create "ubuntu:latest") /bin/bash -c "bwa --this --that" - ''' - .stripIndent().trim() - - builder.getRemoveCommand() == null - builder.getKillCommand() == '[[ "$pid" ]] && nxf_kill $pid' - } - -} diff --git a/modules/nextflow/src/test/groovy/nextflow/container/resolver/DefaultContainerResolverTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/resolver/DefaultContainerResolverTest.groovy index f1c6e3dc9b..1216c4777e 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/resolver/DefaultContainerResolverTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/resolver/DefaultContainerResolverTest.groovy @@ -17,7 +17,7 @@ package nextflow.container.resolver -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.executor.Executor import nextflow.processor.TaskProcessor import nextflow.processor.TaskRun @@ -46,7 +46,7 @@ class DefaultContainerResolverTest extends Specification { when: def result = resolver.resolveImage(task, 'ubuntu:latest') then: - 1 * task.getContainerConfig() >> new ContainerConfig([engine:'docker', enabled:true, registry:'quay.io']) + 1 * task.getContainerConfig() >> new DockerConfig([enabled:true, registry:'quay.io']) and: result.source == 'ubuntu:latest' result.target == 'quay.io/ubuntu:latest' diff --git a/modules/nextflow/src/test/groovy/nextflow/dag/DotRendererTest.groovy b/modules/nextflow/src/test/groovy/nextflow/dag/DotRendererTest.groovy index 52e5b61c23..ea591a42b6 100644 --- a/modules/nextflow/src/test/groovy/nextflow/dag/DotRendererTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/dag/DotRendererTest.groovy @@ -54,7 +54,7 @@ class DotRendererTest extends Specification { dag.normalize() when: - new DotRenderer('TheGraph').renderDocument(dag, file) 
+ new DotRenderer('TheGraph', 'TB').renderDocument(dag, file) then: file.text == ''' diff --git a/modules/nextflow/src/test/groovy/nextflow/dag/MermaidRendererTest.groovy b/modules/nextflow/src/test/groovy/nextflow/dag/MermaidRendererTest.groovy index 30289a0b20..112154233d 100644 --- a/modules/nextflow/src/test/groovy/nextflow/dag/MermaidRendererTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/dag/MermaidRendererTest.groovy @@ -19,7 +19,7 @@ package nextflow.dag import java.nio.file.Files import groovyx.gpars.dataflow.DataflowQueue -import nextflow.Session +import nextflow.trace.config.DagConfig import spock.lang.Specification /** * @@ -33,16 +33,16 @@ class MermaidRendererTest extends Specification { def ch1 = new DataflowQueue() def ch2 = new DataflowQueue() def ch3 = new DataflowQueue() - - def session = new Session([dag: [verbose: true]]) + and: def dag = new DAG() dag.addOperatorNode('Op1', ch1, ch2) dag.addOperatorNode('Op2', ch2, ch3) - dag.normalize() + and: + def config = new DagConfig(verbose: true) when: - new MermaidRenderer().renderDocument(dag, file) + new MermaidRenderer(config).renderDocument(dag, file) then: file.text == ''' diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy index 8269e89077..329862828b 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/AbstractGridExecutorTest.groovy @@ -55,16 +55,15 @@ class AbstractGridExecutorTest extends Specification { def 'should return a custom job name'() { given: - def exec = [:] as AbstractGridExecutor - exec.session = [:] as Session - exec.session.config = [:] + def exec = Spy(AbstractGridExecutor) - expect: + when: + exec.config = new ExecutorConfig([:]) + then: exec.resolveCustomJobName(Mock(TaskRun)) == null when: - exec.session = [:] as Session - 
exec.session.config = [ executor: [jobName: { task.name.replace(' ','_') } ] ] + exec.config = new ExecutorConfig(jobName: { task.name.replace(' ','_') }) then: exec.resolveCustomJobName(new TaskRun(config: [name: 'hello world'])) == 'hello_world' @@ -73,19 +72,18 @@ class AbstractGridExecutorTest extends Specification { def 'should return job submit name' () { given: - def exec = [:] as AbstractGridExecutor - exec.session = [:] as Session - exec.session.config = [:] + def exec = Spy(AbstractGridExecutor) final taskName = 'Hello world' final taskRun = new TaskRun(name: taskName, config: [name: taskName]) - expect: + when: + exec.config = new ExecutorConfig([:]) + then: exec.getJobNameFor(taskRun) == 'nf-Hello_world' when: - exec.session = [:] as Session - exec.session.config = [ executor: [jobName: { task.name.replace(' ','_') } ] ] + exec.config = new ExecutorConfig(jobName: { task.name.replace(' ','_') }) then: exec.getJobNameFor(taskRun) == 'Hello_world' @@ -137,10 +135,9 @@ class AbstractGridExecutorTest extends Specification { def STATUS = ['123': AbstractGridExecutor.QueueStatus.RUNNING] def NAME = 'TheExecutorName' and: - def session = Mock(Session) { getConfig()>>[:] } + def config = Spy(new ExecutorConfig([:])) and: - def exec = Spy(AbstractGridExecutor) - exec.session = session + def exec = Spy(AbstractGridExecutor) { getConfig() >> config } exec.@queueInterval = Duration.of('1m') exec.name = NAME @@ -148,7 +145,7 @@ class AbstractGridExecutorTest extends Specification { when: def result = exec.getQueueStatus('foo') then: - 1 * session.getExecConfigProp(NAME,'queueGlobalStatus',false)>>false + 1 * config.getExecConfigProp(NAME,'queueGlobalStatus',false)>>false 1 * exec.getQueueStatus0('foo') >> STATUS and: result == STATUS @@ -157,7 +154,7 @@ class AbstractGridExecutorTest extends Specification { when: result = exec.getQueueStatus('foo') then: - 1 * session.getExecConfigProp(NAME,'queueGlobalStatus',false)>>true + 1 * 
config.getExecConfigProp(NAME,'queueGlobalStatus',false)>>true 1 * exec.getQueueStatus0(null) >> STATUS and: result == STATUS diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy index 3ec807639f..4b48881ffa 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/BashWrapperBuilderTest.groovy @@ -23,9 +23,13 @@ import java.nio.file.Paths import nextflow.Session import nextflow.SysEnv -import nextflow.container.ContainerConfig import nextflow.container.DockerBuilder +import nextflow.container.DockerConfig +import nextflow.container.PodmanConfig +import nextflow.container.SarusConfig +import nextflow.container.ShifterConfig import nextflow.container.SingularityBuilder +import nextflow.container.SingularityConfig import nextflow.processor.TaskBean import nextflow.util.MustacheTemplateEngine import org.yaml.snakeyaml.Yaml @@ -203,7 +207,7 @@ class BashWrapperBuilderTest extends Specification { bash.isStatsEnabled() >> false bash.getStageInMode() >> 'symlink' bash.getInputFiles() >> [:] - bash.getContainerConfig() >> [engine: 'singularity', envWhitelist: 'FOO,BAR'] + bash.getContainerConfig() >> new SingularityConfig(envWhitelist: 'FOO,BAR') bash.getContainerImage() >> 'foo/bar' bash.getContainerMount() >> null bash.getContainerMemory() >> null @@ -226,7 +230,7 @@ class BashWrapperBuilderTest extends Specification { def 'should add resolved inputs'() { given: def bash = Spy(new BashWrapperBuilder(Mock(TaskBean))) - bash.getContainerConfig() >> [engine: 'docker'] + bash.getContainerConfig() >> new DockerConfig([:]) def BUILDER = Mock(DockerBuilder) def INPUTS = ['/some/path': Paths.get('/store/path.txt')] @@ -235,7 +239,7 @@ class BashWrapperBuilderTest extends Specification { when: bash.createContainerBuilder(null) then: - 
bash.createContainerBuilder0('docker') >> BUILDER + bash.createContainerBuilder0() >> BUILDER bash.getInputFiles() >> INPUTS bash.getStageInMode() >> null 1 * BUILDER.addMountForInputs(INPUTS) >> null @@ -244,7 +248,7 @@ class BashWrapperBuilderTest extends Specification { when: bash.createContainerBuilder(null) then: - bash.createContainerBuilder0('docker') >> BUILDER + bash.createContainerBuilder0() >> BUILDER bash.getStageInMode() >> 'copy' 0 * BUILDER.addMountForInputs(_) >> null } @@ -338,7 +342,7 @@ class BashWrapperBuilderTest extends Specification { arrayIndexName: 'SLURM_ARRAY_TASK_ID', arrayIndexStart: 0, arrayWorkDirs: [ Path.of('/work/01'), Path.of('/work/02'), Path.of('/work/03') ], - containerConfig: [enabled: true], + containerConfig: new DockerConfig(enabled: true), containerImage: 'quay.io/nextflow:bash', outputFiles: ['foo.txt', '*.bar', '**/baz'] ) @@ -395,7 +399,7 @@ class BashWrapperBuilderTest extends Specification { arrayIndexName: 'SLURM_ARRAY_TASK_ID', arrayIndexStart: 0, arrayWorkDirs: [ Path.of('/work/01'), Path.of('/work/02'), Path.of('/work/03') ], - containerConfig: [enabled: true], + containerConfig: new DockerConfig(enabled: true), containerImage: 'quay.io/nextflow:bash', outputFiles: ['foo.txt', '*.bar', '**/baz'] ) @@ -592,7 +596,7 @@ class BashWrapperBuilderTest extends Specification { binding.container_env == null when: - binding = newBashWrapperBuilder(environment: [FOO:'aa', BAR:'bb'], containerEnabled: true, containerImage: 'foo', containerConfig: [engine: 'docker']).makeBinding() + binding = newBashWrapperBuilder(environment: [FOO:'aa', BAR:'bb'], containerEnabled: true, containerImage: 'foo', containerConfig: new DockerConfig([:])).makeBinding() then: binding.containsKey('task_env') binding.containsKey('container_env') @@ -848,7 +852,7 @@ class BashWrapperBuilderTest extends Specification { def binding = newBashWrapperBuilder( containerImage: 'busybox', containerEnabled: true, - containerConfig: [engine: 'docker', temp: 
'auto', enabled: true] ).makeBinding() + containerConfig: new DockerConfig(temp: 'auto', enabled: true) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v $(nxf_mktemp):/tmp -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' @@ -862,7 +866,7 @@ class BashWrapperBuilderTest extends Specification { containerImage: 'busybox', containerEnabled: true, environment: [FOO: 'something'], - containerConfig: [engine: 'docker', temp: 'auto', enabled: true] ).makeBinding() + containerConfig: new DockerConfig(temp: 'auto', enabled: true) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v $(nxf_mktemp):/tmp -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -c "eval $(nxf_container_env); /bin/bash -ue /work/dir/.command.sh"' @@ -883,7 +887,7 @@ class BashWrapperBuilderTest extends Specification { def binding = newBashWrapperBuilder( containerImage: 'busybox', containerEnabled: true, - containerConfig: [engine: 'docker', temp: 'auto', enabled: true, entrypointOverride: false] ).makeBinding() + containerConfig: new DockerConfig(temp: 'auto', enabled: true, entrypointOverride: false) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v $(nxf_mktemp):/tmp -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' @@ -897,7 +901,7 @@ class BashWrapperBuilderTest extends Specification { containerImage: 'busybox', containerEnabled: true, environment: [FOO:'hello'], - containerConfig: [engine: 'docker', temp: 'auto', enabled: true, entrypointOverride: false] ).makeBinding() + containerConfig: new DockerConfig(temp: 'auto', enabled: true, entrypointOverride: false) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v $(nxf_mktemp):/tmp -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -c "eval 
$(nxf_container_env); /bin/bash -ue /work/dir/.command.sh"' @@ -917,7 +921,7 @@ class BashWrapperBuilderTest extends Specification { when: def binding = newBashWrapperBuilder( containerImage: 'busybox', - containerConfig: [engine: 'docker', sudo: true, enabled: true], + containerConfig: new DockerConfig(sudo: true, enabled: true), containerEnabled: true ).makeBinding() then: @@ -931,7 +935,7 @@ class BashWrapperBuilderTest extends Specification { def binding = newBashWrapperBuilder( containerEnabled: true, containerImage: 'ubuntu', - containerConfig: [engine: 'docker', temp: 'auto', enabled: true, remove:false, kill: false] ).makeBinding() + containerConfig: new DockerConfig(temp: 'auto', enabled: true, remove: false, kill: false) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v $(nxf_mktemp):/tmp -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID ubuntu /bin/bash -ue /work/dir/.command.sh' @@ -945,7 +949,7 @@ class BashWrapperBuilderTest extends Specification { def binding = newBashWrapperBuilder( containerEnabled: true, containerImage: 'ubuntu', - containerConfig: [engine: 'docker', enabled: true, remove:false, kill: 'SIGXXX'] ).makeBinding() + containerConfig: new DockerConfig(enabled: true, remove: false, kill: 'SIGXXX') ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID ubuntu /bin/bash -ue /work/dir/.command.sh' @@ -960,7 +964,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'busybox', containerMount: '/folder with blanks' as Path, - containerConfig: [engine: 'docker', enabled: true] ).makeBinding() + containerConfig: new DockerConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v /folder\\ with\\ blanks:/folder\\ with\\ blanks -v /work/dir:/work/dir -w "\$NXF_TASK_WORKDIR" --name \$NXF_BOXID busybox /bin/bash -ue 
/work/dir/.command.sh' @@ -975,7 +979,7 @@ class BashWrapperBuilderTest extends Specification { script: 'echo Hello world!', containerEnabled: true, containerImage: 'busybox', - containerConfig: [engine: 'docker', sudo: true, enabled: true] ).makeBinding() + containerConfig: new DockerConfig(sudo: true, enabled: true) ).makeBinding() then: binding.launch_cmd == 'sudo docker run -i -e "NXF_TASK_WORKDIR" -v /work/dir:/work/dir -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' @@ -992,7 +996,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'busybox', containerOptions: '-v /foo:/bar', - containerConfig: [engine: 'docker', enabled: true] ).makeBinding() + containerConfig: new DockerConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == 'docker run -i -e "NXF_TASK_WORKDIR" -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" -v /foo:/bar --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' @@ -1006,7 +1010,7 @@ class BashWrapperBuilderTest extends Specification { containerImage: 'busybox', containerEnabled: true, containerPlatform: 'linux/arm64', - containerConfig: [engine: 'docker', temp: 'auto', enabled: true] ).makeBinding() + containerConfig: new DockerConfig(temp: 'auto', enabled: true) ).makeBinding() then: binding.launch_cmd == 'docker run -i --platform linux/arm64 -e "NXF_TASK_WORKDIR" -v $(nxf_mktemp):/tmp -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' @@ -1019,7 +1023,7 @@ class BashWrapperBuilderTest extends Specification { def binding = newBashWrapperBuilder( containerEnabled: true, containerImage: 'busybox', - containerConfig: [enabled: true, engine: 'sarus'] as ContainerConfig ).makeBinding() + containerConfig: new SarusConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == '''\ @@ -1036,7 +1040,7 @@ class BashWrapperBuilderTest extends 
Specification { containerEnabled: true, containerImage: 'busybox', environment: [FOO: 'xxx'], - containerConfig: [enabled: true, engine: 'sarus'] as ContainerConfig ).makeBinding() + containerConfig: new SarusConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == '''\ @@ -1061,7 +1065,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'busybox', containerMount: '/folder with blanks' as Path, - containerConfig: [enabled: true, engine: 'sarus'] as ContainerConfig ).makeBinding() + containerConfig: new SarusConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == '''\ @@ -1078,7 +1082,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'busybox', containerOptions: '--mount=type=bind,source=/foo,destination=/bar', - containerConfig: [enabled: true, engine: 'sarus'] as ContainerConfig ).makeBinding() + containerConfig: new SarusConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == '''\ @@ -1095,7 +1099,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'docker://ubuntu:latest', environment: [PATH: '/path/to/bin:$PATH', FOO: 'xxx'], - containerConfig: [enabled: true, engine: 'shifter'] as ContainerConfig ).makeBinding() + containerConfig: new ShifterConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == '''\ @@ -1119,7 +1123,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'docker://ubuntu:latest', environment: [PATH: '/path/to/bin:$PATH', FOO: 'xxx'], - containerConfig: [enabled: true, engine: 'singularity'] as ContainerConfig ).makeBinding() + containerConfig: new SingularityConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} ${NXF_TASK_WORKDIR:+SINGULARITYENV_NXF_TASK_WORKDIR="$NXF_TASK_WORKDIR"} singularity exec --no-home 
--pid -B /work/dir docker://ubuntu:latest /bin/bash -c "cd $NXF_TASK_WORKDIR; eval $(nxf_container_env); /bin/bash -ue /work/dir/.command.sh"' @@ -1133,7 +1137,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'docker://ubuntu:latest', environment: [:], - containerConfig: [enabled: true, engine: 'singularity'] as ContainerConfig ).makeBinding() + containerConfig: new SingularityConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} ${NXF_TASK_WORKDIR:+SINGULARITYENV_NXF_TASK_WORKDIR="$NXF_TASK_WORKDIR"} singularity exec --no-home --pid -B /work/dir docker://ubuntu:latest /bin/bash -c "cd $NXF_TASK_WORKDIR; /bin/bash -ue /work/dir/.command.sh"' @@ -1147,7 +1151,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'docker://ubuntu:latest', environment: [PATH: '/path/to/bin:$PATH', FOO: 'xxx'], - containerConfig: [enabled: true, engine: 'singularity', entrypointOverride: true] as ContainerConfig ).makeBinding() + containerConfig: new SingularityConfig(enabled: true, entrypointOverride: true) ).makeBinding() then: binding.launch_cmd == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} ${NXF_TASK_WORKDIR:+SINGULARITYENV_NXF_TASK_WORKDIR="$NXF_TASK_WORKDIR"} singularity exec --no-home --pid -B /work/dir docker://ubuntu:latest /bin/bash -c "cd $NXF_TASK_WORKDIR; eval $(nxf_container_env); /bin/bash -ue /work/dir/.command.sh"' @@ -1161,7 +1165,7 @@ class BashWrapperBuilderTest extends Specification { containerEnabled: true, containerImage: 'docker://ubuntu:latest', environment: [PATH: '/path/to/bin:$PATH', FOO: 'xxx'], - containerConfig: [enabled: true, engine: 'singularity', oci: true] as ContainerConfig ).makeBinding() + containerConfig: new SingularityConfig(enabled: true, ociMode: true) ).makeBinding() then: 
binding.launch_cmd == 'set +u; env - PATH="$PATH" ${TMP:+SINGULARITYENV_TMP="$TMP"} ${TMPDIR:+SINGULARITYENV_TMPDIR="$TMPDIR"} ${XDG_RUNTIME_DIR:+XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"} ${DBUS_SESSION_BUS_ADDRESS:+DBUS_SESSION_BUS_ADDRESS="$DBUS_SESSION_BUS_ADDRESS"} ${NXF_TASK_WORKDIR:+SINGULARITYENV_NXF_TASK_WORKDIR="$NXF_TASK_WORKDIR"} singularity exec --no-home --oci -B /work/dir docker://ubuntu:latest /bin/bash -c "cd $NXF_TASK_WORKDIR; eval $(nxf_container_env); /bin/bash -ue /work/dir/.command.sh"' @@ -1194,7 +1198,7 @@ class BashWrapperBuilderTest extends Specification { binding = newBashWrapperBuilder(environment: ENV, containerEnabled: true, containerImage: 'busybox', - containerConfig: [enabled: true, engine: 'docker']).makeBinding() + containerConfig: new DockerConfig(enabled: true)).makeBinding() then: binding.task_env == null binding.container_env == ''' @@ -1401,7 +1405,7 @@ class BashWrapperBuilderTest extends Specification { def binding = newBashWrapperBuilder( containerImage: 'busybox', containerEnabled: true, - containerConfig: [engine: 'podman', enabled: true] ).makeBinding() + containerConfig: new PodmanConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == 'podman run -i -e "NXF_TASK_WORKDIR" -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' @@ -1410,16 +1414,22 @@ class BashWrapperBuilderTest extends Specification { } def 'should create wrapper with podman with legacy entrypoint' () { + given: + SysEnv.push(NXF_CONTAINER_ENTRYPOINT_OVERRIDE: 'true') + when: def binding = newBashWrapperBuilder( containerImage: 'busybox', containerEnabled: true, - containerConfig: [engine: 'podman', enabled: true, entrypointOverride: true] ).makeBinding() + containerConfig: new PodmanConfig(enabled: true, entrypointOverride: true) ).makeBinding() then: binding.launch_cmd == 'podman run -i -e "NXF_TASK_WORKDIR" -v /work/dir:/work/dir -w "$NXF_TASK_WORKDIR" --entrypoint /bin/bash --name $NXF_BOXID 
busybox -c "/bin/bash -ue /work/dir/.command.sh"' binding.cleanup_cmd == 'podman rm $NXF_BOXID &>/dev/null || true\n' binding.kill_cmd == 'podman stop $NXF_BOXID' + + cleanup: + SysEnv.pop() } def 'should create wrapper with podman and scratch' () { @@ -1428,7 +1438,7 @@ class BashWrapperBuilderTest extends Specification { scratch: true, containerImage: 'busybox', containerEnabled: true, - containerConfig: [engine: 'podman', enabled: true] ).makeBinding() + containerConfig: new PodmanConfig(enabled: true) ).makeBinding() then: binding.launch_cmd == 'podman run -i -e "NXF_TASK_WORKDIR" -v /work/dir:/work/dir -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" --name $NXF_BOXID busybox /bin/bash -ue /work/dir/.command.sh' diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/BatchCleanupTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/BatchCleanupTest.groovy index 905a5e51fa..4535e743b8 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/BatchCleanupTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/BatchCleanupTest.groovy @@ -50,10 +50,10 @@ class BatchCleanupTest extends Specification { given: def batch = new BatchCleanup() - def lsf = Mock(AbstractGridExecutor) + def lsf = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([killBatchSize: 5]) + } lsf.getName() >> 'lsf' - and: - batch.size = 5 when: 5.times { batch.collect(lsf, 100+it) } diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/CondorExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/CondorExecutorTest.groovy index a9f7c02bc4..8f9d9bd078 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/CondorExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/CondorExecutorTest.groovy @@ -18,7 +18,7 @@ package nextflow.executor import java.nio.file.Files import nextflow.Session -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import 
nextflow.script.ProcessConfig import nextflow.processor.TaskConfig import nextflow.processor.TaskProcessor @@ -230,7 +230,7 @@ class CondorExecutorTest extends Specification { given: def session = Mock(Session) - session.getContainerConfig() >> new ContainerConfig(enabled:false) + session.getContainerConfig() >> new DockerConfig(enabled:false) def folder = Files.createTempDirectory('test') def executor = [:] as CondorExecutor def task = new TaskRun(name: 'Hello', workDir: folder, script: 'echo Hello world!') diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/CrgExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/CrgExecutorTest.groovy index f683bec657..6489325907 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/CrgExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/CrgExecutorTest.groovy @@ -19,7 +19,7 @@ package nextflow.executor import java.nio.file.Paths import nextflow.Session -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.processor.TaskArrayRun import nextflow.processor.TaskConfig import nextflow.processor.TaskProcessor @@ -220,7 +220,7 @@ class CrgExecutorTest extends Specification { def 'should get headers/2' () { given: def sess = Mock(Session) { - getContainerConfig() >> new ContainerConfig([engine:'docker',enabled: false]) + getContainerConfig() >> new DockerConfig(enabled: false) } and: def executor = Spy(new CrgExecutor()) { isContainerNative()>>false } @@ -482,7 +482,9 @@ class CrgExecutorTest extends Specification { task.workDir = Paths.get('/some/dir') task.script = 'echo hello' task.processor = Mock(TaskProcessor) - task.processor.getSession() >> Mock(Session) { getContainerConfig() >> [:] } + task.processor.getSession() >> Mock(Session) { + getContainerConfig() >> new DockerConfig([:]) + } task.processor.getProcessEnvironment() >> [:] task.processor.getConfig() >> [:] task.processor.getExecutor() >> Mock(Executor) @@ -508,7 
+510,7 @@ class CrgExecutorTest extends Specification { def 'should add cpuset option to docker command /2' () { given: def sess = Mock(Session) { - getContainerConfig(null) >> new ContainerConfig(enabled: true, engine:'docker') + getContainerConfig(null) >> new DockerConfig(enabled: true) } and: def executor = Spy(new CrgExecutor(session: sess)) { isContainerNative()>>false } @@ -547,7 +549,7 @@ class CrgExecutorTest extends Specification { def 'should add cpuset option to docker command /3' () { given: def sess = Mock(Session) { - getContainerConfig(null) >> new ContainerConfig(enabled: true, engine:'docker', legacy:true) + getContainerConfig(null) >> new DockerConfig(enabled: true, legacy:true) } and: def executor = Spy(new CrgExecutor(session: sess)) { isContainerNative()>>false } diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/ExecutorConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/ExecutorConfigTest.groovy new file mode 100644 index 0000000000..4dd3a17234 --- /dev/null +++ b/modules/nextflow/src/test/groovy/nextflow/executor/ExecutorConfigTest.groovy @@ -0,0 +1,120 @@ +/* + * Copyright 2013-2024, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package nextflow.executor + +import nextflow.util.Duration +import spock.lang.Specification +/** + * + * @author Paolo Di Tommaso + */ +class ExecutorConfigTest extends Specification { + + + def 'test get queue size'() { + + when: + def config = new ExecutorConfig([ '$sge':[queueSize: 123] ]) + then: + config.getQueueSize('sge', 1) == 123 + config.getQueueSize('xxx', 1) == 1 + config.getQueueSize(null, 1) == 1 + + when: + config = new ExecutorConfig([ queueSize: 321, '$sge':[queueSize:789] ]) + then: + config.getQueueSize('sge', 2) == 789 + config.getQueueSize('xxx', 2) == 321 + config.getQueueSize(null, 2) == 321 + + when: + config = new ExecutorConfig([:]) + then: + config.getQueueSize('sge', 1) == 1 + config.getQueueSize('xxx', 2) == 2 + config.getQueueSize(null, 3) == 3 + + } + + def 'test get poll interval'() { + + when: + def config = new ExecutorConfig(['$sge':[pollInterval: 345] ]) + then: + config.getPollInterval('sge').toMillis() == 345 + config.getPollInterval('xxx').toMillis() == 1_000 + config.getPollInterval(null).toMillis() == 1_000 + config.getPollInterval(null, 2_000 as Duration).toMillis() == 2_000 + + when: + config = new ExecutorConfig([ pollInterval: 321, '$sge':[pollInterval:789] ]) + then: + config.getPollInterval('sge').toMillis() == 789 + config.getPollInterval('xxx').toMillis() == 321 + config.getPollInterval(null).toMillis() == 321 + + } + + def 'test get exit read timeout'() { + + setup: + def config = new ExecutorConfig(['$sge':[exitReadTimeout: '5s'] ]) + + expect: + config.getExitReadTimeout('sge') == '5sec' as Duration + config.getExitReadTimeout('lsf') == '270sec' as Duration + + } + + def 'test get queue stat interval'() { + + setup: + def config = new ExecutorConfig(['$sge':[queueStatInterval: '4sec'] ]) + + expect: + config.getQueueStatInterval('sge') == '4sec' as Duration + config.getQueueStatInterval('lsf') == '1min' as Duration + + } + + def 'test monitor dump interval'() { + + setup: + def config = new 
ExecutorConfig(['$sge':[dumpInterval: '6sec'] ]) + + expect: + config.getMonitorDumpInterval('sge') == '6sec' as Duration + config.getMonitorDumpInterval('lsf') == '5min' as Duration + + } + + def 'test get exec config prop'() { + + when: + def config = new ExecutorConfig([cpus:123, queueSize:222, '$hazelcast': [queueSize:333] ]) + then: + config.getExecConfigProp( 'hazelcast', 'cpus', null ) == 123 + config.getExecConfigProp( 'hazelcast', 'queueSize', null ) == 333 + config.getExecConfigProp( 'local', 'queueSize', null ) == 222 + config.getExecConfigProp( 'local', 'queueSize', 'beta') == 222 + config.getExecConfigProp( 'hazelcast', 'jobName', null ) == null + config.getExecConfigProp( 'hazelcast', 'jobName', 'alpha') == 'alpha' + config.getExecConfigProp( 'hazelcast', 'jobName', 'alpha', [NXF_EXECUTOR_JOBNAME:'hola']) == 'hola' + } + + +} diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/GridExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/GridExecutorTest.groovy index 2d064c40b4..a342820e57 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/GridExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/GridExecutorTest.groovy @@ -34,7 +34,9 @@ class GridExecutorTest extends Specification { def work = Files.createTempDirectory('test') def task = new TaskRun(workDir: work, name: 'hello', config: new TaskConfig(queue: 'gamma')) - def executor = Mock(AbstractGridExecutor) + def executor = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } when: def handler = new GridTaskHandler(task, executor) @@ -55,7 +57,9 @@ class GridExecutorTest extends Specification { def task = Mock(TaskRun) task.getWorkDir() >> workDir - def executor = Mock(AbstractGridExecutor) + def executor = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } when: def handler = new GridTaskHandler(task, executor) @@ -93,7 +97,9 @@ class GridExecutorTest extends Specification { def task = new 
TaskRun() task.workDir = Files.createTempDirectory('testHandler') - def executor = Mock(AbstractGridExecutor) + def executor = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } when: def handler = new GridTaskHandler(task, executor) @@ -112,7 +118,9 @@ class GridExecutorTest extends Specification { def task = new TaskRun(name: 'task1') task.workDir = Files.createTempDirectory('testHandler') - def executor = Mock(AbstractGridExecutor) + def executor = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } executor.checkActiveStatus(_) >> { return true } when: @@ -142,7 +150,9 @@ class GridExecutorTest extends Specification { def task = new TaskRun(name: 'task1') task.workDir = Files.createTempDirectory('testHandler') - def executor = Mock(AbstractGridExecutor) + def executor = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } executor.checkActiveStatus(_) >> { true } when: diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/GridTaskHandlerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/GridTaskHandlerTest.groovy index b406499025..b8abe19dbf 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/GridTaskHandlerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/GridTaskHandlerTest.groovy @@ -20,7 +20,7 @@ package nextflow.executor import java.nio.file.Path import java.nio.file.Paths -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.exception.ProcessFailedException import nextflow.exception.ProcessNonZeroExitStatusException import nextflow.file.FileHelper @@ -54,7 +54,9 @@ class GridTaskHandlerTest extends Specification { def 'should capture error cause' () { given: def task = new TaskRun(name: 'foo', workDir: Paths.get('/some/work')) - def exec = Mock(AbstractGridExecutor) + def exec = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } def handler = Spy(new 
GridTaskHandler(task, exec)) when: @@ -83,7 +85,9 @@ class GridTaskHandlerTest extends Specification { def task = Mock(TaskRun) { getWorkDir() >> WORK_DIR } - def exec = Mock(AbstractGridExecutor) + def exec = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } def handler = Spy(new GridTaskHandler(task, exec)) when: @@ -109,11 +113,13 @@ class GridTaskHandlerTest extends Specification { getLogFile() >> logFile getContainer() >> 'ubuntu:latest' getProcessor() >> Mock(TaskProcessor) - getContainerConfig() >> Mock(ContainerConfig) { getEngine()>>'docker' } + getContainerConfig() >> Mock(DockerConfig) toTaskBean() >> Mock(TaskBean) { getWorkDir()>>WORK_DIR; getInputFiles()>>[:] } getConfig() >> Mock(TaskConfig) { getContainerOptions() >> '--this=that' } } - def exec = Mock(AbstractGridExecutor) + def exec = Mock(AbstractGridExecutor) { + getConfig() >> new ExecutorConfig([:]) + } def handler = Spy(new GridTaskHandler(task, exec)) when: diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/LsfExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/LsfExecutorTest.groovy index d66adf5d13..7ad66f287b 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/LsfExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/LsfExecutorTest.groovy @@ -34,10 +34,20 @@ import spock.lang.Unroll */ class LsfExecutorTest extends Specification { + def createExecutor(config) { + Spy(LsfExecutor) { + getConfig() >> config + } + } + + def createExecutor() { + createExecutor(new ExecutorConfig([:])) + } + def testCommandLine() { when: - def executor = Spy(LsfExecutor) + def executor = createExecutor() then: executor.getSubmitCommandLine(Mock(TaskRun), null) == ['bsub'] @@ -46,7 +56,7 @@ class LsfExecutorTest extends Specification { def testMemDirectiveMemUnit() { given: def WORK_DIR = Paths.get('/work/dir') - def executor = Spy(LsfExecutor) + def executor = createExecutor() executor.getSession() >> Mock(Session) 
and: def task = Mock(TaskRun) @@ -67,7 +77,7 @@ class LsfExecutorTest extends Specification { def testMemDirectiveMemUnit2() { given: def WORK_DIR = Paths.get('/work/dir') - def executor = Spy(new LsfExecutor(memUnit:'GB', usageUnit:'GB')) + def executor = createExecutor() executor.getSession() >> Mock(Session) and: def task = Mock(TaskRun) @@ -90,7 +100,7 @@ class LsfExecutorTest extends Specification { def testReserveMemPerTask() { given: def WORK_DIR = Paths.get('/work/dir') - def executor = Spy(new LsfExecutor(usageUnit:'KB', perJobMemLimit:true)) + def executor = createExecutor() executor.getSession() >> Mock(Session) and: def task = Mock(TaskRun) @@ -115,7 +125,7 @@ class LsfExecutorTest extends Specification { def testReserveMemPerTask2() { given: def WORK_DIR = Paths.get('/work/dir') - def executor = Spy(new LsfExecutor(perTaskReserve:true, perJobMemLimit: true, usageUnit:'KB')) + def executor = createExecutor() executor.getSession() >> Mock(Session) and: def task = Mock(TaskRun) @@ -124,6 +134,7 @@ class LsfExecutorTest extends Specification { when: executor.@perJobMemLimit = true executor.@perTaskReserve = true + executor.@usageUnit = 'KB' def result = executor.getDirectives(task, []) then: 1 * executor.getJobNameFor(task) >> 'foo' @@ -141,7 +152,7 @@ class LsfExecutorTest extends Specification { def 'test job script headers' () { setup: - def executor = Spy(LsfExecutor) + def executor = createExecutor() executor.@memUnit = 'MB' executor.@usageUnit = 'MB' executor.session = new Session() @@ -335,7 +346,7 @@ class LsfExecutorTest extends Specification { given: def config = new TaskConfig(clusterOptions: [], disk: '10GB') def WORKDIR = Paths.get('/my/work') - def executor = Spy(LsfExecutor) + def executor = createExecutor() executor.getSession() >> Mock(Session) executor.@memUnit = 'MB' and: @@ -372,7 +383,7 @@ class LsfExecutorTest extends Specification { when: // LSF executor - def executor = Spy(LsfExecutor) + def executor = createExecutor() 
executor.session = new Session() executor.@memUnit = 'MB' @@ -411,9 +422,9 @@ class LsfExecutorTest extends Specification { when: // LSF executor - def executor = Spy(LsfExecutor) + def config = new ExecutorConfig(perJobMemLimit: true) + def executor = createExecutor(config) executor.@memUnit = 'MB' - executor.session = new Session([executor: [perJobMemLimit: true]]) executor.register() then: @@ -433,7 +444,7 @@ class LsfExecutorTest extends Specification { given: // LSF executor - def executor = Spy(new LsfExecutor(memUnit: 'MB', usageUnit: 'MB')) + def executor = createExecutor() executor.session = new Session() executor.@memUnit = 'MB' executor.@usageUnit = 'MB' @@ -481,7 +492,7 @@ class LsfExecutorTest extends Specification { when: // executor stub object - def executor = Spy(LsfExecutor) + def executor = createExecutor() then: executor.parseJobId( 'Job <2329803> is submitted to default queue .' ) == '2329803' @@ -490,7 +501,7 @@ class LsfExecutorTest extends Specification { def testKillCommand() { when: // executor stub object - def executor = Spy(LsfExecutor) + def executor = createExecutor() then: executor.killTaskCommand('12345').join(' ') == 'bkill 12345' @@ -499,7 +510,7 @@ class LsfExecutorTest extends Specification { def testQstatCommand() { setup: - def executor = Spy(LsfExecutor) + def executor = createExecutor() def text = """\ JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME @@ -542,7 +553,7 @@ class LsfExecutorTest extends Specification { def 'should parse bjobs stats with extra headers' () { setup: - def executor = Spy(LsfExecutor) + def executor = createExecutor() def TEXT = ''' LSF is processing your request. Please wait ... LSF is processing your request. Please wait ... 
@@ -572,7 +583,7 @@ class LsfExecutorTest extends Specification { def testQueueStatusCommand() { setup: - def executor = Spy(LsfExecutor) + def executor = createExecutor() expect: executor.queueStatusCommand(null) == ['bjobs', '-w'] @@ -585,7 +596,7 @@ class LsfExecutorTest extends Specification { def testWrapString() { given: - def executor = Spy(LsfExecutor) + def executor = createExecutor() expect: executor.wrapHeader('') == '' @@ -599,7 +610,7 @@ class LsfExecutorTest extends Specification { def 'should apply lsf mem unit' () { given: - def executor = Spy(LsfExecutor) + def executor = createExecutor() executor.session = Mock(Session) when: @@ -620,78 +631,76 @@ class LsfExecutorTest extends Specification { def 'should apply per task reserve' () { given: - def session = Mock(Session) - def executor = Spy(LsfExecutor) - executor.session = session + def config = Spy(ExecutorConfig) + def executor = createExecutor(config) when: executor.register() then: 1 * executor.parseLsfConfig() >> [:] - 1 * session.getExecConfigProp(_,'perTaskReserve',_) >> false + 1 * config.getExecConfigProp(_,'perTaskReserve',_) >> false !executor.perTaskReserve when: executor.register() then: 1 * executor.parseLsfConfig() >> [RESOURCE_RESERVE_PER_TASK:'y'] - 1 * session.getExecConfigProp(_,'perTaskReserve',_) >> false + 1 * config.getExecConfigProp(_,'perTaskReserve',_) >> false !executor.perTaskReserve when: executor.register() then: 1 * executor.parseLsfConfig() >> [:] - 1 * session.getExecConfigProp(_,'perTaskReserve',_) >> true + 1 * config.getExecConfigProp(_,'perTaskReserve',_) >> true executor.perTaskReserve when: executor.register() then: 1 * executor.parseLsfConfig() >> [RESOURCE_RESERVE_PER_TASK:'y'] - 1 * session.getExecConfigProp(_,'perTaskReserve',_) >> { execName,name,defValue -> defValue } + 1 * config.getExecConfigProp(_,'perTaskReserve',_) >> { execName,name,defValue -> defValue } executor.perTaskReserve } def 'should apply lsf per job limit' () { given: - def session = 
Mock(Session) - def executor = Spy(LsfExecutor) - executor.session = session + def config = Spy(ExecutorConfig) + def executor = createExecutor(config) when: executor.register() then: 1 * executor.parseLsfConfig() >> [:] - 1 * session.getExecConfigProp(_,'perJobMemLimit',_) >> false + 1 * config.getExecConfigProp(_,'perJobMemLimit',_) >> false !executor.perJobMemLimit when: executor.register() then: 1 * executor.parseLsfConfig() >> [LSB_JOB_MEMLIMIT:'y'] - 1 * session.getExecConfigProp(_,'perJobMemLimit',_) >> false + 1 * config.getExecConfigProp(_,'perJobMemLimit',_) >> false !executor.perJobMemLimit when: executor.register() then: 1 * executor.parseLsfConfig() >> [:] - 1 * session.getExecConfigProp(_,'perJobMemLimit',_) >> true + 1 * config.getExecConfigProp(_,'perJobMemLimit',_) >> true executor.perJobMemLimit when: executor.register() then: 1 * executor.parseLsfConfig() >> [LSB_JOB_MEMLIMIT:'y'] - 1 * session.getExecConfigProp(_,'perJobMemLimit',_) >> { execName,name,defValue -> defValue } + 1 * config.getExecConfigProp(_,'perJobMemLimit',_) >> { execName,name,defValue -> defValue } executor.perJobMemLimit } def 'should parse lsf.config' () { given: - def executor = Spy(LsfExecutor) + def executor = createExecutor() def folder = Files.createTempDirectory('test') def file = folder.resolve('lsf.conf') file.text = ''' @@ -717,7 +726,7 @@ class LsfExecutorTest extends Specification { def 'should parse complex config file' () { given: - def executor = Spy(LsfExecutor) + def executor = createExecutor() def file = new File('src/test/resources/nextflow/executor/lsf.conf') assert file.exists(), 'Cannot find LSF config test file' @@ -740,7 +749,7 @@ class LsfExecutorTest extends Specification { @Unroll def 'should return valid job name given #name'() { given: - def executor = [:] as LsfExecutor + def executor = createExecutor() def task = Mock(TaskRun) task.getName() >> name @@ -758,7 +767,7 @@ class LsfExecutorTest extends Specification { def 'should get array index 
name and start' () { given: - def executor = Spy(LsfExecutor) + def executor = createExecutor() expect: executor.getArrayIndexName() == 'LSB_JOBINDEX' executor.getArrayIndexStart() == 1 @@ -767,7 +776,7 @@ class LsfExecutorTest extends Specification { @Unroll def 'should get array task id' () { given: - def executor = Spy(LsfExecutor) + def executor = createExecutor() expect: executor.getArrayTaskId(JOB_ID, TASK_INDEX) == EXPECTED @@ -787,10 +796,10 @@ class LsfExecutorTest extends Specification { task.processor.getSession() >> Mock(Session) task.config = Mock(TaskConfig) { getClusterOptionsAsList()>>[] } and: - def executor = Spy(LsfExecutor) + def config = new ExecutorConfig(account: ACCOUNT) + def executor = createExecutor(config) executor.getJobNameFor(_) >> 'foo' executor.getName() >> 'lsf' - executor.getSession() >> Mock(Session) { getExecConfigProp('lsf', 'account',null)>>ACCOUNT } when: def result = executor.getDirectives(task, []) diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy index 174a7b3b12..687ef13562 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/PbsExecutorTest.groovy @@ -19,7 +19,6 @@ package nextflow.executor import java.nio.file.Paths import nextflow.processor.TaskArrayRun -import nextflow.Session import nextflow.processor.TaskConfig import nextflow.processor.TaskProcessor import nextflow.processor.TaskRun @@ -32,10 +31,20 @@ import spock.lang.Unroll */ class PbsExecutorTest extends Specification { + def createExecutor(config) { + Spy(PbsExecutor) { + getConfig() >> config + } + } + + def createExecutor() { + createExecutor(new ExecutorConfig([:])) + } + def testGetCommandLine() { given: - def executor = Spy(PbsExecutor) + def executor = createExecutor() def task = Mock(TaskRun); task.getName() >> 'hello world' expect: @@ -46,8 +55,7 @@ 
class PbsExecutorTest extends Specification { def 'test job script headers'() { setup: - def executor = Spy(PbsExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() // mock process def proc = Mock(TaskProcessor) @@ -183,8 +191,7 @@ class PbsExecutorTest extends Specification { def WorkDirWithBlanks() { setup: - def executor = Spy(PbsExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() // mock process def proc = Mock(TaskProcessor) @@ -210,7 +217,7 @@ class PbsExecutorTest extends Specification { @Unroll def 'should return valid job name given #name'() { given: - def executor = [:] as PbsExecutor + def executor = createExecutor() def task = Mock(TaskRun) task.getName() >> name @@ -230,7 +237,7 @@ class PbsExecutorTest extends Specification { def testParseJobId() { given: - def executor = [:] as PbsExecutor + def executor = createExecutor() expect: executor.parseJobId('\n10.localhost\n') == '10.localhost' @@ -248,7 +255,7 @@ class PbsExecutorTest extends Specification { def testKillTaskCommand() { given: - def executor = [:] as PbsExecutor + def executor = createExecutor() expect: executor.killTaskCommand('100.localhost') == ['qdel', '100.localhost'] @@ -257,7 +264,7 @@ class PbsExecutorTest extends Specification { def testParseQueueStatus() { setup: - def executor = [:] as PbsExecutor + def executor = createExecutor() def text = """ Job Id: 12.localhost @@ -297,7 +304,7 @@ class PbsExecutorTest extends Specification { def 'should return qstat command line' () { given: - def executor = [:] as PbsExecutor + def executor = createExecutor() expect: executor.queueStatusCommand(null) == ['bash','-c', "set -o pipefail; qstat -f -1 | { grep -E '(Job Id:|job_state =)' || true; }"] @@ -319,7 +326,7 @@ class PbsExecutorTest extends Specification { def 'should get array index name and start' () { given: - def executor = Spy(PbsExecutor) + def executor = createExecutor() expect: executor.getArrayIndexName() == 
'PBS_ARRAYID' executor.getArrayIndexStart() == 0 @@ -328,7 +335,7 @@ class PbsExecutorTest extends Specification { @Unroll def 'should get array task id' () { given: - def executor = Spy(PbsExecutor) + def executor = createExecutor() expect: executor.getArrayTaskId(JOB_ID, TASK_INDEX) == EXPECTED @@ -344,13 +351,12 @@ class PbsExecutorTest extends Specification { def task = new TaskRun() task.workDir = Paths.get('/work/dir') task.processor = Mock(TaskProcessor) - task.processor.getSession() >> Mock(Session) task.config = Mock(TaskConfig) { getClusterOptionsAsList()>>[] } and: - def executor = Spy(PbsExecutor) + def config = new ExecutorConfig(account: ACCOUNT) + def executor = createExecutor(config) executor.getJobNameFor(_) >> 'foo' executor.getName() >> 'pbs' - executor.getSession() >> Mock(Session) { getExecConfigProp('pbs', 'account',null)>>ACCOUNT } when: def result = executor.getDirectives(task, []) diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/PbsProExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/PbsProExecutorTest.groovy index b9b4ec164b..b0fb4e58d9 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/PbsProExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/PbsProExecutorTest.groovy @@ -18,7 +18,6 @@ package nextflow.executor import java.nio.file.Paths -import nextflow.Session import nextflow.processor.TaskArrayRun import nextflow.processor.TaskConfig import nextflow.processor.TaskProcessor @@ -32,11 +31,19 @@ import spock.lang.Unroll */ class PbsProExecutorTest extends Specification { + def createExecutor(config) { + Spy(PbsProExecutor) { + getConfig() >> config + } + } + + def createExecutor() { + createExecutor(new ExecutorConfig([:])) + } + def 'should get directives' () { given: - def session = Mock(Session) { getConfig()>>[:] } - def executor = Spy(PbsProExecutor) - executor.getSession() >> session + def executor = createExecutor() def WORK_DIR = Paths.get('/here') def 
task = Mock(TaskRun) @@ -58,9 +65,7 @@ class PbsProExecutorTest extends Specification { def 'should get directives with queue' () { given: - def session = Mock(Session) { getConfig()>>[:] } - def executor = Spy(PbsProExecutor) - executor.getSession()>>session + def executor = createExecutor() def WORK_DIR = Paths.get('/foo/bar') def task = Mock(TaskRun) @@ -83,8 +88,7 @@ class PbsProExecutorTest extends Specification { def 'should get directives with cpus' () { given: - def executor = Spy(PbsProExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() and: def WORK_DIR = Paths.get('/foo/bar') @@ -109,8 +113,7 @@ class PbsProExecutorTest extends Specification { def 'should get directives with mem' () { given: - def executor = Spy(PbsProExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() def WORK_DIR = Paths.get('/foo/bar') def task = Mock(TaskRun) @@ -134,8 +137,7 @@ class PbsProExecutorTest extends Specification { def 'should get directives with cpus and mem' () { given: - def executor = Spy(PbsProExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() def WORK_DIR = Paths.get('/foo/bar') def task = Mock(TaskRun) @@ -159,8 +161,7 @@ class PbsProExecutorTest extends Specification { def 'should ignore cpus and memory when clusterOptions contains -l option' () { given: - def executor = Spy(PbsProExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() def WORK_DIR = Paths.get('/foo/bar') def task = Mock(TaskRun) @@ -187,8 +188,7 @@ class PbsProExecutorTest extends Specification { def 'should get directives with job array' () { given: - def executor = Spy(PbsProExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() and: def task = Mock(TaskArrayRun) { config >> new TaskConfig() @@ -208,7 +208,7 @@ class PbsProExecutorTest extends Specification { def 'should return qstat command line' () { given: - def executor = [:] as 
PbsProExecutor + def executor = createExecutor() expect: executor.queueStatusCommand(null) == ['bash','-c', "set -o pipefail; qstat -f \$( qstat -B | grep -E -v '(^Server|^---)' | awk -v ORS=' ' '{print \"@\"\$1}' ) | { grep -E '(Job Id:|job_state =)' || true; }"] @@ -219,7 +219,7 @@ class PbsProExecutorTest extends Specification { def 'should parse queue status'() { setup: - def executor = [:] as PbsProExecutor + def executor = createExecutor() def text = """ Job Id: 12.localhost @@ -253,8 +253,7 @@ class PbsProExecutorTest extends Specification { def 'should report cluster as first' () { setup: - def executor = Spy(PbsProExecutor) - executor.getSession() >> Mock(Session) + def executor = createExecutor() // mock process def proc = Mock(TaskProcessor) @@ -280,7 +279,7 @@ class PbsProExecutorTest extends Specification { def 'should get array index name and start' () { given: - def executor = Spy(PbsProExecutor) + def executor = createExecutor() expect: executor.getArrayIndexName() == 'PBS_ARRAY_INDEX' executor.getArrayIndexStart() == 0 @@ -289,7 +288,7 @@ class PbsProExecutorTest extends Specification { @Unroll def 'should get array task id' () { given: - def executor = Spy(PbsProExecutor) + def executor = createExecutor() expect: executor.getArrayTaskId(JOB_ID, TASK_INDEX) == EXPECTED @@ -305,13 +304,12 @@ class PbsProExecutorTest extends Specification { def task = new TaskRun() task.workDir = Paths.get('/work/dir') task.processor = Mock(TaskProcessor) - task.processor.getSession() >> Mock(Session) task.config = Mock(TaskConfig) { getClusterOptionsAsList()>>[] } and: - def executor = Spy(PbsProExecutor) + def config = new ExecutorConfig(account: ACCOUNT) + def executor = createExecutor(config) executor.getJobNameFor(_) >> 'foo' executor.getName() >> 'pbspro' - executor.getSession() >> Mock(Session) { getExecConfigProp('pbspro', 'account',null)>>ACCOUNT } when: def result = executor.getDirectives(task, []) diff --git 
a/modules/nextflow/src/test/groovy/nextflow/executor/SlurmExecutorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/SlurmExecutorTest.groovy index 4e2147eca3..66f45a60cc 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/SlurmExecutorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/SlurmExecutorTest.groovy @@ -31,10 +31,20 @@ import spock.lang.Unroll */ class SlurmExecutorTest extends Specification { + def createExecutor(config) { + Spy(SlurmExecutor) { + getConfig() >> config + } + } + + def createExecutor() { + createExecutor(new ExecutorConfig([:])) + } + def testParseJob() { given: - def exec = [:] as SlurmExecutor + def exec = createExecutor() expect: exec.parseJobId('Submitted batch job 10') == '10' @@ -54,7 +64,7 @@ class SlurmExecutorTest extends Specification { def testKill() { given: - def exec = [:] as SlurmExecutor + def exec = createExecutor() expect: exec.killTaskCommand(123) == ['scancel','123'] @@ -63,8 +73,7 @@ class SlurmExecutorTest extends Specification { @Unroll def testGetCommandLine() { given: - def session = Mock(Session) {getConfig()>>[:]} - def exec = Spy(SlurmExecutor) { getSession()>>session } + def exec = createExecutor() when: def result = exec.getSubmitCommandLine(Mock(TaskRun), Paths.get(PATH)) @@ -82,8 +91,7 @@ class SlurmExecutorTest extends Specification { setup: // SLURM executor - def executor = [:] as SlurmExecutor - executor.session = Mock(Session) + def executor = createExecutor() // mock process def proc = Mock(TaskProcessor) @@ -240,14 +248,10 @@ class SlurmExecutorTest extends Specification { def testWorkDirWithBlanks() { setup: - // LSF executor - def executor = Spy(SlurmExecutor) - executor.session = Mock(Session) + def executor = createExecutor() - // mock process def proc = Mock(TaskProcessor) - // task object def task = new TaskRun() task.processor = proc task.workDir = Paths.get('/work/some data/path') @@ -271,7 +275,7 @@ class SlurmExecutorTest extends Specification { 
def testQstatCommand() { setup: - def executor = [:] as SlurmExecutor + def executor = createExecutor() def text = """ 5 PD @@ -301,7 +305,7 @@ class SlurmExecutorTest extends Specification { def testQueueStatusCommand() { when: def usr = System.getProperty('user.name') - def exec = [:] as SlurmExecutor + def exec = createExecutor() then: usr exec.queueStatusCommand(null) == ['squeue','--noheader','-o','%i %t','-t','all','-u', usr] @@ -310,7 +314,7 @@ class SlurmExecutorTest extends Specification { def 'should get array index name and start' () { given: - def executor = Spy(SlurmExecutor) + def executor = createExecutor() expect: executor.getArrayIndexName() == 'SLURM_ARRAY_TASK_ID' executor.getArrayIndexStart() == 0 @@ -319,7 +323,7 @@ class SlurmExecutorTest extends Specification { @Unroll def 'should get array task id' () { given: - def executor = Spy(SlurmExecutor) + def executor = createExecutor() expect: executor.getArrayTaskId(JOB_ID, TASK_INDEX) == EXPECTED @@ -339,10 +343,10 @@ class SlurmExecutorTest extends Specification { task.processor.getSession() >> Mock(Session) task.config = Mock(TaskConfig) and: - def executor = Spy(SlurmExecutor) + def config = new ExecutorConfig(account: ACCOUNT) + def executor = createExecutor(config) executor.getJobNameFor(_) >> 'foo' executor.getName() >> 'slurm' - executor.getSession() >> Mock(Session) { getExecConfigProp('slurm', 'account',null)>>ACCOUNT } when: def result = executor.getDirectives(task, []) diff --git a/modules/nextflow/src/test/groovy/nextflow/executor/local/LocalTaskHandlerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/executor/local/LocalTaskHandlerTest.groovy index 7b5ac1e0ec..dcb4afaa2b 100644 --- a/modules/nextflow/src/test/groovy/nextflow/executor/local/LocalTaskHandlerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/executor/local/LocalTaskHandlerTest.groovy @@ -20,7 +20,7 @@ package nextflow.executor.local import java.nio.file.Path import nextflow.Global -import 
nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.file.http.XPath import nextflow.processor.TaskBean import nextflow.processor.TaskConfig @@ -63,7 +63,7 @@ class LocalTaskHandlerTest extends Specification { getContainer() >> 'ubuntu:latest' getWorkDir() >> WORK_DIR getConfig() >> Mock(TaskConfig) - getContainerConfig() >> new ContainerConfig([engine:'docker',enabled:true]) + getContainerConfig() >> new DockerConfig(enabled:true) toTaskBean() >> bean } def executor = Mock(LocalExecutor) diff --git a/modules/nextflow/src/test/groovy/nextflow/fusion/FusionHelperTest.groovy b/modules/nextflow/src/test/groovy/nextflow/fusion/FusionHelperTest.groovy index e8dc875b10..813a8935ec 100644 --- a/modules/nextflow/src/test/groovy/nextflow/fusion/FusionHelperTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/fusion/FusionHelperTest.groovy @@ -19,7 +19,8 @@ package nextflow.fusion import java.nio.file.Path -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig +import nextflow.container.SingularityConfig import nextflow.file.http.XPath import spock.lang.Specification @@ -54,21 +55,21 @@ class FusionHelperTest extends Specification { } when: - def result = FusionHelper.runWithContainer(launcher, new ContainerConfig(CONFIG), NAME, OPTS, CMD) + def result = FusionHelper.runWithContainer(launcher, CONFIG, NAME, OPTS, CMD) then: 1 * launcher.fusionEnv() >> ENV and: result == EXPECTED where: - CONFIG | ENV | NAME | OPTS | CMD | EXPECTED - [engine:'docker'] | [:] | 'image:1' | null | ['echo', 'hello'] | "docker run -i --rm --privileged image:1 echo 'hello'" - [engine:'docker'] | [FOO:'one'] | 'image:2' | null | ['echo', 'hello'] | "docker run -i -e \"FOO=one\" --rm --privileged image:2 echo 'hello'" - [engine:'docker'] | [FOO:'one'] | 'image:2' | '--this=that' | ['echo', 'hello'] | "docker run -i -e \"FOO=one\" --this=that --rm --privileged image:2 echo 'hello'" + CONFIG | ENV | NAME | OPTS | CMD | EXPECTED + 
new DockerConfig([:]) | [:] | 'image:1' | null | ['echo', 'hello'] | "docker run -i --rm --privileged image:1 echo 'hello'" + new DockerConfig([:]) | [FOO:'one'] | 'image:2' | null | ['echo', 'hello'] | "docker run -i -e \"FOO=one\" --rm --privileged image:2 echo 'hello'" + new DockerConfig([:]) | [FOO:'one'] | 'image:2' | '--this=that' | ['echo', 'hello'] | "docker run -i -e \"FOO=one\" --this=that --rm --privileged image:2 echo 'hello'" and: - [engine:'singularity'] | [:] | 'image:1' | null | ['echo', 'hello'] | "set +u; env - PATH=\"\$PATH\" \${TMP:+SINGULARITYENV_TMP=\"\$TMP\"} \${TMPDIR:+SINGULARITYENV_TMPDIR=\"\$TMPDIR\"} singularity exec --no-home --pid image:1 echo 'hello'" - [engine:'singularity'] | [FOO:'one'] | 'image:1' | null | ['echo', 'hello'] | "set +u; env - PATH=\"\$PATH\" \${TMP:+SINGULARITYENV_TMP=\"\$TMP\"} \${TMPDIR:+SINGULARITYENV_TMPDIR=\"\$TMPDIR\"} SINGULARITYENV_FOO=\"one\" singularity exec --no-home --pid image:1 echo 'hello'" - [engine:'singularity'] | [FOO:'one'] | 'image:1' | '--this=that' | ['echo', 'hello'] | "set +u; env - PATH=\"\$PATH\" \${TMP:+SINGULARITYENV_TMP=\"\$TMP\"} \${TMPDIR:+SINGULARITYENV_TMPDIR=\"\$TMPDIR\"} SINGULARITYENV_FOO=\"one\" singularity exec --no-home --pid --this=that image:1 echo 'hello'" + new SingularityConfig([:]) | [:] | 'image:1' | null | ['echo', 'hello'] | "set +u; env - PATH=\"\$PATH\" \${TMP:+SINGULARITYENV_TMP=\"\$TMP\"} \${TMPDIR:+SINGULARITYENV_TMPDIR=\"\$TMPDIR\"} singularity exec --no-home --pid image:1 echo 'hello'" + new SingularityConfig([:]) | [FOO:'one'] | 'image:1' | null | ['echo', 'hello'] | "set +u; env - PATH=\"\$PATH\" \${TMP:+SINGULARITYENV_TMP=\"\$TMP\"} \${TMPDIR:+SINGULARITYENV_TMPDIR=\"\$TMPDIR\"} SINGULARITYENV_FOO=\"one\" singularity exec --no-home --pid image:1 echo 'hello'" + new SingularityConfig([:]) | [FOO:'one'] | 'image:1' | '--this=that' | ['echo', 'hello'] | "set +u; env - PATH=\"\$PATH\" \${TMP:+SINGULARITYENV_TMP=\"\$TMP\"} 
\${TMPDIR:+SINGULARITYENV_TMPDIR=\"\$TMPDIR\"} SINGULARITYENV_FOO=\"one\" singularity exec --no-home --pid --this=that image:1 echo 'hello'" } diff --git a/modules/nextflow/src/test/groovy/nextflow/mail/MailerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/mail/MailerTest.groovy index 8bc2395886..c6249daeef 100644 --- a/modules/nextflow/src/test/groovy/nextflow/mail/MailerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/mail/MailerTest.groovy @@ -16,10 +16,6 @@ package nextflow.mail -import spock.lang.IgnoreIf -import spock.lang.Specification -import spock.lang.Unroll - import java.nio.file.Files import java.nio.file.Path import javax.mail.Message @@ -27,7 +23,11 @@ import javax.mail.internet.InternetAddress import javax.mail.internet.MimeMessage import javax.mail.internet.MimeMultipart +import nextflow.SysEnv import org.subethamail.wiser.Wiser +import spock.lang.IgnoreIf +import spock.lang.Specification +import spock.lang.Unroll import spock.util.environment.RestoreSystemProperties @IgnoreIf({System.getenv('NXF_SMOKE')}) @@ -37,7 +37,7 @@ class MailerTest extends Specification { def 'should return config properties'() { when: def SMTP = [host: 'google.com', port: '808', user: 'foo', password: 'bar'] - def mailer = new Mailer( config: [smtp: SMTP, other: 1] ) + def mailer = new Mailer( [smtp: SMTP, other: 1] ) def props = mailer.createProps() then: @@ -56,7 +56,7 @@ class MailerTest extends Specification { System.setProperty('http.proxyHost', 'foo.com') System.setProperty('http.proxyPort', '8000') - def mailer = new Mailer( config: [smtp: [host: 'gmail.com', port: 25, user:'yo']] ) + def mailer = new Mailer( [smtp: [host: 'gmail.com', port: 25, user:'yo']] ) when: def props = mailer.createProps() @@ -80,7 +80,7 @@ class MailerTest extends Specification { server.start() def SMTP = [ host: 'localhost', port: PORT, user: USER, password: PASSWORD] - def mailer = new Mailer( config: [smtp: SMTP]) + def mailer = new Mailer([smtp: SMTP]) String TO = 
"receiver@nextflow.io" String FROM = 'paolo@gmail.com' @@ -120,7 +120,7 @@ class MailerTest extends Specification { server.start() def SMTP = [ host: '127.0.0.1', port: PORT, user: USER, password: PASSWORD] - def mailer = new Mailer( config: [smtp: SMTP]) + def mailer = new Mailer([smtp: SMTP]) String TO = "receiver@gmail.com" String FROM = 'paolo@nextflow.io' @@ -157,13 +157,12 @@ class MailerTest extends Specification { def 'should send with java' () { given: - def mailer = Spy(Mailer) + def mailer = Spy(new Mailer([smtp: [host:'foo.com'] ])) def MSG = Mock(MimeMessage) def mail = new Mail() def provider = Spy(new JavaMailProvider()) when: - mailer.config = [smtp: [host:'foo.com'] ] mailer.send(mail) then: 0 * mailer.getSysMailer() >> null @@ -224,14 +223,14 @@ class MailerTest extends Specification { when: mail = new Mail(from:'foo@gmail.com') - msg = new Mailer(config: [from:'fallback@hotmail.com']).createMimeMessage(mail) + msg = new Mailer([from:'fallback@hotmail.com']).createMimeMessage(mail) then: msg.getFrom().size()==1 msg.getFrom()[0].toString() == 'foo@gmail.com' when: mail = new Mail() - msg = new Mailer(config: [from:'fallback@hotmail.com']).createMimeMessage(mail) + msg = new Mailer([from:'fallback@hotmail.com']).createMimeMessage(mail) then: msg.getFrom().size()==1 msg.getFrom()[0].toString() == 'fallback@hotmail.com' @@ -285,33 +284,37 @@ class MailerTest extends Specification { def 'should fetch config properties' () { given: - def ENV = [NXF_SMTP_USER: 'jim', NXF_SMTP_PASSWORD: 'secret', NXF_SMTP_HOST: 'g.com', NXF_SMTP_PORT: '864'] + SysEnv.push(NXF_SMTP_USER: 'jim', NXF_SMTP_PASSWORD: 'secret', NXF_SMTP_HOST: 'g.com', NXF_SMTP_PORT: '864') + and: def SMTP = [host:'hola.com', user:'foo', password: 'bar', port: 234] - Mailer mail + Mailer mailer when: - mail = new Mailer(config: [smtp: SMTP]) + mailer = new Mailer([smtp: SMTP]) then: - mail.host == 'hola.com' - mail.user == 'foo' - mail.password == 'bar' - mail.port == 234 + mailer.host == 
'hola.com' + mailer.user == 'foo' + mailer.password == 'bar' + mailer.port == 234 when: - mail = new Mailer(config: [smtp: [host: 'local', port: '999']], env: ENV) + mailer = new Mailer([smtp: [host: 'local', port: '999']]) then: - mail.host == 'local' - mail.port == 999 - mail.user == 'jim' - mail.password == 'secret' + mailer.host == 'local' + mailer.port == 999 + mailer.user == 'jim' + mailer.password == 'secret' when: - mail = new Mailer(env: ENV) + mailer = new Mailer([:]) then: - mail.host == 'g.com' - mail.port == 864 - mail.user == 'jim' - mail.password == 'secret' + mailer.host == 'g.com' + mailer.port == 864 + mailer.user == 'jim' + mailer.password == 'secret' + + cleanup: + SysEnv.pop() } def 'should config the mailer' () { diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy index e6f6150ce5..244f66288f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/LocalPollingMonitorTest.groovy @@ -21,6 +21,7 @@ import java.lang.management.ManagementFactory import com.sun.management.OperatingSystemMXBean import nextflow.Session import nextflow.exception.ProcessUnrecoverableException +import nextflow.executor.ExecutorConfig import nextflow.util.MemoryUnit import spock.lang.Specification /** @@ -219,24 +220,24 @@ class LocalPollingMonitorTest extends Specification { def OS = (OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean() when: - def session1 = new Session() + def config1 = new ExecutorConfig([:]) then: - LocalPollingMonitor.configCpus(session1,'local') == OS.getAvailableProcessors() + LocalPollingMonitor.configCpus(config1,'local') == OS.getAvailableProcessors() when: - def session2 = new Session([executor: [cpus: 100]]) + def config2 = new ExecutorConfig([cpus: 100]) then: - 
LocalPollingMonitor.configCpus(session2,'local') == 100 + LocalPollingMonitor.configCpus(config2,'local') == 100 when: - def session3 = new Session([executor: ['$local': [cpus: 100]]]) + def config3 = new ExecutorConfig(['$local': [cpus: 100]]) then: - LocalPollingMonitor.configCpus(session3,'local') == 100 + LocalPollingMonitor.configCpus(config3,'local') == 100 when: - def session4 = new Session([executor: ['$sge': [cpus: 100]]]) + def config4 = new ExecutorConfig(['$sge': [cpus: 100]]) then: - LocalPollingMonitor.configCpus(session4,'local') == OS.getAvailableProcessors() + LocalPollingMonitor.configCpus(config4,'local') == OS.getAvailableProcessors() } @@ -247,24 +248,24 @@ class LocalPollingMonitorTest extends Specification { def _10_GB = MemoryUnit.of('10 GB').toBytes() when: - def session1 = new Session() + def config1 = new ExecutorConfig([:]) then: - LocalPollingMonitor.configMem(session1,'local') == OS.getTotalPhysicalMemorySize() + LocalPollingMonitor.configMem(config1,'local') == OS.getTotalPhysicalMemorySize() when: - def session2 = new Session([executor: [memory: '10 GB']]) + def config2 = new ExecutorConfig([memory: '10 GB']) then: - LocalPollingMonitor.configMem(session2,'local') == _10_GB + LocalPollingMonitor.configMem(config2,'local') == _10_GB when: - def session3 = new Session([executor: ['$local': [memory: _10_GB]]]) + def config3 = new ExecutorConfig(['$local': [memory: _10_GB]]) then: - LocalPollingMonitor.configMem(session3,'local') == _10_GB + LocalPollingMonitor.configMem(config3,'local') == _10_GB when: - def session4 = new Session([executor: ['$sge': [memory: '1 GB']]]) + def config4 = new ExecutorConfig(['$sge': [memory: '1 GB']]) then: - LocalPollingMonitor.configMem(session4,'local') == OS.getTotalPhysicalMemorySize() + LocalPollingMonitor.configMem(config4,'local') == OS.getTotalPhysicalMemorySize() } } diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy 
b/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy index 4cfcb2a1fd..918b4dab22 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskBeanTest.groovy @@ -20,7 +20,7 @@ import java.nio.file.Paths import nextflow.Session import nextflow.conda.CondaConfig -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.executor.Executor import nextflow.script.ProcessConfig import nextflow.util.MemoryUnit @@ -69,7 +69,7 @@ class TaskBeanTest extends Specification { task.getTargetDir() >> Paths.get('/target/work/dir') task.getEnvironment() >> [alpha: 'one', beta: 'xxx', gamma: 'yyy'] task.getContainer() >> 'busybox:latest' - task.getContainerConfig() >> [docker: true, registry: 'x'] + task.getContainerConfig() >> new DockerConfig(registry: 'x') task.getCondaConfig() >> new CondaConfig([useMicromamba:true], [:]) when: @@ -90,7 +90,7 @@ class TaskBeanTest extends Specification { bean.afterScript == 'after do that' bean.containerImage == 'busybox:latest' - bean.containerConfig == [docker: true, registry: 'x'] as ContainerConfig + bean.containerConfig == new DockerConfig(registry: 'x') bean.containerMemory == new MemoryUnit('1GB') bean.statsEnabled diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskPollingMonitorTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/TaskPollingMonitorTest.groovy index 2c85462ff8..5889a6c1ea 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/TaskPollingMonitorTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskPollingMonitorTest.groovy @@ -19,6 +19,7 @@ package nextflow.processor import java.util.concurrent.atomic.LongAdder import nextflow.Session +import nextflow.executor.ExecutorConfig import nextflow.util.Duration import nextflow.util.RateUnit import nextflow.util.ThrottlingExecutor @@ -34,12 +35,13 @@ class 
TaskPollingMonitorTest extends Specification { setup: def name = 'hello' - def session = new Session( executor: [pollInterval: '1h', queueSize: 11, dumpInterval: '3h'] ) + def session = Mock(Session) + def config = new ExecutorConfig(pollInterval: '1h', queueSize: 11, dumpInterval: '3h') def defSize = 99 def defPollDuration = Duration.of('44s') when: - def monitor = TaskPollingMonitor.create(session, name, defSize, defPollDuration) + def monitor = TaskPollingMonitor.create(session, config, name, defSize, defPollDuration) then: monitor.name == 'hello' monitor.pollIntervalMillis == Duration.of('1h').toMillis() @@ -52,12 +54,12 @@ class TaskPollingMonitorTest extends Specification { given: def session = Mock(Session) - def monitor = new TaskPollingMonitor(name:'local', session: session, pollInterval: '1s', capacity: 100) + def config = new ExecutorConfig(submitRateLimit: RATE) + def monitor = new TaskPollingMonitor(name:'local', session: session, config: config, pollInterval: '1s', capacity: 100) when: def limit = monitor.createSubmitRateLimit() then: - 1 * session.getExecConfigProp('local','submitRateLimit', null) >> RATE limit ? 
Math.round(limit.getRate()) : null == EXPECTED where: diff --git a/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy b/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy index 80492ed71c..6311db47b5 100644 --- a/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/processor/TaskRunTest.groovy @@ -22,7 +22,8 @@ import java.nio.file.Paths import ch.artecat.grengine.Grengine import nextflow.Session import nextflow.ast.TaskCmdXform -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig +import nextflow.container.PodmanConfig import nextflow.container.resolver.ContainerInfo import nextflow.container.resolver.ContainerMeta import nextflow.container.resolver.ContainerResolver @@ -347,7 +348,7 @@ class TaskRunTest extends Specification { when: def image = task.getContainer() then: - task.getContainerConfig() >> [docker:[enabled: true]] + task.getContainerConfig() >> new DockerConfig([:]) image == EXPECTED where: @@ -606,7 +607,7 @@ class TaskRunTest extends Specification { when: def enabled = task.isContainerEnabled() then: - 1 * task.getContainerConfig() >> new ContainerConfig([enabled: false]) + 1 * task.getContainerConfig() >> new DockerConfig([enabled: false]) 0 * task.getContainer() >> null !enabled @@ -615,7 +616,7 @@ class TaskRunTest extends Specification { then: // NO container image is specified => NOT enable even if `enabled` flag is set to true _ * task.getContainer() >> null - _ * task.getContainerConfig() >> new ContainerConfig([enabled: true]) + _ * task.getContainerConfig() >> new DockerConfig([enabled: true]) !enabled when: @@ -623,7 +624,7 @@ class TaskRunTest extends Specification { then: // container is specified, not enabled _ * task.getContainer() >> 'foo/bar' - _ * task.getContainerConfig() >> new ContainerConfig([:]) + _ * task.getContainerConfig() >> new DockerConfig([:]) !enabled when: @@ -631,7 +632,7 @@ class 
TaskRunTest extends Specification { then: // container is specified AND enabled => enabled _ * task.getContainer() >> 'foo/bar' - _ * task.getContainerConfig() >> new ContainerConfig([enabled: true]) + _ * task.getContainerConfig() >> new DockerConfig([enabled: true]) enabled } @@ -844,7 +845,7 @@ class TaskRunTest extends Specification { and: session.getContainerConfig(null) >> null and: - config == new ContainerConfig(engine:'docker') + config == new DockerConfig([:]) when: config = task.getContainerConfig() @@ -852,23 +853,9 @@ class TaskRunTest extends Specification { 1 * executor.containerConfigEngine() >> null 1 * executor.isContainerNative() >> false and: - session.getContainerConfig(null) >> new ContainerConfig(engine:'podman', registry:'xyz') + session.getContainerConfig(null) >> new PodmanConfig(registry:'xyz') and: - config == new ContainerConfig(engine:'podman', registry:'xyz') - - - when: - config = task.getContainerConfig() - then: - // a container native is returned - 1 * executor.containerConfigEngine() >> 'foo' - 1 * executor.isContainerNative() >> true - and: - // the engine 'foo' is passed as argument - session.getContainerConfig('foo') >> new ContainerConfig(engine:'foo') - and: - // the engine is enabled by default - config == new ContainerConfig(engine:'foo', enabled: true) // <-- 'foo' engine is enabled + config == new PodmanConfig(registry:'xyz') } def 'should get container info' () { diff --git a/modules/nextflow/src/test/groovy/nextflow/script/ScriptRunnerTest.groovy b/modules/nextflow/src/test/groovy/nextflow/script/ScriptRunnerTest.groovy index bd2dc3e4bb..d2f6a2efdd 100644 --- a/modules/nextflow/src/test/groovy/nextflow/script/ScriptRunnerTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/script/ScriptRunnerTest.groovy @@ -269,7 +269,7 @@ class ScriptRunnerTest extends Dsl2Spec { } ''' and: - def config = [executor: 'nope', env: [HELLO: 'Hello world!']] + def config = [process: [executor: 'nope'], env: [HELLO: 'Hello world!']] 
expect: new MockScriptRunner(config).setScript(script).execute().val == 'Hello world!' @@ -297,7 +297,7 @@ class ScriptRunnerTest extends Dsl2Spec { } ''' and: - def config = [executor: 'nope'] + def config = [process: [executor: 'nope']] expect: new MockScriptRunner(config).setScript(script).execute().val == 'cat filename' @@ -310,8 +310,8 @@ class ScriptRunnerTest extends Dsl2Spec { given: // -- this represent the configuration file def config = ''' - executor = 'nope' process { + executor = 'nope' memory = '333' withName: hola { cpus = '222'; time = '555' } withName: ciao { cpus = '999' } @@ -350,9 +350,8 @@ class ScriptRunnerTest extends Dsl2Spec { given: // -- this represent the configuration file def config = ''' - executor = 'nope' - process { + executor = 'nope' memory = '333' withName: hola { @@ -398,7 +397,7 @@ class ScriptRunnerTest extends Dsl2Spec { given: // -- this represent the configuration file def config = ''' - executor = 'nope' + process.executor = 'nope' process.module = 'a/1' ''' @@ -432,8 +431,8 @@ class ScriptRunnerTest extends Dsl2Spec { * the module defined in the config file 'b/2' has priority and overrides the 'a/1' and 'c/3' */ def config = ''' - executor = 'nope' process { + executor = 'nope' module = 'a/1' withName: hola { module = 'b/2:z/9' } } @@ -470,7 +469,7 @@ class ScriptRunnerTest extends Dsl2Spec { * the module defined in the config file 'b/2' has priority and overrides the 'a/1' and 'c/3' */ def config = ''' - executor = 'nope' + process.executor = 'nope' process.module = 'a/1' ''' @@ -501,8 +500,8 @@ class ScriptRunnerTest extends Dsl2Spec { given: // -- this represent the configuration file def config = ''' - executor = 'nope' process { + executor = 'nope' queue = 'short' cpus = 2 time = '6 hour' @@ -663,7 +662,7 @@ class ScriptRunnerTest extends Dsl2Spec { * the module defined in the config file 'b/2' has priority and overrides the 'a/1' and 'c/3' */ def config = ''' - executor = 'nope' + process.executor = 'nope' stubRun 
= true ''' @@ -703,7 +702,7 @@ class ScriptRunnerTest extends Dsl2Spec { * the module defined in the config file 'b/2' has priority and overrides the 'a/1' and 'c/3' */ def config = ''' - executor = 'nope' + process.executor = 'nope' stubRun = true ''' @@ -740,7 +739,7 @@ class ScriptRunnerTest extends Dsl2Spec { * the module defined in the config file 'b/2' has priority and overrides the 'a/1' and 'c/3' */ def config = ''' - executor = 'nope' + process.executor = 'nope' stubRun = true ''' diff --git a/modules/nextflow/src/test/groovy/nextflow/script/WorkflowNotifierTest.groovy b/modules/nextflow/src/test/groovy/nextflow/script/WorkflowNotifierTest.groovy index 667d1ea15b..f949ff4bc2 100644 --- a/modules/nextflow/src/test/groovy/nextflow/script/WorkflowNotifierTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/script/WorkflowNotifierTest.groovy @@ -26,6 +26,7 @@ import nextflow.NextflowMeta import nextflow.mail.Attachment import nextflow.mail.Mail import nextflow.mail.Mailer +import nextflow.mail.Notification import nextflow.script.FusionMetadata import nextflow.script.WaveMetadata import nextflow.trace.WorkflowStats @@ -72,7 +73,7 @@ class WorkflowNotifierTest extends Specification { stats: new WorkflowStats(succeedMillis: 4_000_000, succeededCount: 10, failedCount: 20, cachedCount: 30, ignoredCount: 0) ) - def notifier = new WorkflowNotifier(variables: [workflow:meta], config: [:]) + def notifier = new WorkflowNotifier([workflow: meta], meta) when: meta.success = true @@ -206,7 +207,7 @@ class WorkflowNotifierTest extends Specification { stats: new WorkflowStats(succeedMillis: 4000) ) - def notifier = new WorkflowNotifier(workflow: meta, config: [:], variables: [workflow:meta]) + def notifier = new WorkflowNotifier([workflow: meta], meta) when: meta.success = true @@ -228,7 +229,7 @@ class WorkflowNotifierTest extends Specification { def 'should normalise template list' () { given: - def notifier = new WorkflowNotifier() + def notifier = new 
WorkflowNotifier([:], null) expect: notifier.normaliseTemplate0('foo', []) == [new File('foo')] @@ -240,8 +241,7 @@ class WorkflowNotifierTest extends Specification { given: Mail mail def workflow = new WorkflowMetadata() - def notifier = Spy(WorkflowNotifier) - notifier.@workflow = workflow + def notifier = Spy(new WorkflowNotifier([:], workflow)) def attach = Mock(Attachment) /* @@ -250,7 +250,7 @@ class WorkflowNotifierTest extends Specification { when: workflow.success = true workflow.runName = 'foo' - mail = notifier.createMail([to:'paolo@yo.com', from:'bot@nextflow.com']) + mail = notifier.createMail(new Notification(to:'paolo@yo.com', from:'bot@nextflow.com')) then: 1 * notifier.loadDefaultTextTemplate() >> 'TEXT template' 1 * notifier.loadDefaultHtmlTemplate() >> 'HTML template' @@ -268,7 +268,7 @@ class WorkflowNotifierTest extends Specification { when: workflow.success = false workflow.runName = 'bar' - mail = notifier.createMail([to:'alpha@dot.com', from:'beta@dot.com', template: ['/some/file.txt', '/other/file.html'], attributes: [one:1, two:2]]) + mail = notifier.createMail(new Notification(to:'alpha@dot.com', from:'beta@dot.com', template: ['/some/file.txt', '/other/file.html'], attributes: [one:1, two:2])) then: 1 * notifier.loadMailTemplate(new File('/some/file.txt'), [one:1, two:2]) >> 'TEXT template' 1 * notifier.loadMailTemplate(new File('/other/file.html'), [one:1, two:2]) >> 'HTML template' @@ -288,8 +288,7 @@ class WorkflowNotifierTest extends Specification { and: Mail mail def workflow = new WorkflowMetadata() - def notifier = Spy(WorkflowNotifier) - notifier.@workflow = workflow + def notifier = Spy(new WorkflowNotifier([:], workflow)) /* * create success completion *default* notification email @@ -297,7 +296,7 @@ class WorkflowNotifierTest extends Specification { when: workflow.success = true workflow.runName = 'foo' - mail = notifier.createMail([to:'paolo@yo.com', from:'bot@nextflow.com', template: template]) + mail = 
notifier.createMail(new Notification(to:'paolo@yo.com', from:'bot@nextflow.com', template: template)) then: 0 * notifier.loadDefaultTextTemplate() 0 * notifier.loadDefaultHtmlTemplate() @@ -319,7 +318,7 @@ class WorkflowNotifierTest extends Specification { given: def workflow = Mock(WorkflowMetadata) - def notifier = Spy(WorkflowNotifier) + def notifier = Spy(new WorkflowNotifier([:], workflow)) def CONFIG_NOTIFIER = [ enabled: true, from: 'me@nextflow.io', @@ -333,19 +332,16 @@ class WorkflowNotifierTest extends Specification { def MAILER = Mock(Mailer) when: - notifier.@workflow = workflow - notifier.@config = [notification: CONFIG_NOTIFIER, mail: CONFIG_MAIL] - notifier.sendNotification() + notifier.sendNotification(notification: CONFIG_NOTIFIER, mail: CONFIG_MAIL) then: - 1 * notifier.createMail(CONFIG_NOTIFIER) >> MAIL - 1 * notifier.createMailer(CONFIG_MAIL) >> MAILER + 1 * notifier.createMail(_) >> MAIL + 1 * notifier.createMailer(_) >> MAILER 1 * MAILER.send(MAIL) when: ''' `enabled` flag is false, notification is NOT sent ''' - notifier.@config = [notification: [enabled: false, to:'you@dot.com']] - notifier.sendNotification() + notifier.sendNotification(notification: [enabled: false, to:'you@dot.com']) then: 0 * notifier.createMail(_) >> null 0 * notifier.createMailer(_) >> MAILER @@ -354,8 +350,7 @@ class WorkflowNotifierTest extends Specification { when: ''' notification is implicitly enabled if recipient field is provided ''' - notifier.@config = [notification: [to:'you@dot.com']] - notifier.sendNotification() + notifier.sendNotification(notification: [to:'you@dot.com']) then: 0 * notifier.createMail(_) >> null 0 * notifier.createMailer(_) >> MAILER diff --git a/modules/nextflow/src/test/groovy/nextflow/spack/SpackCacheTest.groovy b/modules/nextflow/src/test/groovy/nextflow/spack/SpackCacheTest.groovy index ae93296862..a2cbc74ce1 100644 --- a/modules/nextflow/src/test/groovy/nextflow/spack/SpackCacheTest.groovy +++ 
b/modules/nextflow/src/test/groovy/nextflow/spack/SpackCacheTest.groovy @@ -187,7 +187,8 @@ class SpackCacheTest extends Specification { def 'should get options from the config' () { when: - def cache = new SpackCache(new SpackConfig()) + def config = new SpackConfig() + def cache = new SpackCache(config) then: cache.createTimeout.minutes == 60 cache.configCacheDir0 == null @@ -195,7 +196,8 @@ class SpackCacheTest extends Specification { cache.parallelBuilds == null when: - cache = new SpackCache(new SpackConfig(createTimeout: '5 min', cacheDir: '/spack/cache', checksum: false, parallelBuilds: 2)) + config = new SpackConfig([createTimeout: '5 min', cacheDir: '/spack/cache', checksum: false, parallelBuilds: 2], [:]) + cache = new SpackCache(config) then: cache.createTimeout.minutes == 5 cache.configCacheDir0 == Paths.get('/spack/cache') @@ -207,7 +209,7 @@ class SpackCacheTest extends Specification { given: def folder = Files.createTempDirectory('test'); folder.deleteDir() - def config = new SpackConfig(cacheDir: folder.toString()) + def config = new SpackConfig([cacheDir: folder.toString()], [:]) SpackCache cache = Spy(SpackCache, constructorArgs: [config]) when: @@ -225,7 +227,7 @@ class SpackCacheTest extends Specification { given: def folder = Paths.get('.test-spack-cache-' + Math.random()) - def config = new SpackConfig(cacheDir: folder.toString()) + def config = new SpackConfig([cacheDir: folder.toString()], [:]) SpackCache cache = Spy(SpackCache, constructorArgs: [config]) when: diff --git a/modules/nextflow/src/test/groovy/nextflow/spack/SpackConfigTest.groovy b/modules/nextflow/src/test/groovy/nextflow/spack/SpackConfigTest.groovy index e288405d46..f9dc660f87 100644 --- a/modules/nextflow/src/test/groovy/nextflow/spack/SpackConfigTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/spack/SpackConfigTest.groovy @@ -39,9 +39,9 @@ class SpackConfigTest extends Specification { false | [enabled: false] | [:] true | [enabled: true] | [:] and: - false | [:] 
| [NXF_SPACK_ENABLED: false] - true | [:] | [NXF_SPACK_ENABLED: true] - false | [enabled: false] | [NXF_SPACK_ENABLED: true] // <-- config has priority - true | [enabled: true] | [NXF_SPACK_ENABLED: true] + false | [:] | [NXF_SPACK_ENABLED: 'false'] + true | [:] | [NXF_SPACK_ENABLED: 'true'] + false | [enabled: false] | [NXF_SPACK_ENABLED: 'true'] // <-- config has priority + true | [enabled: true] | [NXF_SPACK_ENABLED: 'true'] } } diff --git a/modules/nextflow/src/test/groovy/nextflow/trace/GraphObserverTest.groovy b/modules/nextflow/src/test/groovy/nextflow/trace/GraphObserverTest.groovy index f7c8a15124..bb0a54ce2f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/trace/GraphObserverTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/trace/GraphObserverTest.groovy @@ -16,7 +16,7 @@ package nextflow.trace import java.nio.file.Files -import java.nio.file.Paths +import java.nio.file.Path import groovyx.gpars.dataflow.DataflowQueue import nextflow.Session @@ -25,6 +25,7 @@ import nextflow.dag.DAG import nextflow.dag.DotRenderer import nextflow.dag.GraphvizRenderer import nextflow.dag.MermaidRenderer +import nextflow.trace.config.DagConfig import spock.lang.Requires import spock.lang.Specification import test.TestHelper @@ -37,6 +38,10 @@ class GraphObserverTest extends Specification { DAG test_dag + def createObserver(Path file) { + new GraphObserver(new DagConfig(file: file.toString())) + } + def setup() { new Session() @@ -77,7 +82,7 @@ class GraphObserverTest extends Specification { def 'should write a dot file' () { given: def file = Files.createTempFile('nxf_','.dot') - def gr = new GraphObserver(file) + def gr = createObserver(file) gr.dag = test_dag when: @@ -104,7 +109,7 @@ class GraphObserverTest extends Specification { def 'should write an html file' () { given: def file = Files.createTempFile('nxf-','.html') - def gr = new GraphObserver(file) + def gr = createObserver(file) gr.dag = test_dag when: @@ -133,7 +138,7 @@ class GraphObserverTest 
extends Specification { def 'should write an svg file' () { given: def file = Files.createTempFile('nxf-','.svg') - def gr = new GraphObserver(file) + def gr = createObserver(file) gr.dag = test_dag when: @@ -151,7 +156,7 @@ class GraphObserverTest extends Specification { def 'should write a png file' () { given: def file = Files.createTempFile('nxf-','.png') - def gr = new GraphObserver(file) + def gr = createObserver(file) gr.dag = test_dag when: @@ -168,7 +173,7 @@ class GraphObserverTest extends Specification { def 'should write a pdf file' () { given: def file = Files.createTempFile('nxf-','.pdf') - def gr = new GraphObserver(file) + def gr = createObserver(file) gr.dag = test_dag when: @@ -185,7 +190,7 @@ class GraphObserverTest extends Specification { given: def folder = Files.createTempDirectory('test') def file = folder.resolve('nope') - def gr = new GraphObserver(file) + def gr = createObserver(file) gr.dag = test_dag when: @@ -215,35 +220,35 @@ class GraphObserverTest extends Specification { def observer when: - observer = new GraphObserver(Paths.get('/path/to/hello-world.dot')) + observer = createObserver(Path.of('/path/to/hello-world.dot')) then: observer.name == 'hello-world' observer.format == 'dot' observer.createRender() instanceof DotRenderer when: - observer = new GraphObserver(Paths.get('/path/to/TheGraph.html')) + observer = createObserver(Path.of('/path/to/TheGraph.html')) then: observer.name == 'TheGraph' observer.format == 'html' observer.createRender() instanceof MermaidHtmlRenderer when: - observer = new GraphObserver(Paths.get('/path/to/TheGraph.mmd')) + observer = createObserver(Path.of('/path/to/TheGraph.mmd')) then: observer.name == 'TheGraph' observer.format == 'mmd' observer.createRender() instanceof MermaidRenderer when: - observer = new GraphObserver(Paths.get('/path/to/TheGraph.SVG')) + observer = createObserver(Path.of('/path/to/TheGraph.SVG')) then: observer.name == 'TheGraph' observer.format == 'svg' observer.createRender() 
instanceof GraphvizRenderer when: - observer = new GraphObserver(Paths.get('/path/to/anonymous')) + observer = createObserver(Path.of('/path/to/anonymous')) then: observer.name == 'anonymous' observer.format == 'html' diff --git a/modules/nextflow/src/test/groovy/nextflow/trace/ReportObserverTest.groovy b/modules/nextflow/src/test/groovy/nextflow/trace/ReportObserverTest.groovy index 01aae0ded5..b6b1e0ee2f 100644 --- a/modules/nextflow/src/test/groovy/nextflow/trace/ReportObserverTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/trace/ReportObserverTest.groovy @@ -27,6 +27,7 @@ import nextflow.processor.TaskId import nextflow.script.FusionMetadata import nextflow.script.WaveMetadata import nextflow.script.WorkflowMetadata +import nextflow.trace.config.ReportConfig import nextflow.trace.event.TaskEvent import spock.lang.Specification import test.TestHelper @@ -129,7 +130,7 @@ class ReportObserverTest extends Specification { ) def file = TestHelper.createInMemTempFile('report.html') - def observer = Spy(ReportObserver, constructorArgs: [file]) + def observer = Spy(new ReportObserver(reportFile: file)) when: observer.renderHtml() @@ -154,7 +155,7 @@ class ReportObserverTest extends Specification { def aggregator = Mock(ResourcesAggregator) def file = TestHelper.createInMemTempFile('report.html') - ReportObserver observer = Spy(ReportObserver, constructorArgs: [file]) + def observer = Spy(new ReportObserver(reportFile: file)) observer.getWorkflowMetadata() >> workflow observer.@aggregator = aggregator @@ -195,9 +196,9 @@ class ReportObserverTest extends Specification { def 'should render not tasks payload' () { given: - def observer = Spy(ReportObserver) + def observer = Spy(new ReportObserver(new ReportConfig([:]))) def BIG = Mock(Map) - BIG.size() >> ReportObserver.DEF_MAX_TASKS+1 + BIG.size() >> ReportConfig.DEF_MAX_TASKS+1 when: def result = observer.renderTasksJson() @@ -208,7 +209,7 @@ class ReportObserverTest extends Specification { def 'should render 
tasks payload' () { given: - def observer = Spy(ReportObserver) + def observer = Spy(new ReportObserver(new ReportConfig([:]))) def TASKID1 = TaskId.of(10) def TASKID2 = TaskId.of(20) diff --git a/modules/nextflow/src/test/groovy/nextflow/trace/TimelineObserverTest.groovy b/modules/nextflow/src/test/groovy/nextflow/trace/TimelineObserverTest.groovy index 888f16c2aa..e2195dfffe 100644 --- a/modules/nextflow/src/test/groovy/nextflow/trace/TimelineObserverTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/trace/TimelineObserverTest.groovy @@ -131,7 +131,7 @@ class TimelineObserverTest extends Specification { h3.getTraceRecord() >> r3 when: - def observer = new TimelineObserver(Mock(Path)) + def observer = new TimelineObserver() observer.onTaskComplete(new TaskEvent(h1, h1.getTraceRecord())) observer.onTaskComplete(new TaskEvent(h2, h2.getTraceRecord())) observer.onTaskComplete(new TaskEvent(h3, h3.getTraceRecord())) @@ -174,7 +174,7 @@ class TimelineObserverTest extends Specification { r3.peak_rss = 70_000_000 def file = TestHelper.createInMemTempFile('report.html') - def observer = new TimelineObserver(file) + def observer = new TimelineObserver(reportFile: file) observer.beginMillis = 1000 observer.startMillis = 1000 observer.endMillis = 3500 diff --git a/modules/nextflow/src/test/groovy/nextflow/trace/TraceFileObserverTest.groovy b/modules/nextflow/src/test/groovy/nextflow/trace/TraceFileObserverTest.groovy index 307c8e5cf2..0b0771ce7a 100644 --- a/modules/nextflow/src/test/groovy/nextflow/trace/TraceFileObserverTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/trace/TraceFileObserverTest.groovy @@ -109,7 +109,7 @@ class TraceFileObserverTest extends Specification { def now = System.currentTimeMillis() // the observer class under test - def observer = new TraceFileObserver(file) + def observer = new TraceFileObserver(tracePath: file) when: observer.onFlowCreate(null) diff --git 
a/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy b/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy index cf85e46c48..d80274fd60 100644 --- a/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/util/ConfigHelperTest.groovy @@ -30,21 +30,6 @@ import spock.lang.Unroll */ class ConfigHelperTest extends Specification { - @Unroll - def "get config property" () { - - expect: - ConfigHelper.getConfigProperty(config, execName, 'foo') == value - - where: - config | execName | value - [foo: 0] | null | 0 - [foo: 100] | null | 100 - [foo: 'bar'] | null | 'bar' - [$sge: [foo: 'bar']] | 'sge' | 'bar' - - } - @Unroll def "should parse string value: #str" () { diff --git a/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy b/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy index 35c3ef6352..c1fa9c6e75 100644 --- a/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/util/KryoHelperTest.groovy @@ -17,7 +17,7 @@ package nextflow.util import groovy.transform.EqualsAndHashCode -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.file.FileHelper import nextflow.io.SerializableMarker import spock.lang.Specification @@ -118,15 +118,15 @@ class KryoHelperTest extends Specification { def testSerializeContainerConfig() { given: - def cfg = new ContainerConfig([enabled: true, engine: 'docker', xxx: 'hello']) + def cfg = new DockerConfig([enabled: true, runOptions: 'hello']) when: def copy = KryoHelper.deserialize(KryoHelper.serialize(cfg)) then: copy == cfg - copy instanceof ContainerConfig + copy instanceof DockerConfig copy.engine == 'docker' copy.enabled == true - copy.xxx == 'hello' + copy.runOptions == 'hello' } diff --git a/modules/nf-lang/src/main/java/nextflow/config/schema/ConfigOption.java 
b/modules/nf-lang/src/main/java/nextflow/config/schema/ConfigOption.java index d5f2be7e46..2805559983 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/schema/ConfigOption.java +++ b/modules/nf-lang/src/main/java/nextflow/config/schema/ConfigOption.java @@ -21,6 +21,7 @@ import java.lang.annotation.Target; @Retention(RetentionPolicy.RUNTIME) -@Target({ ElementType.FIELD, ElementType.METHOD }) +@Target(ElementType.FIELD) public @interface ConfigOption { + Class[] types() default {}; } diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsBatchConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsBatchConfig.java deleted file mode 100644 index f2ba4eba35..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsBatchConfig.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.util.List; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class AwsBatchConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The path where the AWS command line tool is installed in the host AMI. - """) - public String cliPath; - - @ConfigOption - @Description(""" - Delay between download attempts from S3 (default: `10 sec`). 
- """) - public Duration delayBetweenAttempts; - - @ConfigOption - @Description(""" - The AWS Batch Execution Role ARN that needs to be used to execute the Batch Job. - - [Read more](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) - """) - public String executionRole; - - @ConfigOption - @Description(""" - The AWS Batch Job Role ARN that needs to be used to execute the Batch Job. - """) - public String jobRole; - - @ConfigOption - @Description(""" - The name of the logs group used by Batch Jobs (default: `/aws/batch`). - """) - public String logsGroup; - - @ConfigOption - @Description(""" - Max parallel upload/download transfer operations *per job* (default: `4`). - """) - public int maxParallelTransfers; - - @ConfigOption - @Description(""" - Max number of execution attempts of a job interrupted by a EC2 spot reclaim event (default: `5`) - """) - public int maxSpotAttempts; - - @ConfigOption - @Description(""" - Max number of downloads attempts from S3 (default: `1`). - """) - public int maxTransferAttempts; - - @ConfigOption - @Description(""" - The compute platform type used by AWS Batch. Can be either `ec2` or `fargate`. - """) - public String platformType; - - @ConfigOption - @Description(""" - The retry mode used to accommodate rate-limiting on AWS services. Can be one of `standard`, `legacy`, `adaptive`, or `built-in` (default: `standard`). - - [Read more](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-retries.html) - """) - public String retryMode; - - @ConfigOption - @Description(""" - The scheduling priority for all tasks when using fair-share scheduling for AWS Batch (default: `0`). - - [Read more](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/) - """) - public int schedulingPriority; - - @ConfigOption - @Description(""" - The share identifier for all tasks when using fair-share scheduling for AWS Batch. 
- - [Read more](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/) - """) - public String shareIdentifier; - - @ConfigOption - @Description(""" - When true, jobs that cannot be scheduled for lack of resources or misconfiguration are terminated automatically (default: `false`). - """) - public boolean terminateUnschedulableJobs; - - @ConfigOption - @Description(""" - One or more container mounts. Mounts can be specified as simple e.g. `/some/path` or canonical format e.g. `/host/path:/mount/path[:ro|rw]`. - """) - public List volumes; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java deleted file mode 100644 index 7549901033..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsClientConfig.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; -import nextflow.script.types.MemoryUnit; - -public class AwsClientConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Allow the access of public S3 buckets without providing AWS credentials. 
Any service that does not accept unsigned requests will return a service access error. - """) - public boolean anonymous; - - @ConfigOption - @Description(""" - Specify predefined bucket permissions, also known as *canned ACL*. Can be one of `Private`, `PublicRead`, `PublicReadWrite`, `AuthenticatedRead`, `LogDeliveryWrite`, `BucketOwnerRead`, `BucketOwnerFullControl`, or `AwsExecRead`. - - [Read more](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) - """) - public String s3Acl; - - @ConfigOption - @Description(""" - The amount of time to wait (in milliseconds) when initially establishing a connection before timing out. - """) - public int connectionTimeout; - - @ConfigOption - @Description(""" - The AWS S3 API entry point e.g. `https://s3-us-west-1.amazonaws.com`. The endpoint must include the protocol prefix e.g. `https://`. - """) - public String endpoint; - - @ConfigOption - @Description(""" - The maximum number of concurrency in S3 async clients. - """) - public int maxConcurrency; - - @ConfigOption - @Description(""" - The maximum number of allowed open HTTP connections. - """) - public int maxConnections; - - @ConfigOption - @Description(""" - The maximum number of retry attempts for failed retryable requests. - """) - public int maxErrorRetry; - - @ConfigOption - @Description(""" - The maximum native memory used by the S3 asynchronous client for S3 transfers. - """) - public MemoryUnit maxNativeMemory; - - @ConfigOption - @Description(""" - The minimum size of a single part in a multipart upload (default: `8 MB`). - """) - public MemoryUnit minimumPartSize; - - @ConfigOption - @Description(""" - The S3 Async client threshold to create multipart S3 transfers. Default is the same as `minimumPartSize`. - """) - public MemoryUnit multipartThreshold; - - @ConfigOption - @Description(""" - The proxy host to connect through. 
- """) - public String proxyHost; - - @ConfigOption - @Description(""" - The port on the proxy host to connect through. - """) - public int proxyPort; - - @ConfigOption - @Description(""" - The protocol scheme to use when connecting through a proxy (http/https). - """) - public String proxyScheme; - - @ConfigOption - @Description(""" - The user name to use when connecting through a proxy. - """) - public String proxyUsername; - - @ConfigOption - @Description(""" - The password to use when connecting through a proxy. - """) - public String proxyPassword; - - @ConfigOption - @Description(""" - Enable the requester pays feature for S3 buckets. - """) - public boolean requesterPays; - - @ConfigOption - @Description(""" - Enable the use of path-based access model that is used to specify the address of an object in S3-compatible storage systems. - """) - public boolean s3PathStyleAccess; - - @ConfigOption - @Description(""" - The amount of time to wait (in milliseconds) for data to be transferred over an established, open connection before the connection is timed out. - """) - public int socketTimeout; - - @ConfigOption - @Description(""" - The S3 server side encryption to be used when saving objects on S3, either `AES256` or `aws:kms` values are allowed. - """) - public String storageEncryption; - - @ConfigOption - @Description(""" - The AWS KMS key Id to be used to encrypt files stored in the target S3 bucket. - """) - public String storageKmsKeyId; - - @ConfigOption - @Description(""" - The S3 Async client target network throughput in Gbps. This value is used to automatically set `maxConcurrency` and `maxNativeMemory` (default: `10`). - """) - public Double targetThroughputInGbps; - - @ConfigOption - @Description(""" - The number of threads used by the S3 transfer manager (default: `10`). 
- """) - public int transferManagerThreads; - - @ConfigOption - @Description(""" - The S3 storage class applied to stored objects, one of \\[`STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`\\] (default: `STANDARD`). - """) - public String uploadStorageClass; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsConfig.java deleted file mode 100644 index 326e2af9d9..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AwsConfig.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class AwsConfig implements ConfigScope { - - @ConfigOption - @Description(""" - AWS account access key. - """) - public String accessKey; - - @ConfigOption - @Description(""" - AWS profile from `~/.aws/credentials`. - """) - public String profile; - - @ConfigOption - @Description(""" - AWS region (e.g. `us-east-1`). - """) - public String region; - - @ConfigOption - @Description(""" - AWS account secret key. 
- """) - public String secretKey; - - public AwsBatchConfig batch; - - public AwsClientConfig client; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureActiveDirectoryConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureActiveDirectoryConfig.java deleted file mode 100644 index 7d648b11d5..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureActiveDirectoryConfig.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class AzureActiveDirectoryConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The service principal client ID. - """) - public String servicePrincipalId; - - @ConfigOption - @Description(""" - The service principal client secret. - """) - public String servicePrincipalSecret; - - @ConfigOption - @Description(""" - The Azure tenant ID. 
- """) - public String tenantId; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchConfig.java deleted file mode 100644 index adad9907b4..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchConfig.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.util.Map; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.config.schema.PlaceholderName; -import nextflow.script.dsl.Description; - -public class AzureBatchConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The batch service account name. - """) - public String accountName; - - @ConfigOption - @Description(""" - The batch service account key. - """) - public String accountKey; - - @ConfigOption - @Description(""" - Enable the automatic creation of batch pools specified in the Nextflow configuration file (default: `false`). - """) - public boolean allowPoolCreation; - - @ConfigOption - @Description(""" - Enable the automatic creation of batch pools depending on the pipeline resources demand (default: `true`). - """) - public String autoPoolMode; - - @ConfigOption - @Description(""" - The mode in which the `azcopy` tool is installed by Nextflow (default: `'node'`). 
The following options are available: - - - `'node'`: the `azcopy` tool is installed once during the pool creation - - `'task'`: the `azcopy` tool is installed for each task execution - - `'off'`: the `azcopy` tool is not installed - """) - public String copyToolInstallMode; - - @ConfigOption - @Description(""" - Delete all jobs when the workflow completes (default: `false`). - """) - public boolean deleteJobsOnCompletion; - - @ConfigOption - @Description(""" - Delete all compute node pools when the workflow completes (default: `false`). - """) - public boolean deletePoolsOnCompletion; - - @ConfigOption - @Description(""" - Delete each task when it completes (default: `true`). - """) - public boolean deleteTasksOnCompletion; - - @ConfigOption - @Description(""" - The batch service endpoint e.g. `https://nfbatch1.westeurope.batch.azure.com`. - """) - public String endpoint; - - @ConfigOption - @Description(""" - The name of the batch service region, e.g. `westeurope` or `eastus2`. Not needed when the endpoint is specified. - """) - public String location; - - @ConfigOption - @Description(""" - The client ID for an Azure managed identity that is available on all Azure Batch node pools. This identity will be used for task-level authentication to Azure services. - """) - public String poolIdentityClientId; - - @PlaceholderName("") - public Map pools; - - @ConfigOption - @Description(""" - When the workflow completes, set all jobs to terminate on task completion (default: `true`). 
- """) - public boolean terminateJobsOnCompletion; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchPoolConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchPoolConfig.java deleted file mode 100644 index f9e0bc76b2..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureBatchPoolConfig.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class AzureBatchPoolConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable autoscaling feature for the pool identified with ``. - """) - public boolean autoScale; - - @ConfigOption - @Description(""" - The internal root mount point when mounting File Shares. Must be `/mnt/resource/batch/tasks/fsmounts` for CentOS nodes or `/mnt/batch/tasks/fsmounts` for Ubuntu nodes (default: CentOS). - """) - public String fileShareRootPath; - - @ConfigOption - @Description(""" - Enable the use of low-priority VMs (default: `false`). - """) - public boolean lowPriority; - - @ConfigOption - @Description(""" - The max number of virtual machines when using auto scaling. 
- """) - public int maxVmCount; - - @ConfigOption - @Description(""" - The mount options for mounting the file shares (default: `-o vers=3.0,dir_mode=0777,file_mode=0777,sec=ntlmssp`). - """) - public String mountOptions; - - @ConfigOption - @Description(""" - The offer type of the virtual machine type used by the pool identified with `` (default: `centos-container`). - """) - public String offer; - - @ConfigOption - @Description(""" - Enable the task to run with elevated access. Ignored if `runAs` is set (default: `false`). - """) - public boolean privileged; - - @ConfigOption - @Description(""" - The publisher of virtual machine type used by the pool identified with `` (default: `microsoft-azure-batch`). - """) - public String publisher; - - @ConfigOption - @Description(""" - The username under which the task is run. The user must already exist on each node of the pool. - """) - public String runAs; - - @ConfigOption - @Description(""" - The scale formula for the pool identified with ``. - - [Read more](https://docs.microsoft.com/en-us/azure/batch/batch-automatic-scaling) - """) - public String scaleFormula; - - @ConfigOption - @Description(""" - The interval at which to automatically adjust the Pool size according to the autoscale formula. Must be at least 5 minutes and at most 168 hours (default: `10 mins`). - """) - public Duration scaleInterval; - - @ConfigOption - @Description(""" - The scheduling policy for the pool identified with ``. Can be either `spread` or `pack` (default: `spread`). - """) - public String schedulePolicy; - - @ConfigOption - @Description(""" - The ID of the Compute Node agent SKU which the pool identified with `` supports (default: `batch.node.centos 8`). - """) - public String sku; - - public AzureBatchPoolStartTaskConfig startTask; - - @ConfigOption - @Description(""" - The subnet ID of a virtual network in which to create the pool. 
- """) - public String virtualNetwork; - - @ConfigOption - @Description(""" - The number of virtual machines provisioned by the pool identified with ``. - """) - public String vmCount; - - @ConfigOption - @Description(""" - The virtual machine type used by the pool identified with ``. - """) - public String vmType; - -} - -class AzureBatchPoolStartTaskConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The `startTask` that is executed as the node joins the Azure Batch node pool. - """) - public String script; - - @ConfigOption - @Description(""" - Enable the `startTask` to run with elevated access (default: `false`). - """) - public String privileged; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureConfig.java deleted file mode 100644 index 2d5e5a125c..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureConfig.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigScope; - -public class AzureConfig implements ConfigScope { - - public AzureActiveDirectoryConfig activeDirectory; - - public AzureBatchConfig batch; - - public AzureManagedIdentityConfig managedIdentity; - - public AzureRegistryConfig registry; - - public AzureRetryConfig retry; - - public AzureStorageConfig storage; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureManagedIdentityConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureManagedIdentityConfig.java deleted file mode 100644 index 86a9af5402..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureManagedIdentityConfig.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class AzureManagedIdentityConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The client ID for an Azure managed identity. - - [Read more](https://nextflow.io/docs/latest/azure.html#managed-identities) - """) - public String clientId; - - @ConfigOption - @Description(""" - When `true`, use the system-assigned managed identity to authenticate Azure resources. 
- - [Read more](https://nextflow.io/docs/latest/azure.html#managed-identities) - """) - public boolean system; - - @ConfigOption - @Description(""" - The Azure tenant ID. - """) - public String tenantId; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRegistryConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRegistryConfig.java deleted file mode 100644 index 70d62f794d..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRegistryConfig.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class AzureRegistryConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The container registry from which to pull the Docker images (default: `docker.io`). - """) - public String server; - - @ConfigOption - @Description(""" - The username to connect to a private container registry. - """) - public String userName; - - @ConfigOption - @Description(""" - The password to connect to a private container registry. 
- """) - public String password; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRetryConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRetryConfig.java deleted file mode 100644 index 7f4910de4c..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureRetryConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class AzureRetryConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Delay when retrying failed API requests (default: `500ms`). - """) - public Duration delay; - - @ConfigOption - @Description(""" - Jitter value when retrying failed API requests (default: `0.25`). - """) - public double jitter; - - @ConfigOption - @Description(""" - Max attempts when retrying failed API requests (default: `10`). - """) - public int maxAttempts; - - @ConfigOption - @Description(""" - Max delay when retrying failed API requests (default: `90s`). 
- """) - public Duration maxDelay; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureStorageConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureStorageConfig.java deleted file mode 100644 index 5698ac9ef9..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/AzureStorageConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class AzureStorageConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The blob storage account name. - """) - public String accountName; - - @ConfigOption - @Description(""" - The blob storage account key. - """) - public String accountKey; - - @ConfigOption - @Description(""" - The blob storage shared access signature (SAS) token, which can be provided instead of an account key. - """) - public String sasToken; - - @ConfigOption - @Description(""" - The duration of the SAS token generated by Nextflow when the `sasToken` option is *not* specified (default: `48h`). 
- """) - public Duration tokenDuration; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/CondaConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/CondaConfig.java deleted file mode 100644 index 5f8d130b97..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/CondaConfig.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.nio.file.Path; -import java.util.List; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class CondaConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable Conda execution (default: `false`). - """) - public boolean enabled; - - @ConfigOption - @Description(""" - The path where Conda environments are stored. - """) - public Path cacheDir; - - @ConfigOption - @Description(""" - The Conda channels that can be used to resolve Conda packages. - """) - public List channels; - - @ConfigOption - @Description(""" - Extra command line options to append to the `conda create` command. - """) - public String createOptions; - - @ConfigOption - @Description(""" - The amount of time to wait for the Conda environment to be created before failing (default: `20 min`). 
- """) - public Duration createTimeout; - - @ConfigOption - @Description(""" - When `true`, use `mamba` instead of `conda` to create the Conda environments. - - [Read more](https://github.com/mamba-org/mamba) - """) - public boolean useMamba; - - @ConfigOption - @Description(""" - When `true`, use `micromamba` instead of `conda` to create the Conda environments. - - [Read more](https://mamba.readthedocs.io/en/latest/user_guide/micromamba.html) - """) - public boolean useMicromamba; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/Config.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/Config.java index 1f02a67588..1504915cb4 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/Config.java +++ b/modules/nf-lang/src/main/java/nextflow/config/scopes/Config.java @@ -15,168 +15,28 @@ */ package nextflow.config.scopes; -import java.nio.file.Path; - import nextflow.config.schema.ConfigOption; import nextflow.config.schema.ConfigScope; import nextflow.script.dsl.Description; public class Config implements ConfigScope { - // OPTIONS - - @ConfigOption - @Description(""" - The remote work directory used by hybrid workflows. Equivalent to the `-bucket-dir` option of the `run` command. - """) - public Path bucketDir; - - @ConfigOption - @Description(""" - If `true`, on a successful completion of a run all files in *work* directory are automatically deleted. - """) - public boolean cleanup; - - @ConfigOption - @Description(""" - If `true`, dump task hash keys in the log file, for debugging purposes. Equivalent to the `-dump-hashes` option of the `run` command. - """) - public boolean dumpHashes; - - @ConfigOption - @Description(""" - Defines the pipeline output directory. Equivalent to the `-output-dir` option of the `run` command. - """) - public Path outputDir; - - @ConfigOption - @Description(""" - If `true`, enable the use of previously cached task executions. Equivalent to the `-resume` option of the `run` command. 
- """) - public boolean resume; - - @ConfigOption - @Description(""" - The pipeline work directory. Equivalent to the `-work-dir` option of the `run` command. - """) - public Path workDir; - - // SCOPES - - @Description(""" - The `apptainer` scope controls how [Apptainer](https://apptainer.org) containers are executed by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#apptainer) - """) - public ApptainerConfig apptainer; - - @Description(""" - The `aws` scope controls the interactions with AWS, including AWS Batch and S3. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#aws) - """) - public AwsConfig aws; - - @Description(""" - The `azure` scope allows you to configure the interactions with Azure, including Azure Batch and Azure Blob Storage. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#azure) - """) - public AzureConfig azure; - - @Description(""" - The `charliecloud` scope controls how [Charliecloud](https://hpc.github.io/charliecloud/) containers are executed by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#charliecloud) - """) - public CharliecloudConfig charliecloud; - - @Description(""" - The `conda` scope controls the creation of Conda environments by the Conda package manager. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#conda) - """) - public CondaConfig conda; - - @Description(""" - The `dag` scope controls the workflow diagram generated by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#dag) - """) - public DagConfig dag; - - @Description(""" - The `docker` scope controls how [Docker](https://www.docker.com) containers are executed by Nextflow. 
- - - [Read more](https://nextflow.io/docs/latest/reference/config.html#docker) - """) - public DockerConfig docker; - @Description(""" The `env` scope allows you to define environment variables that will be exported into the environment where workflow tasks are executed. [Read more](https://nextflow.io/docs/latest/reference/config.html#env) """) - public EnvConfig env; - - @Description(""" - The `executor` scope controls various executor behaviors. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#executor) - """) - public ExecutorConfig executor; - - @Description(""" - The `fusion` scope provides advanced configuration for the use of the [Fusion file system](https://docs.seqera.io/fusion). - - [Read more](https://nextflow.io/docs/latest/reference/config.html#fusion) - """) - public FusionConfig fusion; - - @Description(""" - The `google` scope allows you to configure the interactions with Google Cloud, including Google Cloud Batch and Google Cloud Storage. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#google) - """) - public GoogleConfig google; - - @Description(""" - The `k8s` scope controls the deployment and execution of workflow applications in a Kubernetes cluster. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#k8s) - """) - public K8sConfig k8s; + public ConfigScope env; - @Description(""" - The `lineage` scope controls the generation of lineage metadata. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#lineage) - """) - public LineageConfig lineage; - - @Description(""" - The `mail` scope controls the mail server used to send email notifications. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#mail) - """) - public MailConfig mail; - - @Description(""" - The `manifest` scope allows you to define some metadata that is useful when publishing or running your pipeline. 
- - [Read more](https://nextflow.io/docs/latest/reference/config.html#manifest) - """) - public Manifest manifest; - - public NextflowConfig nextflow; + // NOTE: `nextflow` config options are inferred from FeatureFlagDsl + public ConfigScope nextflow; @Description(""" The `params` scope allows you to define parameters that will be accessible in the pipeline script. [Read more](https://nextflow.io/docs/latest/reference/config.html#params) """) - public ParamsConfig params; + public ConfigScope params; @ConfigOption @Description(""" @@ -186,83 +46,14 @@ public class Config implements ConfigScope { """) public PluginsDsl plugins; - @Description(""" - The `podman` scope controls how [Podman](https://podman.io/) containers are executed by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#podman) - """) - public PodmanConfig podman; - - public ProcessConfig process; + // NOTE: `process` config options are inferred from ProcessDsl + public ConfigScope process; @Description(""" The `profiles` block allows you to define configuration profiles. A profile is a set of configuration settings that can be applied at runtime with the `-profile` command line option. [Read more](https://nextflow.io/docs/latest/config.html#config-profiles) """) - public ProfilesConfig profiles; - - @Description(""" - The `report` scope allows you to configure the workflow [execution report](https://nextflow.io/docs/latest/tracing.html#execution-report). - - [Read more](https://nextflow.io/docs/latest/reference/config.html#report) - """) - public ReportConfig report; - - @Description(""" - The `shifter` scope controls how [Shifter](https://docs.nersc.gov/programming/shifter/overview/) containers are executed by Nextflow. 
- - [Read more](https://nextflow.io/docs/latest/reference/config.html#shifter) - """) - public ShifterConfig shifter; - - @Description(""" - The `singularity` scope controls how [Singularity](https://sylabs.io/singularity/) containers are executed by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#singularity) - """) - public SingularityConfig singularity; - - @Description(""" - The `spack` scope controls the creation of a Spack environment by the Spack package manager. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#spack) - """) - public SpackConfig spack; - - @Description(""" - The `timeline` scope controls the execution timeline report generated by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#timeline) - """) - public TimelineConfig timeline; - - @Description(""" - The `tower` scope controls the settings for the [Seqera Platform](https://seqera.io) (formerly Tower Cloud). - - [Read more](https://nextflow.io/docs/latest/reference/config.html#tower) - """) - public TowerConfig tower; - - @Description(""" - The `trace` scope controls the layout of the execution trace file generated by Nextflow. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#trace) - """) - public TraceConfig trace; - - @Description(""" - The `wave` scope provides advanced configuration for the use of [Wave containers](https://docs.seqera.io/wave). - - [Read more](https://nextflow.io/docs/latest/reference/config.html#wave) - """) - public WaveConfig wave; - - @Description(""" - The `workflow` scope provides workflow execution options. 
- - [Read more](https://nextflow.io/docs/latest/reference/config.html#workflow) - """) - public WorkflowConfig workflow; + public ConfigScope profiles; } diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/DockerConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/DockerConfig.java deleted file mode 100644 index e66a9932f4..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/DockerConfig.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class DockerConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable Docker execution (default: `false`). - """) - public boolean enabled; - - @ConfigOption - @Description(""" - This attribute can be used to provide any option supported by the Docker engine i.e. `docker [OPTIONS]`. - """) - public String engineOptions; - - @ConfigOption - @Description(""" - Comma separated list of environment variable names to be included in the container environment. - """) - public String envWhitelist; - - @ConfigOption - @Description(""" - Fix ownership of files created by the Docker container. 
- """) - public boolean fixOwnership; - - @ConfigOption - @Description(""" - Use command line options removed since Docker 1.10.0 (default: `false`). - """) - public boolean legacy; - - @ConfigOption - @Description(""" - Add the specified flags to the volume mounts e.g. `'ro,Z'`. - """) - public String mountFlags; - - @ConfigOption - @Description(""" - The registry from where Docker images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. - """) - public String registry; - - @ConfigOption - @Description(""" - Clean up the container after the execution (default: `true`). See the [Docker documentation](https://docs.docker.com/engine/reference/run/#clean-up---rm) for details. - """) - public boolean remove; - - @ConfigOption - @Description(""" - This attribute can be used to provide any extra command line options supported by the `docker run` command. See the [Docker documentation](https://docs.docker.com/engine/reference/run/) for details. - """) - public String runOptions; - - @ConfigOption - @Description(""" - Executes Docker run command as `sudo` (default: `false`). - """) - public boolean sudo; - - @ConfigOption - @Description(""" - Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. - """) - public String temp; - - @ConfigOption - @Description(""" - Allocates a pseudo-tty (default: `false`). 
- """) - public boolean tty; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/EnvConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/EnvConfig.java deleted file mode 100644 index 5b53dc532b..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/EnvConfig.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigScope; - -public class EnvConfig implements ConfigScope { -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorConfig.java deleted file mode 100644 index 75009e36de..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ExecutorConfig.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; -import nextflow.script.types.MemoryUnit; - -public class ExecutorConfig implements ConfigScope { - - @ConfigOption - @Description(""" - *Used only by the SLURM, LSF, PBS and PBS Pro executors.* - - Specify the project or organisation account that should be charged for running the pipeline jobs. - """) - public String account; - - @ConfigOption - @Description(""" - *Used only by the local executor.* - - The maximum number of CPUs made available by the underlying system. - """) - public int cpus; - - @ConfigOption - @Description(""" - Determines how often to log the executor status (default: `5 min`). - """) - public Duration dumpInterval; - - @ConfigOption - @Description(""" - *Used only by grid executors.* - - Determines how long to wait for the `.exitcode` file to be created after the task has completed, before returning an error status (default: `270 sec`). - """) - public Duration exitReadTimeout; - - @ConfigOption - @Description(""" - *Used only by grid executors and Google Batch.* - - Determines the name of jobs submitted to the underlying cluster executor: - ```nextflow - executor.jobName = { "$task.name - $task.hash" } - ``` - """) - public String jobName; - - @ConfigOption - @Description(""" - Determines the number of jobs that can be killed in a single command execution (default: `100`). - """) - public int killBatchSize = 100; - - @ConfigOption - @Description(""" - *Used only by the local executor.* - - The maximum amount of memory made available by the underlying system. - """) - public MemoryUnit memory; - - @ConfigOption - @Description(""" - The name of the executor to be used (default: `local`). 
- """) - public String name; - - @ConfigOption - @Description(""" - *Used only by the [SLURM](https://nextflow.io/docs/latest/executor.html#slurm) executor.* - - When `true`, memory allocations for SLURM jobs are specified as `--mem-per-cpu ` instead of `--mem `. - """) - public boolean perCpuMemAllocation; - - @ConfigOption - @Description(""" - *Used only by the [LSF](https://nextflow.io/docs/latest/executor.html#lsf) executor.* - - Enables the *per-job* memory limit mode for LSF jobs. - """) - public boolean perJobMemLimit; - - @ConfigOption - @Description(""" - *Used only by the [LSF](https://nextflow.io/docs/latest/executor.html#lsf) executor.* - - Enables the *per-task* memory reserve mode for LSF jobs. - """) - public boolean perTaskReserve; - - @ConfigOption - @Description(""" - Determines how often to check for process termination. Default varies for each executor. - """) - public Duration pollInterval; - - @ConfigOption - @Description(""" - Determines how job status is retrieved. When `false` only the queue associated with the job execution is queried. When `true` the job status is queried globally i.e. irrespective of the submission queue (default: `false`). - """) - public boolean queueGlobalStatus; - - @ConfigOption - @Description(""" - The number of tasks the executor will handle in a parallel manner. A queue size of zero corresponds to no limit. Default varies for each executor. - """) - public Integer queueSize; - - @ConfigOption - @Description(""" - *Used only by grid executors.* - - Determines how often to fetch the queue status from the scheduler (default: `1 min`). - """) - public Duration queueStatInterval; - - @Description(""" - The `executor.retry` scope controls the behavior of retrying failed job submissions. 
- - [Read more](https://nextflow.io/docs/latest/reference/config.html#executor) - """) - public ExecutorRetryConfig retry; - - @ConfigOption - @Description(""" - Determines the max rate of job submission per time unit, for example `'10sec'` (10 jobs per second) or `'50/2min'` (50 jobs every 2 minutes) (default: unlimited). - """) - public String submitRateLimit; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/FusionConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/FusionConfig.java deleted file mode 100644 index a46ca8837e..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/FusionConfig.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.MemoryUnit; - -public class FusionConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable/disable the use of Fusion file system. - """) - public boolean enabled; - - @ConfigOption - @Description(""" - The maximum size of the local cache used by the Fusion client. - """) - public MemoryUnit cacheSize; - - @ConfigOption - @Description(""" - The URL from where the container layer provisioning the Fusion client is downloaded. 
- """) - public String containerConfigUrl; - - @ConfigOption - @Description(""" - When `true` the access credentials required by the underlying object storage are exported to the task execution environment. - """) - public boolean exportStorageCredentials; - - @ConfigOption - @Description(""" - The level of logging emitted by the Fusion client. - """) - public String logLevel; - - @ConfigOption - @Description(""" - Where the logging output is written. - """) - public String logOutput; - - @ConfigOption - @Description(""" - Enables the use of privileged containers when using Fusion (default: `true`). - """) - public boolean privileged; - - @ConfigOption - @Description(""" - Enable Fusion snapshotting (preview, default: `false`). This feature allows Fusion to automatically restore a job when it is interrupted by a spot reclamation. - """) - public boolean snapshots; - - @ConfigOption - @Description(""" - The pattern that determines how tags are applied to files created via the Fusion client (default: `[.command.*|.exitcode|.fusion.*](nextflow.io/metadata=true),[*](nextflow.io/temporary=true)`). Set to `false` to disable tags. - """) - public String tags; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleBatchConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleBatchConfig.java deleted file mode 100644 index 888642b8d3..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleBatchConfig.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.util.List; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.MemoryUnit; - -public class GoogleBatchConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The set of allowed locations for VMs to be provisioned (default: no restriction). - - [Read more](https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#locationpolicy) - """) - public List allowedLocations; - - @ConfigOption - @Description(""" - The size of the virtual machine boot disk, e.g `50.GB` (default: none). - """) - public MemoryUnit bootDiskSize; - - @ConfigOption - @Description(""" - The minimum CPU Platform, e.g. `'Intel Skylake'` (default: none). - - [Read more](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#specifications) - """) - public String cpuPlatform; - - @ConfigOption - @Description(""" - List of custom mount options for `gcsfuse` (default: `['-o rw', '-implicit-dirs']`). - """) - public List gcsfuseOptions; - - @ConfigOption - @Description(""" - Max number of execution attempts of a job interrupted by a Compute Engine spot reclaim event (default: `5`). - """) - public int maxSpotAttempts; - - @ConfigOption - @Description(""" - The URL of an existing network resource to which the VM will be attached. 
- """) - public String network; - - @ConfigOption - @Description(""" - The network tags to be applied to the instances created by Google Batch jobs (e.g., `['allow-ssh', 'allow-http']`). - - [Read more](https://cloud.google.com/vpc/docs/add-remove-network-tags) - """) - public List networkTags; - - @ConfigOption - @Description(""" - The Google service account email to use for the pipeline execution. If not specified, the default Compute Engine service account for the project will be used. - - [Read more](https://www.nextflow.io/docs/latest/google.html#credentials) - """) - public String serviceAccountEmail; - - @ConfigOption - @Description(""" - When `true`, enables the usage of *spot* virtual machines (default: `false`). - """) - public boolean spot; - - @ConfigOption - @Description(""" - The URL of an existing subnetwork resource in the network to which the VM will be attached. - """) - public String subnetwork; - - @ConfigOption - @Description(""" - When `true`, the VM will *not* be provided with a public IP address, and only contain an internal IP. - """) - public boolean usePrivateAddress; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleConfig.java deleted file mode 100644 index 10bb8f53dc..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/GoogleConfig.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class GoogleConfig implements ConfigScope { - - public GoogleBatchConfig batch; - - @ConfigOption - @Description(""" - When `true`, the given Google Cloud project ID is used as the billing project for storage access (default: `false`). Required when accessing data from *requester pays enabled* buckets. - - [Read more](https://cloud.google.com/storage/docs/requester-pays) - """) - public boolean enableRequesterPaysBuckets; - - @ConfigOption - @Description(""" - The HTTP connection timeout for Cloud Storage API requests (default: `'60s'`). - """) - public Duration httpConnectTimeout; - - @ConfigOption - @Description(""" - The HTTP read timeout for Cloud Storage API requests (default: `'60s'`). - """) - public Duration httpReadTimeout; - - @ConfigOption - @Description(""" - The Google Cloud location where jobs are executed (default: `us-central1`). - """) - public String location; - - @ConfigOption - @Description(""" - The Google Cloud project ID to use for pipeline execution. - """) - public String project; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/K8sConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/K8sConfig.java deleted file mode 100644 index 6c0da62017..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/K8sConfig.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.util.List; -import java.util.Map; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class K8sConfig implements ConfigScope { - - public K8sRetryConfig retryPolicy; - - @ConfigOption - @Description(""" - When `true`, host paths are automatically mounted into the task pods (default: `false`). Only intended for development purposes when using a single node. - """) - public boolean autoMountHostPaths; - - @ConfigOption - @Description(""" - Whether to use Kubernetes `Pod` or `Job` resource type to carry out Nextflow tasks (default: `Pod`). - """) - public String computeResourceType; - - @ConfigOption - @Description(""" - The Kubernetes [configuration context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) to use. - """) - public String context; - - @ConfigOption - @Description(""" - When `true`, both the pod CPU `request` and `limit` are set to the `cpus` directive, otherwise only the `request` is set (default: `false`). - """) - public boolean cpuLimits; - - // @ConfigOption(""" - // When `true`, the pod spec for each task is saved to `.command.yaml` in the task directory (default: `false`). - // """) - // public boolean debugYaml; - - @ConfigOption - @Description(""" - When `true`, includes the hostname of each task in the execution trace (default: `false`). 
- """) - public boolean fetchNodeName; - - @ConfigOption - @Description(""" - The FUSE device plugin to be used when enabling Fusion in unprivileged mode (default: `['nextflow.io/fuse': 1]`). - """) - public Map fuseDevicePlugin; - - @ConfigOption - @Description(""" - The Kubernetes HTTP client request connection timeout e.g. `'60s'`. - """) - public Duration httpConnectTimeout; - - @ConfigOption - @Description(""" - The Kubernetes HTTP client request connection read timeout e.g. `'60s'`. - """) - public Duration httpReadTimeout; - - @ConfigOption - @Description(""" - The path where the workflow is launched and the user data is stored (default: `/`). Must be a path in a shared K8s persistent volume. - """) - public String launchDir; - - @ConfigOption - @Description(""" - The Kubernetes namespace to use (default: `default`). - """) - public String namespace; - - @ConfigOption - @Description(""" - Allows the definition of one or more pod configuration options such as environment variables, config maps, secrets, etc. Allows the same settings as the [pod](https://nextflow.io/docs/latest/process.html#pod) process directive. - """) - public List pod; - - @ConfigOption - @Description(""" - The path where Nextflow projects are downloaded (default: `/projects`). Must be a path in a shared K8s persistent volume. - """) - public String projectDir; - - @ConfigOption - @Description(""" - The strategy for pulling container images. Can be `IfNotPresent`, `Always`, `Never`. - - [Read more](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) - """) - public String pullPolicy; - - @ConfigOption - @Description(""" - The user ID to be used to run the containers. Shortcut for the `securityContext` option. - """) - public String runAsUser; - - @ConfigOption - @Description(""" - The [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) to use for all pods. 
- """) - public Map securityContext; - - @ConfigOption - @Description(""" - The Kubernetes [service account name](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to use. - """) - public String serviceAccount; - - @ConfigOption - @Description(""" - The name of the persistent volume claim where the shared work directory is stored. - """) - public String storageClaimName; - - @ConfigOption - @Description(""" - The mount path for the persistent volume claim (default: `/workspace`). - """) - public String storageMountPath; - - @ConfigOption - @Description(""" - The path in the persistent volume to be mounted (default: `/`). - """) - public String storageSubPath; - - @ConfigOption - @Description(""" - The path of the shared work directory (default: `/work`). Must be a path in a shared K8s persistent volume. - """) - public String workDir; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/K8sRetryConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/K8sRetryConfig.java deleted file mode 100644 index f86d6a04b2..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/K8sRetryConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class K8sRetryConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Delay when retrying failed API requests (default: `500ms`). - """) - public Duration delay; - - @ConfigOption - @Description(""" - Jitter value when retrying failed API requests (default: `0.25`). - """) - public double jitter; - - @ConfigOption - @Description(""" - Max attempts when retrying failed API requests (default: `10`). - """) - public int maxAttempts; - - @ConfigOption - @Description(""" - Max delay when retrying failed API requests (default: `90s`). - """) - public Duration maxDelay; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/LineageConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/LineageConfig.java deleted file mode 100644 index 7a901eb425..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/LineageConfig.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class LineageConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable generation of lineage metadata (default: `false`). - """) - public boolean enabled; - -} - -class LineageStoreConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The location of the lineage metadata store (default: `./.lineage`). - """) - public String location; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/MailConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/MailConfig.java deleted file mode 100644 index 4f58378bf3..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/MailConfig.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class MailConfig implements ConfigScope { - - @ConfigOption - @Description(""" - When `true` enables Java Mail logging for debugging purpose. - """) - public boolean debug; - - @ConfigOption - @Description(""" - Default email sender address. 
- """) - public String from; - - @Description(""" - The `mail.smtp` scope supports any SMTP configuration property in the [Java Mail API](https://javaee.github.io/javamail/). - - [Read more](https://javaee.github.io/javamail/docs/api/com/sun/mail/smtp/package-summary.html#properties) - """) - public MailSmtpConfig smtp; - -} - -class MailSmtpConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Host name of the mail server. - """) - public String host; - - @ConfigOption - @Description(""" - User password to connect to the mail server. - """) - public String password; - - @ConfigOption - @Description(""" - Port number of the mail server. - """) - public int port; - - @ConfigOption - @Description(""" - User name to connect to the mail server. - """) - public String user; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/Manifest.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/Manifest.java deleted file mode 100644 index 60d7682f9b..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/Manifest.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package nextflow.config.scopes; - -import java.util.List; -import java.util.Map; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class Manifest implements ConfigScope { - - @ConfigOption - @Description(""" - Project author name (use a comma to separate multiple names). - """) - public String author; - - @ConfigOption - @Description(""" - List of project contributors. Should be a list of maps. - """) - public List contributors; - - @ConfigOption - @Description(""" - Git repository default branch (default: `master`). - """) - public String defaultBranch; - - @ConfigOption - @Description(""" - Free text describing the workflow project. - """) - public String description; - - @ConfigOption - @Description(""" - Project documentation URL. - """) - public String docsUrl; - - @ConfigOption - @Description(""" - Project related publication DOI identifier. - """) - public String doi; - - @ConfigOption - @Description(""" - Project home page URL. - """) - public String homePage; - - @ConfigOption - @Description(""" - Project related icon location (Relative path or URL). - """) - public String icon; - - @ConfigOption - @Description(""" - Project license. - """) - public String license; - - @ConfigOption - @Description(""" - Project main script (default: `main.nf`). - """) - public String mainScript; - - @ConfigOption - @Description(""" - Project short name. - """) - public String name; - - @ConfigOption - @Description(""" - Minimum required Nextflow version. - """) - public String nextflowVersion; - - @ConfigOption - @Description(""" - Project organization. - """) - public String organization; - - @ConfigOption - @Description(""" - Pull submodules recursively from the Git repository. - """) - public boolean recurseSubmodules; - - @ConfigOption - @Description(""" - Project version number. 
- """) - public String version; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/NextflowConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/NextflowConfig.java deleted file mode 100644 index c4da097bbb..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/NextflowConfig.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigScope; - -public class NextflowConfig implements ConfigScope { - - // NOTE: `nextflow` config options are inferred from FeatureFlagDsl - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ParamsConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/ParamsConfig.java deleted file mode 100644 index 37e8f212de..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ParamsConfig.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigScope; - -public class ParamsConfig implements ConfigScope { -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/PodmanConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/PodmanConfig.java deleted file mode 100644 index 06649e92b7..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/PodmanConfig.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class PodmanConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable Podman execution (default: `false`). - """) - public boolean enabled; - - @ConfigOption - @Description(""" - This attribute can be used to provide any option supported by the Podman engine i.e. `podman [OPTIONS]`. 
- """) - public String engineOptions; - - @ConfigOption - @Description(""" - Comma separated list of environment variable names to be included in the container environment. - """) - public String envWhitelist; - - @ConfigOption - @Description(""" - Add the specified flags to the volume mounts e.g. `'ro,Z'`. - """) - public String mountFlags; - - @ConfigOption - @Description(""" - The registry from where container images are pulled. It should be only used to specify a private registry server. It should NOT include the protocol prefix i.e. `http://`. - """) - public String registry; - - @ConfigOption - @Description(""" - Clean-up the container after the execution (default: `true`). - """) - public boolean remove; - - @ConfigOption - @Description(""" - This attribute can be used to provide any extra command line options supported by the `podman run` command. - """) - public String runOptions; - - @ConfigOption - @Description(""" - Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. - """) - public String temp; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ProcessConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/ProcessConfig.java deleted file mode 100644 index a9b2f771d4..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ProcessConfig.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigScope; - -public class ProcessConfig implements ConfigScope { - - // NOTE: `process` config options are inferred from ProcessDsl - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ProfilesConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/ProfilesConfig.java deleted file mode 100644 index da77b7950a..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ProfilesConfig.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigScope; - -public class ProfilesConfig implements ConfigScope { - - // NOTE: only used to provide completions, hover hints - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ReportConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/ReportConfig.java deleted file mode 100644 index 2cc1b12f9a..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ReportConfig.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class ReportConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable the creation of the workflow execution report. - """) - public boolean enabled; - - @ConfigOption - @Description(""" - The path of the created execution report file (default: `'report-.html'`). - """) - public String file; - - @ConfigOption - @Description(""" - When `true` overwrites any existing report file with the same name. - """) - public boolean overwrite; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/ShifterConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/ShifterConfig.java deleted file mode 100644 index fb84db55f3..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/ShifterConfig.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class ShifterConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Enable Shifter execution (default: `false`). - """) - public boolean enabled; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/SpackConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/SpackConfig.java deleted file mode 100644 index 9392e2eea2..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/SpackConfig.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.nio.file.Path; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class SpackConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The path where Spack environments are stored. - """) - public Path cacheDir; - - @ConfigOption - @Description(""" - Enables checksum verification for source tarballs (default: `true`). - """) - public boolean checksum; - - @ConfigOption - @Description(""" - The amount of time to wait for the Spack environment to be created before failing (default: `60 min`). 
- """) - public Duration createTimeout; - - @ConfigOption - @Description(""" - The maximum number of parallel package builds (default: the number of available CPUs). - """) - public int parallelBuilds; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/TimelineConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/TimelineConfig.java deleted file mode 100644 index 15cdd2e011..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/TimelineConfig.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class TimelineConfig implements ConfigScope { - - @ConfigOption - @Description(""" - When `true` enables the generation of the timeline report file (default: `false`). - """) - public boolean enabled; - - @ConfigOption - @Description(""" - Timeline file name (default: `'timeline-.html'`). - """) - public String file; - - @ConfigOption - @Description(""" - When `true` overwrites any existing timeline file with the same name. 
- """) - public boolean overwrite; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/TraceConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/TraceConfig.java deleted file mode 100644 index 045f16fae7..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/TraceConfig.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class TraceConfig implements ConfigScope { - - @ConfigOption - @Description(""" - When `true`, enables the creation of the execution trace report file (default: `false`). - """) - public boolean enabled; - - @ConfigOption - @Description(""" - Comma separated list of fields to be included in the report. - - [Read more](https://nextflow.io/docs/latest/tracing.html#trace-report) - """) - public String fields; - - @ConfigOption - @Description(""" - Trace file name (default: `'trace-.txt'`). - """) - public String file; - - @ConfigOption - @Description(""" - When `true`, overwrites any existing trace file with the same name. - """) - public boolean overwrite; - - @ConfigOption - @Description(""" - When `true`, uses raw number formatting i.e. durations are reported in milliseconds and memory in bytes. 
- """) - public boolean raw; - - @ConfigOption - @Description(""" - Character used to separate values in each row (default: `\t`). - """) - public String sep; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveBuildConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveBuildConfig.java deleted file mode 100644 index 7d9cbce41f..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveBuildConfig.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class WaveBuildConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The container repository where images built by Wave are uploaded. - """) - public String repository; - - @ConfigOption - @Description(""" - The container repository used to cache image layers built by the Wave service. - """) - public String cacheRepository; - - public WaveBuildCondaConfig conda; - - public WaveBuildSpackConfig spack; - - public WaveBuildCompression compression; - -} - -class WaveBuildCondaConfig implements ConfigScope { - - @ConfigOption - @Description(""" - One or more Conda packages to be always added in the resulting container (default: `conda-forge::procps-ng`). 
- """) - public String basePackages; - - @ConfigOption - @Description(""" - One or more commands to be added to the Dockerfile used to build a Conda based image. - """) - public String commands; - - @ConfigOption - @Description(""" - The Mamba container image that is used to build Conda based container. - """) - public String mambaImage; - -} - -class WaveBuildSpackConfig implements ConfigScope { - - @ConfigOption - @Description(""" - One or more Spack packages to be always added in the resulting container. - """) - public String basePackages; - - @ConfigOption - @Description(""" - One or more commands to be added to the Dockerfile used to build a Spack based image. - """) - public String commands; - -} - -class WaveBuildCompression implements ConfigScope { - @ConfigOption - @Description(""" - Defines the compression algorithm that should be used when building the container. Allowed values are: `gzip`, `estargz` and `zstd` (default: `gzip`). - """) - public String mode; - - @ConfigOption - @Description(""" - Level of compression used when building a container depending the chosen algorithm: gzip, estargz (0-9) and zstd (0-22). - """) - public Integer level; - - @ConfigOption - @Description(""" - Forcefully apply compression option to all layers, including already existing layers (default: `false`). - """) - public boolean force; -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveConfig.java deleted file mode 100644 index f5514ea589..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveConfig.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class WaveConfig implements ConfigScope { - - public WaveBuildConfig build; - - @ConfigOption - @Description(""" - Enable the use of Wave containers. - """) - public boolean enabled; - - @ConfigOption - @Description(""" - The Wave service endpoint (default: `https://wave.seqera.io`). - """) - public String endpoint; - - @ConfigOption - @Description(""" - Enables Wave container freezing. Wave will provision a non-ephemeral container image that will be pushed to a container repository of your choice. - - See also: `wave.build.repository` and `wave.build.cacheRepository` - """) - public boolean freeze; - - public WaveHttpConfig http; - - @ConfigOption - @Description(""" - Enables Wave container mirroring. - """) - public boolean mirror; - - public WaveRetryConfig retryPolicy; - - public WaveScanConfig scan; - - @ConfigOption - @Description(""" - The strategy to be used when resolving multiple Wave container requirements (default: `'container,dockerfile,conda,spack'`). 
- """) - public String strategy; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveHttpConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveHttpConfig.java deleted file mode 100644 index 07cb972f79..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveHttpConfig.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class WaveHttpConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The connection timeout for the Wave HTTP client (default: `30s`). - """) - public Duration connectTime; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveRetryConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveRetryConfig.java deleted file mode 100644 index 9fa004bb2e..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveRetryConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; -import nextflow.script.types.Duration; - -public class WaveRetryConfig implements ConfigScope { - - @ConfigOption - @Description(""" - The initial delay when a failing HTTP request is retried (default: `150ms`). - """) - public Duration delay; - - @ConfigOption - @Description(""" - The jitter factor used to randomly vary retry delays (default: `0.25`). - """) - public double jitter; - - @ConfigOption - @Description(""" - The max number of attempts a failing HTTP request is retried (default: `5`). - """) - public int maxAttempts; - - @ConfigOption - @Description(""" - The max delay when a failing HTTP request is retried (default: `90 seconds`). - """) - public Duration maxDelay; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveScanConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveScanConfig.java deleted file mode 100644 index 8582f50fa6..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WaveScanConfig.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class WaveScanConfig implements ConfigScope { - - @ConfigOption - @Description(""" - Determines the allowed security levels when scanning containers for security vulnerabilities. - """) - public String allowedLevels; - - @ConfigOption - @Description(""" - Determines the container security scanning execution modality. - """) - public String mode; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowConfig.java deleted file mode 100644 index 824b8b87b6..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowConfig.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package nextflow.config.scopes; - -import groovy.lang.Closure; -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class WorkflowConfig implements ConfigScope { - - @ConfigOption - @Description(""" - When `true`, the pipeline will exit with a non-zero exit code if any failed tasks are ignored using the `ignore` error strategy. - """) - public boolean failOnIgnore; - - @ConfigOption - @Description(""" - Specify a closure that will be invoked at the end of a workflow run (including failed runs). - """) - public Closure onComplete; - - @ConfigOption - @Description(""" - Specify a closure that will be invoked if a workflow run is terminated. - """) - public Closure onError; - - public WorkflowOutputConfig output; - - @Description(""" - The `workflow.output` scope provides options for publishing workflow outputs. - - [Read more](https://nextflow.io/docs/latest/reference/config.html#workflow) - """) - public WorkflowOutputConfig workflowOutput; - -} diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowOutputConfig.java b/modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowOutputConfig.java deleted file mode 100644 index 7a237d1ed9..0000000000 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/WorkflowOutputConfig.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2024-2025, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package nextflow.config.scopes; - -import java.util.Map; - -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; - -public class WorkflowOutputConfig implements ConfigScope { - - @ConfigOption - @Description(""" - *Currently only supported for S3.* - - Specify the media type a.k.a. [MIME type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_Types) of published files (default: `false`). Can be a string (e.g. `'text/html'`), or `true` to infer the content type from the file extension. - """) - public Object contentType; - - @ConfigOption - @Description(""" - Enable or disable publishing (default: `true`). - """) - public boolean enabled; - - @ConfigOption - @Description(""" - When `true`, the workflow will not fail if a file can't be published for some reason (default: `false`). - """) - public boolean ignoreErrors; - - @ConfigOption - @Description(""" - The file publishing method (default: `'symlink'`). - """) - public String mode; - - @ConfigOption - @Description(""" - When `true` any existing file in the specified folder will be overwritten (default: `'standard'`). - """) - public Object overwrite; - - @ConfigOption - @Description(""" - *Currently only supported for S3.* - - Specify the storage class for published files. - """) - public String storageClass; - - @ConfigOption - @Description(""" - *Currently only supported for S3.* - - Specify arbitrary tags for published files. 
- """) - public Map tags; - -} diff --git a/modules/nf-lineage/src/main/nextflow/lineage/config/LineageConfig.groovy b/modules/nf-lineage/src/main/nextflow/lineage/config/LineageConfig.groovy index 949dda48b3..c73ffdc7de 100644 --- a/modules/nf-lineage/src/main/nextflow/lineage/config/LineageConfig.groovy +++ b/modules/nf-lineage/src/main/nextflow/lineage/config/LineageConfig.groovy @@ -18,27 +18,40 @@ package nextflow.lineage.config import groovy.transform.CompileStatic import groovy.transform.ToString -import groovy.util.logging.Slf4j import nextflow.Global import nextflow.Session +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description /** * Model workflow data lineage config * * @author Paolo Di Tommaso */ -@Slf4j +@ScopeName("lineage") +@Description(""" + The `lineage` scope controls the generation of lineage metadata. +""") @ToString @CompileStatic -class LineageConfig { +class LineageConfig implements ConfigScope { final LineageStoreOpts store + @ConfigOption + @Description(""" + Enable generation of lineage metadata (default: `false`). + """) final boolean enabled + /* required by extension point -- do not remove */ + LineageConfig() {} + LineageConfig(Map opts) { - this.store = new LineageStoreOpts(opts.store as Map ?: Map.of()) - this.enabled = opts.enabled as boolean ?: false + this.store = new LineageStoreOpts(opts.store as Map ?: Collections.emptyMap()) + this.enabled = opts.enabled as boolean } static Map asMap() { @@ -46,9 +59,13 @@ class LineageConfig { return result != null ? 
result : new HashMap() } + static LineageConfig create(Map config) { + new LineageConfig(config.lineage as Map ?: Collections.emptyMap()) + } + static LineageConfig create(Session session) { if( session ) { - return new LineageConfig( session.config.navigate('lineage') as Map ?: Map.of()) + return LineageConfig.create(session.config) } else throw new IllegalStateException("Missing Nextflow session") diff --git a/modules/nf-lineage/src/main/nextflow/lineage/config/LineageStoreOpts.groovy b/modules/nf-lineage/src/main/nextflow/lineage/config/LineageStoreOpts.groovy index 9fff4a4071..4e70a6fef7 100644 --- a/modules/nf-lineage/src/main/nextflow/lineage/config/LineageStoreOpts.groovy +++ b/modules/nf-lineage/src/main/nextflow/lineage/config/LineageStoreOpts.groovy @@ -18,6 +18,9 @@ package nextflow.lineage.config import groovy.transform.CompileStatic import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description /** * Model data store options @@ -26,8 +29,12 @@ import groovy.transform.ToString */ @ToString @CompileStatic -class LineageStoreOpts { +class LineageStoreOpts implements ConfigScope { + @ConfigOption + @Description(""" + The location of the lineage metadata store (default: `./.lineage`). 
+ """) final String location LineageStoreOpts(Map opts) { diff --git a/modules/nf-lineage/src/resources/META-INF/extensions.idx b/modules/nf-lineage/src/resources/META-INF/extensions.idx index 5b327e222d..19bef36056 100644 --- a/modules/nf-lineage/src/resources/META-INF/extensions.idx +++ b/modules/nf-lineage/src/resources/META-INF/extensions.idx @@ -18,3 +18,4 @@ nextflow.lineage.DefaultLinStoreFactory nextflow.lineage.LinExtensionImpl nextflow.lineage.LinObserverFactory nextflow.lineage.cli.LinCommandImpl +nextflow.lineage.config.LineageConfig diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy index 8d1e86a5fc..6684ba2075 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchExecutor.groovy @@ -24,7 +24,6 @@ import software.amazon.awssdk.services.batch.BatchClient import software.amazon.awssdk.services.batch.model.BatchException import software.amazon.awssdk.services.ecs.model.AccessDeniedException import software.amazon.awssdk.services.cloudwatchlogs.model.ResourceNotFoundException -import groovy.transform.CompileDynamic import groovy.transform.CompileStatic import groovy.transform.PackageScope import groovy.util.logging.Slf4j @@ -194,13 +193,14 @@ class AwsBatchExecutor extends Executor implements ExtensionPoint, TaskArrayExec reaper = createExecutorService('AWSBatch-reaper') - final pollInterval = session.getPollInterval(name, Duration.of('10 sec')) - final dumpInterval = session.getMonitorDumpInterval(name) - final capacity = session.getQueueSize(name, 1000) + final pollInterval = config.getPollInterval(name, Duration.of('10 sec')) + final dumpInterval = config.getMonitorDumpInterval(name) + final capacity = config.getQueueSize(name, 1000) final def params = [ name: name, session: session, + config: config, pollInterval: pollInterval, 
dumpInterval: dumpInterval, capacity: capacity @@ -234,7 +234,7 @@ class AwsBatchExecutor extends Executor implements ExtensionPoint, TaskArrayExec // queue size can be overridden by submitter options below final qs = 5_000 - final limit = session.getExecConfigProp(name,'submitRateLimit','50/s') as String + final limit = config.getExecConfigProp(name, 'submitRateLimit', '50/s') as String final size = Runtime.runtime.availableProcessors() * 5 final opts = new ThrottlingExecutor.Options() @@ -247,17 +247,11 @@ class AwsBatchExecutor extends Executor implements ExtensionPoint, TaskArrayExec .withKeepAlive(Duration.of('1 min')) .withAutoThrottle(true) .withMaxRetries(10) - .withOptions( getConfigOpts() ) .withPoolName(name) ThrottlingExecutor.create(opts) } - @CompileDynamic - protected Map getConfigOpts() { - session.config?.executor?.submitter as Map - } - @Override boolean isFusionEnabled() { return FusionHelper.isFusionEnabled(session) diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchScriptLauncher.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchScriptLauncher.groovy index 5b8ef302ea..6568e93832 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchScriptLauncher.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/batch/AwsBatchScriptLauncher.groovy @@ -17,6 +17,7 @@ package nextflow.cloud.aws.batch import groovy.transform.CompileStatic +import nextflow.container.ContainerHelper import nextflow.executor.BashWrapperBuilder import nextflow.processor.TaskBean import nextflow.processor.TaskRun @@ -47,6 +48,6 @@ class AwsBatchScriptLauncher extends BashWrapperBuilder { @Override protected boolean fixOwnership() { - return containerConfig?.fixOwnership + return ContainerHelper.fixOwnership(containerConfig) } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsBatchConfig.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsBatchConfig.groovy index af1480e7c7..638a36f9f2 
100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsBatchConfig.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsBatchConfig.groovy @@ -24,6 +24,9 @@ import groovy.util.logging.Slf4j import nextflow.SysEnv import nextflow.cloud.CloudTransferOptions import nextflow.cloud.aws.batch.AwsOptions +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.exception.ProcessUnrecoverableException import nextflow.util.Duration @@ -34,53 +37,93 @@ import nextflow.util.Duration */ @Slf4j @CompileStatic -class AwsBatchConfig implements CloudTransferOptions { +class AwsBatchConfig implements CloudTransferOptions, ConfigScope { public static final int DEFAULT_AWS_MAX_ATTEMPTS = 5 - private int maxParallelTransfers = MAX_TRANSFER - - private int maxTransferAttempts = MAX_TRANSFER_ATTEMPTS - - private Duration delayBetweenAttempts = DEFAULT_DELAY_BETWEEN_ATTEMPTS - - private String cliPath - - private String retryMode - - private Integer maxSpotAttempts - - private Boolean debug - - /** - * The job role ARN that should be used - */ - private String jobRole - - /** - * The name of the logs group used by jobs - */ - private String logsGroup - - /** - * Volume mounts - */ - private List volumes - - /** - * The share identifier for all tasks when using fair-share scheduling - */ - private String shareIdentifier - - /** - * The scheduling priority for all tasks when using fair-share scheduling (0 to 9999) - */ - private Integer schedulingPriority - - /** - * The container execution role - */ - String executionRole + @ConfigOption + @Description(""" + The path where the AWS command line tool is installed in the host AMI. + """) + final String cliPath + + @ConfigOption + @Description(""" + Delay between download attempts from S3 (default: `10 sec`). 
+ """) + final Duration delayBetweenAttempts + + @ConfigOption + @Description(""" + The AWS Batch [Execution Role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) ARN that needs to be used to execute the Batch Job. It is mandatory when using AWS Fargate. + """) + final String executionRole + + @ConfigOption + @Description(""" + The AWS Batch Job Role ARN that needs to be used to execute the Batch Job. + """) + final String jobRole + + @ConfigOption + @Description(""" + The name of the logs group used by Batch Jobs (default: `/aws/batch/job`). + """) + final String logsGroup + + @ConfigOption + @Description(""" + Max parallel upload/download transfer operations *per job* (default: `4`). + """) + final int maxParallelTransfers + + @ConfigOption + @Description(""" + Max number of execution attempts of a job interrupted by a EC2 Spot reclaim event (default: `0`) + """) + final Integer maxSpotAttempts + + @ConfigOption + @Description(""" + Max number of downloads attempts from S3 (default: `1`). + """) + final int maxTransferAttempts + + @ConfigOption + @Description(""" + The compute platform type used by AWS Batch. Can be either `ec2` or `fargate`. Set to `fargate` to use [AWS Fargate](https://docs.aws.amazon.com/batch/latest/userguide/fargate.html). + """) + final String platformType + + @ConfigOption + @Description(""" + The [retry mode](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-retries.html) used to handle rate-limiting by AWS APIs. Can be one of `standard`, `legacy`, `adaptive`, or `built-in` (default: `standard`). + """) + final String retryMode + + @ConfigOption + @Description(""" + The scheduling priority for all tasks when using [fair-share scheduling](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/) (default: `0`). 
+ """) + final Integer schedulingPriority + + @ConfigOption + @Description(""" + The share identifier for all tasks when using [fair-share scheduling](https://aws.amazon.com/blogs/hpc/introducing-fair-share-scheduling-for-aws-batch/). + """) + final String shareIdentifier + + @ConfigOption + @Description(""" + When `true`, jobs that cannot be scheduled due to lack of resources or misconfiguration are terminated and handled as task failures (default: `false`). + """) + final boolean terminateUnschedulableJobs + + @ConfigOption + @Description(""" + List of container mounts. Mounts can be specified as simple e.g. `/some/path` or canonical format e.g. `/host/path:/mount/path[:ro|rw]`. + """) + final List volumes /** * The path for the `s5cmd` tool as an alternative to `aws s3` CLI to upload/download files @@ -92,11 +135,6 @@ class AwsBatchConfig implements CloudTransferOptions { */ boolean fargateMode - /** - * Flag to fail and terminate unscheduled jobs. - */ - boolean terminateUnschedulableJobs - AwsBatchConfig(Map opts) { fargateMode = opts.platformType == 'fargate' cliPath = !fargateMode ? 
parseCliPath(opts.cliPath as String) : null @@ -119,56 +157,6 @@ class AwsBatchConfig implements CloudTransferOptions { log.warn "Unexpected value for 'aws.batch.retryMode' config setting - offending value: $retryMode - valid values: ${AwsOptions.VALID_RETRY_MODES.join(',')}" } - // ==== getters ===== - - String getCliPath() { - return cliPath - } - - int getMaxParallelTransfers() { - return maxParallelTransfers - } - - int getMaxTransferAttempts() { - return maxTransferAttempts - } - - Duration getDelayBetweenAttempts() { - return delayBetweenAttempts - } - - String getRetryMode() { - return retryMode - } - - Integer getMaxSpotAttempts() { - return maxSpotAttempts - } - - Boolean getDebug() { - return debug - } - - String getJobRole() { - return jobRole - } - - String getLogsGroup() { - return logsGroup - } - - List getVolumes() { - return volumes - } - - String getShareIdentifier() { - return shareIdentifier - } - - Integer getSchedulingPriority() { - return schedulingPriority - } - protected int defaultMaxTransferAttempts() { final env = SysEnv.get() return env.AWS_MAX_ATTEMPTS ? 
env.AWS_MAX_ATTEMPTS as int : DEFAULT_AWS_MAX_ATTEMPTS @@ -205,8 +193,6 @@ class AwsBatchConfig implements CloudTransferOptions { AwsBatchConfig addVolume(Path path) { assert path.scheme == 'file' - if( volumes == null ) - volumes = new ArrayList(10) def location = path.toString() if( !volumes.contains(location) ) volumes.add(location) diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy index 869c577729..f5719c1af0 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsConfig.groovy @@ -25,61 +25,73 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Global import nextflow.SysEnv +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description import nextflow.util.IniFile /** * Model AWS cloud configuration settings * * @author Paolo Di Tommaso */ +@ScopeName("aws") +@Description(""" + The `aws` scope controls the interactions with AWS, including AWS Batch and S3. 
+""") @Slf4j @CompileStatic -class AwsConfig { - - private AwsBatchConfig batchConfig - - private AwsS3Config s3config - - private String region - - private String accessKey - - private String secretKey - - private String profile - - private AwsS3Legacy s3Legacy - - AwsConfig(Map config) { - this.accessKey = config.accessKey - this.secretKey = config.secretKey - this.profile = getAwsProfile0(SysEnv.get(), config) - this.region = getAwsRegion(SysEnv.get(), config) - this.batchConfig = new AwsBatchConfig((Map)config.batch ?: Collections.emptyMap()) - this.s3config = new AwsS3Config((Map)config.client ?: Collections.emptyMap()) - this.s3Legacy = new AwsS3Legacy((Map)config.client ?: Collections.emptyMap()) +class AwsConfig implements ConfigScope { + + final AwsBatchConfig batch + + final AwsS3Config client + + @ConfigOption + @Description(""" + AWS region (e.g. `us-east-1`). + """) + final String region + + @ConfigOption + @Description(""" + AWS account access key. + """) + final String accessKey + + @ConfigOption + @Description(""" + AWS account secret key. + """) + final String secretKey + + @ConfigOption + @Description(""" + AWS profile from `~/.aws/credentials`. + """) + final String profile + + /* required by extension point -- do not remove */ + AwsConfig() {} + + AwsConfig(Map opts) { + this.accessKey = opts.accessKey + this.secretKey = opts.secretKey + this.profile = getAwsProfile0(SysEnv.get(), opts) + this.region = getAwsRegion(SysEnv.get(), opts) + this.batch = new AwsBatchConfig((Map)opts.batch ?: Collections.emptyMap()) + this.client = new AwsS3Config((Map)opts.client ?: Collections.emptyMap()) } - String getAccessKey() { accessKey } - - String getSecretKey() { secretKey } - List getCredentials() { return accessKey && secretKey ? 
List.of(accessKey, secretKey) : Collections.emptyList() } - String getProfile() { profile } - - String getRegion() { region } + AwsS3Config getS3Config() { client } - AwsS3Config getS3Config() { s3config } - - AwsBatchConfig getBatchConfig() { batchConfig } - - Map getS3LegacyClientConfig() { - return s3Legacy.getAwsClientConfig() - } + AwsBatchConfig getBatchConfig() { batch } String getS3GlobalRegion() { return !region || !s3Config.endpoint || s3Config.endpoint.contains(".amazonaws.com") @@ -136,7 +148,7 @@ class AwsConfig { final result = new LinkedHashMap(20) // -- remaining client config options - def config = getS3LegacyClientConfig() + def config = client.getAwsClientConfig() config = checkDefaultErrorRetry(config, SysEnv.get()) if( config ) { result.putAll(config) diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy index 658a533f60..cbddc08f82 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy @@ -23,7 +23,12 @@ import software.amazon.awssdk.services.s3.model.ObjectCannedACL import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.SysEnv +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.file.FileHelper +import nextflow.util.Duration +import nextflow.util.MemoryUnit /** * Model AWS S3 config settings * @@ -31,38 +36,216 @@ import nextflow.file.FileHelper */ @Slf4j @CompileStatic -class AwsS3Config { +class AwsS3Config implements ConfigScope { - private String endpoint + @ConfigOption + @Description(""" + Allow the access of public S3 buckets without providing AWS credentials (default: `false`). Any service that does not accept unsigned requests will return a service access error. 
+ """) + final Boolean anonymous - private String storageClass + @ConfigOption + @Description(""" + The amount of time to wait (in milliseconds) when initially establishing a connection before timing out (default: `10000`). + """) + final int connectionTimeout - private String storageEncryption + final Boolean debug - private String storageKmsKeyId + @ConfigOption + @Description(""" + The AWS S3 API entry point e.g. `https://s3-us-west-1.amazonaws.com`. The endpoint must include the protocol prefix e.g. `https://`. + """) + final String endpoint - private Boolean debug + @ConfigOption + @Description(""" + The maximum number of concurrent S3 transfers used by the S3 transfer manager. By default, this setting is determined by `aws.client.targetThroughputInGbps`. Modifying this value can affect the amount of memory used for S3 transfers. + """) + final Integer maxConcurrency - private ObjectCannedACL s3Acl + @ConfigOption + @Description(""" + The maximum number of open HTTP connections used by the S3 transfer manager (default: `50`). + """) + final Integer maxConnections - private Boolean pathStyleAccess + @ConfigOption + @Description(""" + The maximum number of retry attempts for failed retryable requests (default: `-1`). + """) + final Integer maxErrorRetry - private Boolean anonymous + @ConfigOption + @Description(""" + The maximum native memory used by the S3 transfer manager. By default, this setting is determined by `aws.client.targetThroughputInGbps`. + """) + final MemoryUnit maxNativeMemory - private Boolean requesterPays + @ConfigOption + @Description(""" + The minimum part size used by the S3 transfer manager for multi-part uploads (default: `8 MB`). + """) + final MemoryUnit minimumPartSize + + @ConfigOption + @Description(""" + The object size threshold used by the S3 transfer manager for performing multi-part uploads (default: same as `aws.cllient.minimumPartSize`). 
+ """) + final MemoryUnit multipartThreshold + + @ConfigOption + @Description(""" + The proxy host to connect through. + """) + final String proxyHost + + @ConfigOption + @Description(""" + The port to use when connecting through a proxy. + """) + final Integer proxyPort + + @ConfigOption + @Description(""" + The protocol scheme to use when connecting through a proxy. Can be `http` or `https` (default: `'http'`). + """) + final String proxyScheme + + @ConfigOption + @Description(""" + The user name to use when connecting through a proxy. + """) + final String proxyUsername + + @ConfigOption + @Description(""" + The password to use when connecting through a proxy. + """) + final String proxyPassword + + @ConfigOption + @Description(""" + Use [Requester Pays](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html) for S3 buckets (default: `false`). + """) + final Boolean requesterPays + + @ConfigOption(types=[String]) + @Description(""" + Specify predefined bucket permissions, also known as [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl). Can be one of `Private`, `PublicRead`, `PublicReadWrite`, `AuthenticatedRead`, `LogDeliveryWrite`, `BucketOwnerRead`, `BucketOwnerFullControl`, or `AwsExecRead`. + """) + final ObjectCannedACL s3Acl + + @ConfigOption + @Description(""" + Use the path-based access model to access objects in S3-compatible storage systems (default: `false`). + """) + final Boolean s3PathStyleAccess + + @ConfigOption + @Description(""" + The amount of time to wait (in milliseconds) for data to be transferred over an established, open connection before the connection is timed out (default: `50000`). + """) + final int socketTimeout + + @ConfigOption + @Description(""" + The S3 storage class applied to stored objects, one of \\[`STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`\\] (default: `STANDARD`). 
+ """) + final String storageClass + + @ConfigOption + @Description(""" + The S3 server side encryption to be used when saving objects on S3. Can be `AES256` or `aws:kms` (default: none). + """) + final String storageEncryption + + @ConfigOption + @Description(""" + The AWS KMS key Id to be used to encrypt files stored in the target S3 bucket. + """) + final String storageKmsKeyId + + @ConfigOption + @Description(""" + The target network throughput (in Gbps) used by the S3 transfer manager (default: `10`). This setting is not used when `aws.client.maxConcurrency` and `aws.client.maxNativeMemory` are specified. + """) + final Double targetThroughputInGbps + + @ConfigOption + @Description(""" + The number of threads used by the S3 transfer manager (default: `10`). + """) + final Integer transferManagerThreads + + // deprecated + + @Deprecated + @ConfigOption + @Description(""" + The size of a single part in a multipart upload (default: `100 MB`). + """) + final MemoryUnit uploadChunkSize + + @Deprecated + @ConfigOption + @Description(""" + The maximum number of upload attempts after which a multipart upload returns an error (default: `5`). + """) + final Integer uploadMaxAttempts + + @Deprecated + @ConfigOption + @Description(""" + The maximum number of threads used for multipart upload (default: `10`). + """) + final Integer uploadMaxThreads + + @Deprecated + @ConfigOption + @Description(""" + The time to wait after a failed upload attempt to retry the part upload (default: `500ms`). + """) + final Duration uploadRetrySleep + + @Deprecated + @ConfigOption + @Description(""" + The S3 storage class applied to stored objects. Can be `STANDARD`, `STANDARD_IA`, `ONEZONE_IA`, or `INTELLIGENT_TIERING` (default: `STANDARD`). + """) + final String uploadStorageClass AwsS3Config(Map opts) { + this.anonymous = opts.anonymous as Boolean + this.connectionTimeout = opts.connectionTimeout != null ? 
opts.connectionTimeout as int : 10000 this.debug = opts.debug as Boolean this.endpoint = opts.endpoint ?: SysEnv.get('AWS_S3_ENDPOINT') if( endpoint && FileHelper.getUrlProtocol(endpoint) !in ['http','https'] ) throw new IllegalArgumentException("S3 endpoint must begin with http:// or https:// prefix - offending value: '${endpoint}'") + this.maxConcurrency = opts.maxConcurrency as Integer + this.maxConnections = opts.maxConnections as Integer + this.maxErrorRetry = opts.maxErrorRetry as Integer + this.maxNativeMemory = opts.maxNativeMemory as MemoryUnit + this.minimumPartSize = opts.minimumPartSize as MemoryUnit + this.multipartThreshold = opts.multipartThreshold as MemoryUnit + this.proxyHost = opts.proxyHost + this.proxyPort = opts.proxyPort as Integer + this.proxyScheme = opts.proxyScheme + this.proxyUsername = opts.proxyUsername + this.proxyPassword = opts.proxyPassword + this.requesterPays = opts.requesterPays as Boolean + this.s3Acl = parseS3Acl(opts.s3Acl as String) + this.s3PathStyleAccess = opts.s3PathStyleAccess as Boolean + this.socketTimeout = opts.socketTimeout != null ? 
opts.socketTimeout as int : 50000 this.storageClass = parseStorageClass((opts.storageClass ?: opts.uploadStorageClass) as String) // 'uploadStorageClass' is kept for legacy purposes this.storageEncryption = parseStorageEncryption(opts.storageEncryption as String) this.storageKmsKeyId = opts.storageKmsKeyId - this.pathStyleAccess = opts.s3PathStyleAccess as Boolean - this.anonymous = opts.anonymous as Boolean - this.s3Acl = parseS3Acl(opts.s3Acl as String) - this.requesterPays = opts.requesterPays as Boolean + this.targetThroughputInGbps = opts.targetThroughputInGbps as Double + this.transferManagerThreads = opts.transferManagerThreads as Integer + this.uploadChunkSize = opts.uploadChunkSize as MemoryUnit + this.uploadMaxAttempts = opts.uploadMaxAttempts as Integer + this.uploadMaxThreads = opts.uploadMaxThreads as Integer + this.uploadRetrySleep = opts.uploadRetrySleep as Duration } private String parseStorageClass(String value) { @@ -86,43 +269,41 @@ class AwsS3Config { } // ==== getters ===== - String getEndpoint() { - return endpoint - } - - String getStorageClass() { - return storageClass - } - - String getStorageEncryption() { - return storageEncryption - } - - String getStorageKmsKeyId() { - return storageKmsKeyId - } - - Boolean getDebug() { - return debug - } - - ObjectCannedACL getS3Acl() { - return s3Acl - } Boolean getPathStyleAccess() { - return pathStyleAccess - } - - Boolean getAnonymous() { - return anonymous - } - - Boolean getRequesterPays() { - return requesterPays + return s3PathStyleAccess } boolean isCustomEndpoint() { endpoint && !endpoint.endsWith(".amazonaws.com") } + + Map getAwsClientConfig() { + return [ + connection_timeout: connectionTimeout?.toString(), + max_concurrency: maxConcurrency?.toString(), + max_connections: maxConnections?.toString(), + max_error_retry: maxErrorRetry?.toString(), + max_native_memory: maxNativeMemory?.toBytes()?.toString(), + minimum_part_size: minimumPartSize?.toBytes()?.toString(), + multipart_threshold: 
multipartThreshold?.toBytes()?.toString(), + proxy_host: proxyHost?.toString(), + proxy_port: proxyPort?.toString(), + proxy_scheme: proxyScheme?.toString(), + proxy_username: proxyUsername?.toString(), + proxy_password: proxyPassword?.toString(), + requester_pays: requesterPays?.toString(), + s3_acl: s3Acl?.toString(), + socket_timeout: socketTimeout?.toString(), + storage_encryption: storageEncryption?.toString(), + storage_kms_key_id: storageKmsKeyId?.toString(), + target_throughput_in_gbps: targetThroughputInGbps?.toString(), + transfer_manager_threads: transferManagerThreads?.toString(), + upload_chunk_size: uploadChunkSize?.toBytes()?.toString(), + upload_max_attempts: uploadMaxAttempts?.toString(), + upload_max_threads: uploadMaxThreads?.toString(), + upload_retry_sleep: uploadRetrySleep?.toMillis()?.toString(), + upload_storage_class: storageClass?.toString() + ].findAll { k, v -> v != null } + } } diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Legacy.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Legacy.groovy deleted file mode 100644 index ce5c39d6a2..0000000000 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Legacy.groovy +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2020-2022, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package nextflow.cloud.aws.config - - -import groovy.transform.CompileStatic -import nextflow.util.Duration -import nextflow.util.MemoryUnit -import org.apache.commons.lang.StringUtils -/** - * Handle AWS S3 client legacy configuration - * - * @author Paolo Di Tommaso - */ -@CompileStatic -class AwsS3Legacy { - - private Map config - - AwsS3Legacy( Map config ) { - this.config = config - } - - Map getAwsClientConfig() { - return config != null - ? normalizeAwsClientConfig(config) - : new HashMap() - } - - static protected Map normalizeAwsClientConfig(Map client) { - - normalizeMemUnit(client, 'uploadChunkSize'); - normalizeMemUnit(client, 'minimumPartSize'); - normalizeMemUnit(client, 'multipartThreshold'); - normalizeMemUnit(client, 'maxNativeMemory'); - normalizeDuration(client, 'uploadRetrySleep'); - - - def result = [:] - client.each { String name, value -> - def newKey = name.isCamelCase() ? StringUtils.splitByCharacterTypeCamelCase(name).join('_').toLowerCase() : name - result.put(newKey,value?.toString()) - } - return result - } - - static void normalizeMemUnit(Map client, String key) { - if( client.get(key) instanceof String ) { - client.put(key, MemoryUnit.of((String)client.get(key))) - } - if( client.get(key) instanceof MemoryUnit ) { - client.put(key, ((MemoryUnit)client.get(key)).toBytes()) - } - } - - static void normalizeDuration(Map client, String key) { - if( client.get(key) instanceof String ) { - client.put(key, Duration.of((String)client.get(key))) - } - if( client.get(key) instanceof Duration ) { - client.put(key, ((Duration)client.get(key)).toMillis()) - } - } -} diff --git a/plugins/nf-amazon/src/resources/META-INF/extensions.idx b/plugins/nf-amazon/src/resources/META-INF/extensions.idx index 50517c11c4..b8d9444ea7 100644 --- a/plugins/nf-amazon/src/resources/META-INF/extensions.idx +++ b/plugins/nf-amazon/src/resources/META-INF/extensions.idx @@ -15,6 +15,7 @@ # nextflow.cloud.aws.batch.AwsBatchExecutor 
+nextflow.cloud.aws.config.AwsConfig nextflow.cloud.aws.util.S3PathSerializer nextflow.cloud.aws.util.S3PathFactory nextflow.cloud.aws.fusion.AwsFusionEnv diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchScriptLauncherTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchScriptLauncherTest.groovy index e729de6477..a792b4355c 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchScriptLauncherTest.groovy +++ b/plugins/nf-amazon/src/test/nextflow/cloud/aws/batch/AwsBatchScriptLauncherTest.groovy @@ -24,6 +24,7 @@ import nextflow.Session import nextflow.SysEnv import nextflow.cloud.aws.config.AwsConfig import nextflow.cloud.aws.util.S3PathFactory +import nextflow.container.DockerConfig import nextflow.processor.TaskBean import nextflow.util.Duration import spock.lang.Specification @@ -623,13 +624,12 @@ class AwsBatchScriptLauncherTest extends Specification { name: 'Hello 1', workDir: Paths.get('/work/dir'), script: 'echo Hello world!', - containerConfig: [fixOwnership: true], + containerConfig: new DockerConfig(fixOwnership: true), input: 'Ciao ciao' ] as TaskBean, opts) when: def binding = builder.makeBinding() then: - builder.fixOwnership() >> true binding.fix_ownership == '[ ${NXF_OWNER:=\'\'} ] && (shopt -s extglob; GLOBIGNORE=\'..\'; chown -fR --from root $NXF_OWNER /work/dir/{*,.*}) || true' } diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3LegacyTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3LegacyTest.groovy deleted file mode 100644 index 82430a69d9..0000000000 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/config/AwsS3LegacyTest.groovy +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2020-2022, Seqera Labs - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package nextflow.cloud.aws.config - - -import nextflow.util.Duration -import nextflow.util.MemoryUnit -import spock.lang.Specification -/** - * - * @author Paolo Di Tommaso - */ -class AwsS3LegacyTest extends Specification{ - - def 'should normalize aws config' () { - - given: - def config = [uploadMaxThreads: 5, uploadChunkSize: 1000, uploadStorageClass: 'STANDARD' ] - - when: - def norm = AwsS3Legacy.normalizeAwsClientConfig(config) - - then: - norm.upload_storage_class == 'STANDARD' - norm.upload_chunk_size == '1000' - norm.upload_max_threads == '5' - - when: - config.uploadChunkSize = '10MB' - then: - AwsS3Legacy.normalizeAwsClientConfig(config).upload_chunk_size == '10485760' - - when: - config.uploadChunkSize = '1024' - then: - AwsS3Legacy.normalizeAwsClientConfig(config).upload_chunk_size == '1024' - - when: - config.uploadChunkSize = new MemoryUnit('2 MB') - then: - AwsS3Legacy.normalizeAwsClientConfig(config).upload_chunk_size == '2097152' - - when: - config.uploadRetrySleep = '10 sec' - then: - AwsS3Legacy.normalizeAwsClientConfig(config).upload_retry_sleep == '10000' - - when: - config.uploadRetrySleep = Duration.of('5 sec') - then: - AwsS3Legacy.normalizeAwsClientConfig(config).upload_retry_sleep == '5000' - } - -} diff --git a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy index 5b0cfa842c..5d0846baed 100644 --- a/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy +++ 
b/plugins/nf-amazon/src/test/nextflow/cloud/aws/nio/S3FileSystemProviderTest.groovy @@ -29,14 +29,38 @@ class S3FileSystemProviderTest extends Specification { def 'should create filesystem from config'(){ given: - def config = [client: [ anonymous: true, s3acl: 'Private', connectionTimeout: 20000, endpoint: 'https://s3.eu-west-1.amazonaws.com', - maxConcurrency: 10, maxNativeMemory: '500MB', minimumPartSize: '7MB', multipartThreshold: '32MB', - maxConnections: 100, maxErrorRetry: 3, socketTimeout: 20000, requesterPays: true, s3PathStyleAccess: true, - proxyHost: 'host.com', proxyPort: 80, proxyScheme: 'https', proxyUsername: 'user', proxyPassword: 'pass', - signerOverride: 'S3SignerType', userAgent: 'Agent1', storageEncryption: 'AES256', storageKmsKeyId: 'arn:key:id', - transferManagerThreads: 20, uploadMaxThreads: 15, uploadChunkSize: '7MB', uploadMaxAttempts: 4, uploadRetrySleep: '200ms' - ], - accessKey: '123456abc', secretKey: '78910def', profile: 'test'] + def config = [ + client: [ + anonymous: true, + s3Acl: 'Private', + connectionTimeout: 20000, + endpoint: 'https://s3.eu-west-1.amazonaws.com', + maxConcurrency: 10, + maxNativeMemory: '500MB', + minimumPartSize: '7MB', + multipartThreshold: '32MB', + maxConnections: 100, + maxErrorRetry: 3, + socketTimeout: 20000, + requesterPays: true, + s3PathStyleAccess: true, + proxyHost: 'host.com', + proxyPort: 80, + proxyScheme: 'https', + proxyUsername: 'user', + proxyPassword: 'pass', + storageEncryption: 'AES256', + storageKmsKeyId: 'arn:key:id', + transferManagerThreads: 20, + uploadMaxThreads: 15, + uploadChunkSize: '7MB', + uploadMaxAttempts: 4, + uploadRetrySleep: '200ms' + ], + accessKey: '123456abc', + secretKey: '78910def', + profile: 'test' + ] def provider = new S3FileSystemProvider(); when: def fs = provider.newFileSystem(new URI("s3:///bucket/key"), config) as S3FileSystem @@ -60,8 +84,6 @@ class S3FileSystemProviderTest extends Specification { fs.properties().getProperty('proxy_scheme') == 'https' 
fs.properties().getProperty('proxy_username') == 'user' fs.properties().getProperty('proxy_password') == 'pass' - fs.properties().getProperty('signer_override') == 'S3SignerType' - fs.properties().getProperty('user_agent') == 'Agent1' fs.properties().getProperty('socket_timeout') == '20000' fs.properties().getProperty('connection_timeout') == '20000' fs.properties().getProperty('max_connections') == '100' diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchExecutor.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchExecutor.groovy index af4ecd9c7e..88ed6a57fc 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchExecutor.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchExecutor.groovy @@ -48,7 +48,7 @@ class AzBatchExecutor extends Executor implements ExtensionPoint { private Path remoteBinDir - private AzConfig config + private AzConfig azConfig private AzBatchService batchService @@ -98,14 +98,14 @@ class AzBatchExecutor extends Executor implements ExtensionPoint { } protected void initBatchService() { - config = AzConfig.getConfig(session) + azConfig = AzConfig.getConfig(session) batchService = new AzBatchService(this) // Generate an account SAS token using either activeDirectory configs or storage account keys - if (!config.storage().sasToken) { - config.storage().sasToken = config.activeDirectory().isConfigured() || config.managedIdentity().isConfigured() - ? AzHelper.generateContainerSasWithActiveDirectory(workDir, config.storage().tokenDuration) - : AzHelper.generateAccountSasWithAccountKey(workDir, config.storage().tokenDuration) + if (!azConfig.storage().sasToken) { + azConfig.storage().sasToken = azConfig.activeDirectory().isConfigured() || azConfig.managedIdentity().isConfigured() + ? 
AzHelper.generateContainerSasWithActiveDirectory(workDir, azConfig.storage().tokenDuration) + : AzHelper.generateAccountSasWithAccountKey(workDir, azConfig.storage().tokenDuration) } Global.onCleanup((it) -> batchService.close()) @@ -123,13 +123,13 @@ class AzBatchExecutor extends Executor implements ExtensionPoint { uploadBinDir() } - @PackageScope AzConfig getConfig() { - return config + @PackageScope AzConfig getAzConfig() { + return azConfig } @Override protected TaskMonitor createTaskMonitor() { - TaskPollingMonitor.create(session, name, 1000, Duration.of('10 sec')) + TaskPollingMonitor.create(session, config, name, 1000, Duration.of('10 sec')) } @Override diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchService.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchService.groovy index 2c83d8e8b9..5d9f9f1596 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchService.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchService.groovy @@ -120,7 +120,7 @@ class AzBatchService implements Closeable { AzBatchService(AzBatchExecutor executor) { assert executor - this.config = executor.config + this.config = executor.azConfig } protected AzVmPoolSpec getPoolSpec(String poolId) { diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy index ffda1882da..a57b219535 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzBatchTaskHandler.groovy @@ -131,7 +131,7 @@ class AzBatchTaskHandler extends TaskHandler implements FusionAwareTask { } private Boolean shouldDelete() { - executor.config.batch().deleteTasksOnCompletion + executor.azConfig.batch().deleteTasksOnCompletion } protected void deleteTask(AzTaskKey taskKey, TaskRun task) { diff --git 
a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzFileCopyStrategy.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzFileCopyStrategy.groovy index 4ac9e072fe..0427e2ae07 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzFileCopyStrategy.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/batch/AzFileCopyStrategy.groovy @@ -46,7 +46,7 @@ class AzFileCopyStrategy extends SimpleFileCopyStrategy { AzFileCopyStrategy(TaskBean bean, AzBatchExecutor executor) { super(bean) - this.config = executor.config + this.config = executor.azConfig this.remoteBinDir = executor.remoteBinDir this.sasToken = config.storage().sasToken this.maxParallelTransfers = config.batch().maxParallelTransfers diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy index bc05264806..db4db1a337 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzActiveDirectoryOpts.groovy @@ -18,6 +18,9 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic import nextflow.SysEnv import nextflow.cloud.azure.nio.AzFileSystemProvider +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description /** * Model Azure Entra (formerly Active Directory) config options @@ -25,11 +28,25 @@ import nextflow.cloud.azure.nio.AzFileSystemProvider * @author Abhinav Sharma */ @CompileStatic -class AzActiveDirectoryOpts { +class AzActiveDirectoryOpts implements ConfigScope { - String servicePrincipalId - String servicePrincipalSecret - String tenantId + @ConfigOption + @Description(""" + The service principal client ID. Defaults to environment variable `AZURE_CLIENT_ID`. 
+ """) + final String servicePrincipalId + + @ConfigOption + @Description(""" + The service principal client secret. Defaults to environment variable `AZURE_CLIENT_SECRET`. + """) + final String servicePrincipalSecret + + @ConfigOption + @Description(""" + The Azure tenant ID. Defaults to environment variable `AZURE_TENANT_ID`. + """) + final String tenantId AzActiveDirectoryOpts(Map config, Map env = null) { assert config != null diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzBatchOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzBatchOpts.groovy index 323d682203..cf55c45148 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzBatchOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzBatchOpts.groovy @@ -24,7 +24,11 @@ import groovy.transform.CompileStatic import nextflow.Global import nextflow.Session import nextflow.cloud.CloudTransferOptions +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.PlaceholderName import nextflow.fusion.FusionHelper +import nextflow.script.dsl.Description import nextflow.util.Duration import nextflow.util.StringUtils @@ -34,7 +38,7 @@ import nextflow.util.StringUtils * @author Paolo Di Tommaso */ @CompileStatic -class AzBatchOpts implements CloudTransferOptions { +class AzBatchOpts implements ConfigScope, CloudTransferOptions { static final private Pattern ENDPOINT_PATTERN = ~/https:\/\/(\w+)\.(\w+)\.batch\.azure\.com/ @@ -44,21 +48,86 @@ class AzBatchOpts implements CloudTransferOptions { int maxTransferAttempts Duration delayBetweenAttempts - String accountName - String accountKey - String endpoint - String location - Boolean autoPoolMode - Boolean allowPoolCreation - Boolean terminateJobsOnCompletion - Boolean deleteJobsOnCompletion - Boolean deletePoolsOnCompletion - Boolean deleteTasksOnCompletion - CopyToolInstallMode copyToolInstallMode - Duration jobMaxWallClockTime - String 
poolIdentityClientId - - Map pools + @ConfigOption + @Description(""" + The batch service account name. Defaults to environment variable `AZURE_BATCH_ACCOUNT_NAME`. + """) + final String accountName + + @ConfigOption + @Description(""" + The batch service account key. Defaults to environment variable `AZURE_BATCH_ACCOUNT_KEY`. + """) + final String accountKey + + @ConfigOption + @Description(""" + Enable the automatic creation of batch pools specified in the Nextflow configuration file (default: `false`). + """) + final Boolean allowPoolCreation + + @ConfigOption + @Description(""" + Enable the automatic creation of batch pools depending on the pipeline resources demand (default: `true`). + """) + final Boolean autoPoolMode + + @ConfigOption(types=[String]) + @Description(""" + The mode in which the `azcopy` tool is installed by Nextflow (default: `'node'`). + """) + final CopyToolInstallMode copyToolInstallMode + + @ConfigOption + @Description(""" + Delete all jobs when the workflow completes (default: `false`). + """) + final Boolean deleteJobsOnCompletion + + @ConfigOption + @Description(""" + Delete all compute node pools when the workflow completes (default: `false`). + """) + final Boolean deletePoolsOnCompletion + + @ConfigOption + @Description(""" + Delete each task when it completes (default: `true`). + """) + final Boolean deleteTasksOnCompletion + + @ConfigOption + @Description(""" + The batch service endpoint e.g. `https://nfbatch1.westeurope.batch.azure.com`. + """) + final String endpoint + + @ConfigOption + @Description(""" + The maximum elapsed time that jobs may run, measured from the time they are created (default: `30d`). + """) + final Duration jobMaxWallClockTime + + @ConfigOption + @Description(""" + The name of the batch service region, e.g. `westeurope` or `eastus2`. Not needed when the endpoint is specified. 
+ """) + final String location + + @ConfigOption + @Description(""" + The client ID for an Azure [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) that is available on all Azure Batch node pools. This identity is used by Fusion to authenticate to Azure storage. If set to `'auto'`, Fusion will use the first available managed identity. + """) + final String poolIdentityClientId + + @PlaceholderName("") + final Map pools + + @ConfigOption + @Description(""" + When the workflow completes, set all jobs to terminate on task completion (default: `true`). + """) + final Boolean terminateJobsOnCompletion AzBatchOpts(Map config, Map env=null) { assert config!=null @@ -67,12 +136,12 @@ class AzBatchOpts implements CloudTransferOptions { accountKey = config.accountKey ?: sysEnv.get('AZURE_BATCH_ACCOUNT_KEY') endpoint = config.endpoint location = config.location - autoPoolMode = config.autoPoolMode - allowPoolCreation = config.allowPoolCreation + autoPoolMode = config.autoPoolMode as Boolean + allowPoolCreation = config.allowPoolCreation as Boolean terminateJobsOnCompletion = config.terminateJobsOnCompletion != Boolean.FALSE - deleteJobsOnCompletion = config.deleteJobsOnCompletion - deletePoolsOnCompletion = config.deletePoolsOnCompletion - deleteTasksOnCompletion = config.deleteTasksOnCompletion + deleteJobsOnCompletion = config.deleteJobsOnCompletion as Boolean + deletePoolsOnCompletion = config.deletePoolsOnCompletion as Boolean + deleteTasksOnCompletion = config.deleteTasksOnCompletion as Boolean jobMaxWallClockTime = config.jobMaxWallClockTime ? config.jobMaxWallClockTime as Duration : Duration.of('30d') poolIdentityClientId = config.poolIdentityClientId pools = parsePools(config.pools instanceof Map ? 
config.pools as Map : Collections.emptyMap()) diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzConfig.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzConfig.groovy index 98979212f0..7f6f5a0583 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzConfig.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzConfig.groovy @@ -19,52 +19,62 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic import nextflow.Global import nextflow.Session +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description /** * Model Azure settings defined in the nextflow.config file * * @author Paolo Di Tommaso */ +@ScopeName("azure") +@Description(""" + The `azure` scope allows you to configure the interactions with Azure, including Azure Batch and Azure Blob Storage. +""") @CompileStatic -class AzConfig { +class AzConfig implements ConfigScope { - private AzCopyOpts azcopyOpts + private AzCopyOpts azcopy - private AzStorageOpts storageOpts + private AzStorageOpts storage - private AzBatchOpts batchOpts + private AzBatchOpts batch - private AzRegistryOpts registryOpts + private AzRegistryOpts registry - private AzRetryConfig retryConfig + private AzRetryConfig retryPolicy - private AzActiveDirectoryOpts activeDirectoryOpts + private AzActiveDirectoryOpts activeDirectory - private AzManagedIdentityOpts managedIdentityOpts + private AzManagedIdentityOpts managedIdentity + + /* required by extension point -- do not remove */ + AzConfig() {} AzConfig(Map azure) { - this.batchOpts = new AzBatchOpts( (Map)azure.batch ?: Collections.emptyMap() ) - this.storageOpts = new AzStorageOpts( (Map)azure.storage ?: Collections.emptyMap() ) - this.registryOpts = new AzRegistryOpts( (Map)azure.registry ?: Collections.emptyMap() ) - this.azcopyOpts = new AzCopyOpts( (Map)azure.azcopy ?: Collections.emptyMap() ) - this.retryConfig = new AzRetryConfig( 
(Map)azure.retryPolicy ?: Collections.emptyMap() ) - this.activeDirectoryOpts = new AzActiveDirectoryOpts((Map) azure.activeDirectory ?: Collections.emptyMap()) - this.managedIdentityOpts = new AzManagedIdentityOpts((Map) azure.managedIdentity ?: Collections.emptyMap()) + this.batch = new AzBatchOpts( (Map)azure.batch ?: Collections.emptyMap() ) + this.storage = new AzStorageOpts( (Map)azure.storage ?: Collections.emptyMap() ) + this.registry = new AzRegistryOpts( (Map)azure.registry ?: Collections.emptyMap() ) + this.azcopy = new AzCopyOpts( (Map)azure.azcopy ?: Collections.emptyMap() ) + this.retryPolicy = new AzRetryConfig( (Map)azure.retryPolicy ?: Collections.emptyMap() ) + this.activeDirectory = new AzActiveDirectoryOpts((Map) azure.activeDirectory ?: Collections.emptyMap()) + this.managedIdentity = new AzManagedIdentityOpts((Map) azure.managedIdentity ?: Collections.emptyMap()) } - AzCopyOpts azcopy() { azcopyOpts } + AzCopyOpts azcopy() { azcopy } - AzBatchOpts batch() { batchOpts } + AzBatchOpts batch() { batch } - AzStorageOpts storage() { storageOpts } + AzStorageOpts storage() { storage } - AzRegistryOpts registry() { registryOpts } + AzRegistryOpts registry() { registry } - AzRetryConfig retryConfig() { retryConfig } + AzRetryConfig retryConfig() { retryPolicy } - AzActiveDirectoryOpts activeDirectory() { activeDirectoryOpts } + AzActiveDirectoryOpts activeDirectory() { activeDirectory } - AzManagedIdentityOpts managedIdentity() { managedIdentityOpts } + AzManagedIdentityOpts managedIdentity() { managedIdentity } static AzConfig getConfig(Session session) { if( !session ) diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzCopyOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzCopyOpts.groovy index c0842ad4a4..da7979f34c 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzCopyOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzCopyOpts.groovy @@ -17,6 +17,9 @@ package 
nextflow.cloud.azure.config import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description /** * Model Azure azcopy tool config settings from nextflow config file @@ -24,13 +27,22 @@ import groovy.transform.CompileStatic * @author Abhinav Sharma */ @CompileStatic -class AzCopyOpts { +class AzCopyOpts implements ConfigScope { static public final String DEFAULT_BLOCK_SIZE = "4" static public final String DEFAULT_BLOB_TIER = "None" - String blockSize - String blobTier + @ConfigOption + @Description(""" + The block size (in MB) used by `azcopy` to transfer files between Azure Blob Storage and compute nodes (default: `4`). + """) + final String blockSize + + @ConfigOption + @Description(""" + The blob [access tier](https://learn.microsoft.com/en-us/azure/storage/blobs/access-tiers-overview) used by `azcopy` to upload files to Azure Blob Storage. Valid options are `None`, `Hot`, or `Cool` (default: `None`). 
+ """) + final String blobTier AzCopyOpts() { this.blockSize = DEFAULT_BLOCK_SIZE diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzFileShareOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzFileShareOpts.groovy index d3948801fd..b45d8ff2cb 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzFileShareOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzFileShareOpts.groovy @@ -20,6 +20,9 @@ import com.google.common.hash.Hasher import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.CacheFunnel import nextflow.util.CacheHelper @@ -31,27 +34,36 @@ import nextflow.util.CacheHelper @ToString(includeNames = true, includePackage = false) @EqualsAndHashCode @CompileStatic -class AzFileShareOpts implements CacheFunnel { +class AzFileShareOpts implements CacheFunnel, ConfigScope { - static public final String DEFAULT_MOUNT_OPTIONS = '-o vers=3.0,dir_mode=0777,file_mode=0777,sec=ntlmssp' + static public final String DEFAULT_MOUNT_OPTIONS = '-o vers=3.0,dir_mode=0777,file_mode=0777,sec=ntlmssp' - String mountPath - String mountOptions + @ConfigOption + @Description(""" + The file share mount path. + """) + final String mountPath - AzFileShareOpts(Map opts) { - assert opts != null - this.mountPath = opts.mountPath ?: '' - this.mountOptions = opts.mountOptions ?: DEFAULT_MOUNT_OPTIONS - } + @ConfigOption + @Description(""" + The file share mount options. 
+ """) + final String mountOptions - AzFileShareOpts() { - this(Collections.emptyMap()) - } + AzFileShareOpts(Map opts) { + assert opts != null + this.mountPath = opts.mountPath ?: '' + this.mountOptions = opts.mountOptions ?: DEFAULT_MOUNT_OPTIONS + } - @Override - Hasher funnel(Hasher hasher, CacheHelper.HashMode mode) { - hasher.putUnencodedChars(mountPath) - hasher.putUnencodedChars(mountOptions) - return hasher - } + AzFileShareOpts() { + this(Collections.emptyMap()) + } + + @Override + Hasher funnel(Hasher hasher, CacheHelper.HashMode mode) { + hasher.putUnencodedChars(mountPath) + hasher.putUnencodedChars(mountOptions) + return hasher + } } diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzManagedIdentityOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzManagedIdentityOpts.groovy index 9d3f4e3299..401f59beaf 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzManagedIdentityOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzManagedIdentityOpts.groovy @@ -19,6 +19,9 @@ import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString import nextflow.cloud.azure.nio.AzFileSystemProvider +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description /** * Model Azure managed identity config options @@ -28,11 +31,19 @@ import nextflow.cloud.azure.nio.AzFileSystemProvider @ToString(includePackage = false, includeNames = true) @EqualsAndHashCode @CompileStatic -class AzManagedIdentityOpts { +class AzManagedIdentityOpts implements ConfigScope { - String clientId + @ConfigOption + @Description(""" + The client ID for an Azure [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview). Defaults to environment variable `AZURE_MANAGED_IDENTITY_USER`. 
+ """) + final String clientId - boolean system + @ConfigOption + @Description(""" + When `true`, use the system-assigned [managed identity](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview) to authenticate Azure resources. Defaults to environment variable `AZURE_MANAGED_IDENTITY_SYSTEM`. + """) + final boolean system AzManagedIdentityOpts(Map config) { assert config != null diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzPoolOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzPoolOpts.groovy index 2f424e5b5d..f44825466f 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzPoolOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzPoolOpts.groovy @@ -22,6 +22,9 @@ import com.google.common.hash.Hasher import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.CacheFunnel import nextflow.util.CacheHelper import nextflow.util.Duration @@ -33,7 +36,7 @@ import nextflow.util.Duration @ToString(includeNames = true, includePackage = false) @EqualsAndHashCode @CompileStatic -class AzPoolOpts implements CacheFunnel { +class AzPoolOpts implements CacheFunnel, ConfigScope { static public final String DEFAULT_PUBLISHER = "microsoft-dsvm" static public final String DEFAULT_OFFER = "ubuntu-hpc" @@ -43,30 +46,110 @@ class AzPoolOpts implements CacheFunnel { static public final String DEFAULT_SHARE_ROOT_PATH = "/mnt/batch/tasks/fsmounts" static public final Duration DEFAULT_SCALE_INTERVAL = Duration.of('5 min') - String runAs - boolean privileged - String publisher - String offer - String fileShareRootPath - String sku + @ConfigOption + @Description(""" + Enable autoscaling feature for the pool identified with ``. 
+ """) + final boolean autoScale + + @ConfigOption + @Description(""" + The internal root mount point when mounting File Shares. Must be `/mnt/resource/batch/tasks/fsmounts` for CentOS nodes or `/mnt/batch/tasks/fsmounts` for Ubuntu nodes (default: CentOS). + """) + final String fileShareRootPath + + @ConfigOption + @Description(""" + Enable the use of low-priority VMs (default: `false`). + """) + final boolean lowPriority + + @ConfigOption + @Description(""" + The max number of virtual machines when using auto scaling. + """) + final Integer maxVmCount + + @ConfigOption + @Description(""" + The mount options for mounting the file shares (default: `-o vers=3.0,dir_mode=0777,file_mode=0777,sec=ntlmssp`). + """) + final String mountOptions + + @ConfigOption + @Description(""" + The offer type of the virtual machine type used by the pool identified with `` (default: `centos-container`). + """) + final String offer + + @ConfigOption + @Description(""" + Enable the task to run with elevated access. Ignored if `runAs` is set (default: `false`). + """) + final boolean privileged + + @ConfigOption + @Description(""" + The publisher of virtual machine type used by the pool identified with `` (default: `microsoft-azure-batch`). + """) + final String publisher + + @ConfigOption + @Description(""" + The username under which the task is run. The user must already exist on each node of the pool. + """) + final String runAs + + @ConfigOption + @Description(""" + The [scale formula](https://docs.microsoft.com/en-us/azure/batch/batch-automatic-scaling) for the pool identified with ``. + """) + final String scaleFormula + + @ConfigOption + @Description(""" + The interval at which to automatically adjust the Pool size according to the autoscale formula. Must be at least 5 minutes and at most 168 hours (default: `10 mins`). + """) + final Duration scaleInterval + + @ConfigOption + @Description(""" + The scheduling policy for the pool identified with ``. 
Can be either `spread` or `pack` (default: `spread`). + """) + final String schedulePolicy + + @ConfigOption + @Description(""" + The ID of the Compute Node agent SKU which the pool identified with `` supports (default: `batch.node.centos 8`). + """) + final String sku + + final AzStartTaskOpts startTask + + @ConfigOption + @Description(""" + The subnet ID of a virtual network in which to create the pool. + """) + final String virtualNetwork + + @ConfigOption + @Description(""" + The number of virtual machines provisioned by the pool identified with ``. + """) + final Integer vmCount + + @ConfigOption + @Description(""" + The virtual machine type used by the pool identified with ``. + """) + final String vmType + OSType osType = DEFAULT_OS_TYPE ImageVerificationType verification = ImageVerificationType.VERIFIED - String vmType - Integer vmCount = 1 - boolean autoScale - String scaleFormula - Duration scaleInterval - Integer maxVmCount - - String schedulePolicy // spread | pack String registry String userName String password - - String virtualNetwork - boolean lowPriority - AzStartTaskOpts startTask AzPoolOpts() { this(Collections.emptyMap()) diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy index 386571cf2d..096677facf 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRegistryOpts.groovy @@ -18,6 +18,9 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic import nextflow.SysEnv +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description /** * Model Azure Batch registry config settings from nextflow config file @@ -25,11 +28,25 @@ import nextflow.SysEnv * @author Manuele Simi */ @CompileStatic -class AzRegistryOpts { +class AzRegistryOpts implements ConfigScope { 
- String server - String userName - String password + @ConfigOption + @Description(""" + The container registry from which to pull the Docker images (default: `docker.io`). + """) + final String server + + @ConfigOption + @Description(""" + The username to connect to a private container registry. + """) + final String userName + + @ConfigOption + @Description(""" + The password to connect to a private container registry. + """) + final String password AzRegistryOpts() { this(Collections.emptyMap()) diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRetryConfig.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRetryConfig.groovy index e6923d3d8c..7930011292 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRetryConfig.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzRetryConfig.groovy @@ -20,6 +20,9 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.Duration /** @@ -30,10 +33,30 @@ import nextflow.util.Duration @ToString(includePackage = false, includeNames = true) @EqualsAndHashCode @CompileStatic -class AzRetryConfig { +class AzRetryConfig implements ConfigScope { + + @ConfigOption + @Description(""" + Delay when retrying failed API requests (default: `250ms`). + """) Duration delay = Duration.of('250ms') + + @ConfigOption + @Description(""" + Max delay when retrying failed API requests (default: `90s`). + """) Duration maxDelay = Duration.of('90s') + + @ConfigOption + @Description(""" + Max attempts when retrying failed API requests (default: `10`). + """) int maxAttempts = 10 + + @ConfigOption + @Description(""" + Jitter value when retrying failed API requests (default: `0.25`). 
+ """) double jitter = 0.25 AzRetryConfig() { diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStartTaskOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStartTaskOpts.groovy index 9e61f2d7ee..346b730124 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStartTaskOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStartTaskOpts.groovy @@ -16,15 +16,27 @@ package nextflow.cloud.azure.config import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description /** * Model Azure pool start task options */ @CompileStatic -class AzStartTaskOpts { +class AzStartTaskOpts implements ConfigScope { - String script - boolean privileged + @ConfigOption + @Description(""" + The `startTask` that is executed as the node joins the Azure Batch node pool. + """) + final String script + + @ConfigOption + @Description(""" + Enable the `startTask` to run with elevated access (default: `false`). 
+ """) + final boolean privileged AzStartTaskOpts() { this(Collections.emptyMap()) diff --git a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy index d3e6bb3259..6bd2343902 100644 --- a/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy +++ b/plugins/nf-azure/src/main/nextflow/cloud/azure/config/AzStorageOpts.groovy @@ -20,6 +20,10 @@ import groovy.transform.CompileStatic import nextflow.SysEnv import nextflow.cloud.azure.batch.AzHelper import nextflow.cloud.azure.nio.AzFileSystemProvider +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.PlaceholderName +import nextflow.script.dsl.Description import nextflow.util.Duration /** * Parse Azure settings from nextflow config file @@ -27,13 +31,34 @@ import nextflow.util.Duration * @author Paolo Di Tommaso */ @CompileStatic -class AzStorageOpts { +class AzStorageOpts implements ConfigScope { - String accountKey - String accountName + @ConfigOption + @Description(""" + The blob storage account key. Defaults to environment variable `AZURE_STORAGE_ACCOUNT_KEY`. + """) + final String accountKey + + @ConfigOption + @Description(""" + The blob storage account name. Defaults to environment variable `AZURE_STORAGE_ACCOUNT_NAME`. + """) + final String accountName + + @ConfigOption + @Description(""" + The blob storage shared access signature (SAS) token, which can be provided instead of an account key. Defaults to environment variable `AZURE_STORAGE_SAS_TOKEN`. + """) String sasToken - Duration tokenDuration - Map fileShares + + @ConfigOption + @Description(""" + The duration of the SAS token generated by Nextflow when the `sasToken` option is *not* specified (default: `48h`). 
+ """) + final Duration tokenDuration + + @PlaceholderName("") + final Map fileShares AzStorageOpts(Map config, Map env=SysEnv.get()) { diff --git a/plugins/nf-azure/src/resources/META-INF/extensions.idx b/plugins/nf-azure/src/resources/META-INF/extensions.idx index 685c239bca..8899ed69a7 100644 --- a/plugins/nf-azure/src/resources/META-INF/extensions.idx +++ b/plugins/nf-azure/src/resources/META-INF/extensions.idx @@ -15,6 +15,7 @@ # nextflow.cloud.azure.batch.AzBatchExecutor +nextflow.cloud.azure.config.AzConfig nextflow.cloud.azure.file.AzPathFactory nextflow.cloud.azure.file.AzPathSerializer nextflow.cloud.azure.fusion.AzFusionEnv diff --git a/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy b/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy index 2c805ff363..353383dcd4 100644 --- a/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy +++ b/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzBatchServiceTest.groovy @@ -47,6 +47,16 @@ class AzBatchServiceTest extends Specification { SysEnv.pop() // <-- restore the system host env } + def createExecutor(config) { + return Mock(AzBatchExecutor) { + getAzConfig() >> config + } + } + + def createExecutor() { + createExecutor(new AzConfig([:])) + } + def 'should make job id'() { given: def task = Mock(TaskRun) { @@ -55,9 +65,7 @@ class AzBatchServiceTest extends Specification { } } and: - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() and: def svc = new AzBatchService(exec) @@ -72,9 +80,7 @@ class AzBatchServiceTest extends Specification { def 'should list locations' () { given: - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -86,9 +92,7 @@ class AzBatchServiceTest extends Specification { def 'should list vm names for location' () { given: - def exec = Mock(AzBatchExecutor) 
{ - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -100,9 +104,7 @@ class AzBatchServiceTest extends Specification { def 'should list all VMs in region' () { given: - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -129,9 +131,7 @@ class AzBatchServiceTest extends Specification { def 'should fail to list VMs in region' () { given: - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -144,9 +144,7 @@ class AzBatchServiceTest extends Specification { def 'should get size for vm' () { given: - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -172,9 +170,7 @@ class AzBatchServiceTest extends Specification { @Unroll def 'should compute vm score' () { given: - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) - } + def exec = createExecutor() def svc = new AzBatchService(exec) expect: @@ -194,7 +190,7 @@ class AzBatchServiceTest extends Specification { def 'should find best match for northeurope' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -220,7 +216,7 @@ class AzBatchServiceTest extends Specification { def 'should match familty' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) expect: @@ -245,7 +241,7 @@ class AzBatchServiceTest extends Specification { @Unroll def 'should compute mem slots' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) expect: @@ -267,7 +263,7 @@ class 
AzBatchServiceTest extends Specification { @Unroll def 'should compute slots' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) expect: @@ -292,7 +288,7 @@ class AzBatchServiceTest extends Specification { def 'should configure default startTask' () { given: def CONFIG = [batch:[copyToolInstallMode: 'node']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) def svc = new AzBatchService(exec) when: @@ -306,7 +302,7 @@ class AzBatchServiceTest extends Specification { def 'should configure custom startTask' () { given: def CONFIG = [batch:[copyToolInstallMode: 'node']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) def svc = new AzBatchService(exec) when: @@ -321,7 +317,7 @@ class AzBatchServiceTest extends Specification { def 'should configure not install AzCopy because copyToolInstallMode is off' () { given: def CONFIG = [batch:[copyToolInstallMode: 'off']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) def svc = new AzBatchService(exec) when: @@ -334,7 +330,7 @@ class AzBatchServiceTest extends Specification { def 'should configure not install AzCopy because copyToolInstallMode is task and quote command' () { given: def CONFIG = [batch:[copyToolInstallMode: 'task']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) def svc = new AzBatchService(exec) when: @@ -347,7 +343,7 @@ class AzBatchServiceTest extends Specification { def 'should create null startTask because no options are enabled' () { given: def CONFIG = [batch:[copyToolInstallMode: 'off']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) def svc = new AzBatchService(exec) when: @@ -359,7 +355,7 @@ class 
AzBatchServiceTest extends Specification { def 'should configure privileged startTask' () { given: def CONFIG = [batch:[copyToolInstallMode: 'node']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) def svc = new AzBatchService(exec) and: @@ -371,7 +367,7 @@ class AzBatchServiceTest extends Specification { def 'should check scaling formula' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -383,7 +379,7 @@ class AzBatchServiceTest extends Specification { def 'should check scaling formula for low-priority' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) when: @@ -395,7 +391,7 @@ class AzBatchServiceTest extends Specification { def 'should check formula vars' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() def svc = new AzBatchService(exec) and: def opts = new AzPoolOpts(vmCount: 3, maxVmCount: 10, scaleInterval: Duration.of('5 min')) @@ -415,7 +411,7 @@ class AzBatchServiceTest extends Specification { def LOC = 'europe' def TYPE = Mock(AzVmType) and: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() AzBatchService svc = Spy(AzBatchService, constructorArgs: [exec]) when: @@ -452,7 +448,7 @@ class AzBatchServiceTest extends Specification { def 'should check poolid' () { given: - def exec = Mock(AzBatchExecutor) { getConfig() >> new AzConfig([:]) } + def exec = createExecutor() AzBatchService svc = Spy(AzBatchService, constructorArgs: [exec]) when: @@ -486,7 +482,7 @@ class AzBatchServiceTest extends Specification { def TYPE = 'Standard_X1' def VM = new AzVmType(name: TYPE, numberOfCores: CPUS) and: - def exec = Mock(AzBatchExecutor) { getConfig() >> CFG } + def exec = createExecutor(CFG) 
AzBatchService svc = Spy(AzBatchService, constructorArgs: [exec]) and: def TASK = Mock(TaskRun) { @@ -511,7 +507,7 @@ class AzBatchServiceTest extends Specification { def 'should set jobs to automatically terminate by default' () { given: def CONFIG = [:] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -522,7 +518,7 @@ class AzBatchServiceTest extends Specification { def 'should not cleanup jobs by default' () { given: def CONFIG = [:] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -533,7 +529,7 @@ class AzBatchServiceTest extends Specification { def 'should cleanup jobs if specified' () { given: def CONFIG = [batch:[deleteJobsOnCompletion: true]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -544,7 +540,7 @@ class AzBatchServiceTest extends Specification { def 'should not cleanup pools by default' () { given: def CONFIG = [:] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -555,7 +551,7 @@ class AzBatchServiceTest extends Specification { def 'should cleanup pools with autoPoolMode' () { given: def CONFIG = [batch:[autoPoolMode: true, deletePoolsOnCompletion: true]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -566,7 +562,7 @@ class AzBatchServiceTest extends Specification { def 'should cleanup pools with allowPoolCreation' () { 
given: def CONFIG = [batch:[allowPoolCreation: true, deletePoolsOnCompletion: true]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -578,7 +574,7 @@ class AzBatchServiceTest extends Specification { def 'should not cleanup pools without autoPoolMode or allowPoolCreation' () { given: def CONFIG = [batch:[deletePoolsOnCompletion: true]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: svc.close() @@ -590,7 +586,7 @@ class AzBatchServiceTest extends Specification { given: def POOL_ID = 'foo' def CONFIG = [batch:[location: 'northeurope', pools: [(POOL_ID): [vmType: 'Standard_D2_v2']]]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: @@ -608,7 +604,7 @@ class AzBatchServiceTest extends Specification { given: def POOL_ID = 'foo' def CONFIG = [batch:[location: 'northeurope']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(AzBatchService, constructorArgs:[exec]) when: @@ -626,7 +622,7 @@ class AzBatchServiceTest extends Specification { given: def retryCfg = [delay: '100ms', maxDelay: '200ms', maxAttempts: 300] def CONFIG = [batch:[location: 'northeurope'], retryPolicy: retryCfg] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService svc = Spy(new AzBatchService(exec)) when: @@ -641,7 +637,7 @@ class AzBatchServiceTest extends Specification { def 'should create apply policy' () { given: def CONFIG = [batch:[location: 'northeurope']] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def 
exec = createExecutor(CONFIG) AzBatchService svc = Spy(new AzBatchService(exec)) expect: @@ -656,7 +652,7 @@ class AzBatchServiceTest extends Specification { def POOL_ID = 'my-pool' def SAS = '123' def CONFIG = [storage: [sasToken: SAS]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService azure = Spy(new AzBatchService(exec)) and: def TASK = Mock(TaskRun) { @@ -690,7 +686,7 @@ class AzBatchServiceTest extends Specification { def SAS = '123' def CONFIG = [storage: [sasToken: SAS]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService azure = Spy(new AzBatchService(exec)) def session = Mock(Session) { getConfig() >>[fusion:[enabled:false]] @@ -736,7 +732,7 @@ class AzBatchServiceTest extends Specification { def SAS = '123' def CONFIG = [storage: [sasToken: SAS, fileShares: [file1: [mountOptions: 'mountOptions1', mountPath: 'mountPath1']]]] - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService azure = Spy(new AzBatchService(exec)) def session = Mock(Session) { getConfig() >>[fusion:[enabled:false]] @@ -783,7 +779,7 @@ class AzBatchServiceTest extends Specification { def WORKDIR = FileSystemPathFactory.parse('az://foo/work/dir') and: def POOL_ID = 'my-pool' - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(AZURE) } + def exec = createExecutor(new AzConfig(AZURE)) AzBatchService azure = Spy(new AzBatchService(exec)) and: def TASK = Mock(TaskRun) { @@ -820,7 +816,7 @@ class AzBatchServiceTest extends Specification { def 'should create user-assigned managed identity credentials token' () { given: def config = Mock(AzConfig) - def exec = Mock(AzBatchExecutor) {getConfig() >> new AzConfig(CONFIG) } + def exec = createExecutor(CONFIG) AzBatchService service = Spy(new AzBatchService(exec)) when: @@ -840,12 +836,11 @@ class AzBatchServiceTest extends 
Specification { def 'should use pool identity client id for fusion tasks' () { given: def POOL_IDENTITY_CLIENT_ID = 'pool-identity-123' - def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([ - batch: [poolIdentityClientId: POOL_IDENTITY_CLIENT_ID], - storage: [sasToken: 'test-sas-token', accountName: 'testaccount'] - ]) - } + def CONFIG = new AzConfig([ + batch: [poolIdentityClientId: POOL_IDENTITY_CLIENT_ID], + storage: [sasToken: 'test-sas-token', accountName: 'testaccount'] + ]) + def exec = createExecutor(CONFIG) def service = new AzBatchService(exec) and: @@ -866,7 +861,7 @@ class AzBatchServiceTest extends Specification { def 'should cache job id' () { given: - def exec = Mock(AzBatchExecutor) + def exec = createExecutor() def service = Spy(new AzBatchService(exec)) and: def p1 = Mock(TaskProcessor) @@ -917,7 +912,7 @@ class AzBatchServiceTest extends Specification { def 'should test safeCreatePool' () { given: - def exec = Mock(AzBatchExecutor) + def exec = createExecutor() def service = Spy(new AzBatchService(exec)) def spec = Mock(AzVmPoolSpec) { getPoolId() >> 'test-pool' @@ -972,7 +967,7 @@ class AzBatchServiceTest extends Specification { def 'should test createJobConstraints method with Duration input' () { given: - def exec = Mock(AzBatchExecutor) + def exec = createExecutor() def service = new AzBatchService(exec) def nfDuration = TIME_STR ? 
nextflow.util.Duration.of(TIME_STR) : null @@ -998,7 +993,7 @@ class AzBatchServiceTest extends Specification { def 'should create task constraints' () { given: - def exec = Mock(AzBatchExecutor) + def exec = createExecutor() def service = new AzBatchService(exec) def task = Mock(TaskRun) { getConfig() >> Mock(TaskConfig) { diff --git a/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzFileCopyStrategyTest.groovy b/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzFileCopyStrategyTest.groovy index 436fcb2ce9..9c64e3e6bd 100644 --- a/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzFileCopyStrategyTest.groovy +++ b/plugins/nf-azure/src/test/nextflow/cloud/azure/batch/AzFileCopyStrategyTest.groovy @@ -70,7 +70,7 @@ class AzFileCopyStrategyTest extends Specification { def workDir = mockAzPath( 'az://my-data/work/dir' ) def token = '12345' def config = new AzConfig([storage:[sasToken: token]]) - def executor = Mock(AzBatchExecutor) { getConfig() >> config } + def executor = Mock(AzBatchExecutor) { getAzConfig() >> config } when: def binding = new AzBatchScriptLauncher([ @@ -203,7 +203,7 @@ class AzFileCopyStrategyTest extends Specification { def token = '12345' def config = new AzConfig([storage:[sasToken: token]]) def executor = Mock(AzBatchExecutor) { - getConfig() >> config + getAzConfig() >> config getRemoteBinDir() >> remoteBin } @@ -345,7 +345,7 @@ class AzFileCopyStrategyTest extends Specification { def input2 = mockAzPath('az://my-data/work/dir/file2.txt') def token = '12345' def config = new AzConfig([storage:[sasToken: token]]) - def executor = Mock(AzBatchExecutor) { getConfig() >> config } + def executor = Mock(AzBatchExecutor) { getAzConfig() >> config } when: def binding = new AzBatchScriptLauncher([ diff --git a/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy b/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy index 1969345af5..380f7626bd 100644 --- 
a/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy +++ b/plugins/nf-azure/src/test/nextflow/executor/BashWrapperBuilderWithAzTest.groovy @@ -113,7 +113,7 @@ class BashWrapperBuilderWithAzTest extends Specification { ]) def exec = Mock(AzBatchExecutor) { - getConfig() >> new AzConfig([:]) + getAzConfig() >> new AzConfig([:]) } and: def copy = new AzFileCopyStrategy(bean, exec) diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/GoogleOpts.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/GoogleOpts.groovy index 8171f834ef..bb88932b00 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/GoogleOpts.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/GoogleOpts.groovy @@ -25,7 +25,12 @@ import groovy.transform.ToString import groovy.util.logging.Slf4j import nextflow.Session import nextflow.SysEnv +import nextflow.cloud.google.batch.client.BatchConfig import nextflow.cloud.google.config.GoogleStorageOpts +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description import nextflow.exception.AbortOperationException import nextflow.util.Duration /** @@ -33,37 +38,69 @@ import nextflow.util.Duration * * @author Paolo Di Tommaso */ +@ScopeName("google") +@Description(""" + The `google` scope allows you to configure the interactions with Google Cloud, including Google Cloud Batch and Google Cloud Storage. +""") @Slf4j @ToString(includeNames = true, includePackage = false) @CompileStatic -class GoogleOpts { +class GoogleOpts implements ConfigScope { static final public String DEFAULT_LOCATION = 'us-central1' static Map env = SysEnv.get() - private String projectId - private String location + @ConfigOption + @Description(""" + The Google Cloud project ID to use for pipeline execution. 
+ """) + String project + + @ConfigOption + @Description(""" + The Google Cloud location where jobs are executed (default: `us-central1`). + """) + final String location + + @ConfigOption + @Description(""" + Use the given Google Cloud project ID as the billing project for storage access (default: `false`). Required when accessing data from [requester pays](https://cloud.google.com/storage/docs/requester-pays) buckets. + """) + final boolean enableRequesterPaysBuckets + + @ConfigOption + @Description(""" + The HTTP connection timeout for Cloud Storage API requests (default: `'60s'`). + """) + final Duration httpConnectTimeout + + @ConfigOption + @Description(""" + The HTTP read timeout for Cloud Storage API requests (default: `'60s'`). + """) + final Duration httpReadTimeout + + final BatchConfig batch + + final GoogleStorageOpts storage + private File credsFile - private boolean enableRequesterPaysBuckets - private Duration httpConnectTimeout - private Duration httpReadTimeout - private GoogleStorageOpts storageOpts - - String getProjectId() { projectId } - String getLocation() { location ?: DEFAULT_LOCATION } - boolean getEnableRequesterPaysBuckets() { enableRequesterPaysBuckets } - Duration getHttpConnectTimeout() { httpConnectTimeout } - Duration getHttpReadTimeout() { httpReadTimeout } - GoogleStorageOpts getStorageOpts() { storageOpts } + + String getProjectId() { project } + GoogleStorageOpts getStorageOpts() { storage } + + /* required by extension point -- do not remove */ + GoogleOpts() {} GoogleOpts(Map opts) { - projectId = opts.project as String - location = opts.location as String + project = opts.project + location = opts.location ?: DEFAULT_LOCATION enableRequesterPaysBuckets = opts.enableRequesterPaysBuckets as boolean httpConnectTimeout = opts.httpConnectTimeout ? opts.httpConnectTimeout as Duration : Duration.of('60s') httpReadTimeout = opts.httpReadTimeout ? 
opts.httpReadTimeout as Duration : Duration.of('60s') - storageOpts = new GoogleStorageOpts( opts.storage as Map ?: Map.of() ) + batch = new BatchConfig( opts.batch as Map ?: Collections.emptyMap() ) + storage = new GoogleStorageOpts( opts.storage as Map ?: Collections.emptyMap() ) } @Memoized @@ -78,7 +115,7 @@ class GoogleOpts { } protected static GoogleOpts fromSession0(Map config) { - final result = new GoogleOpts( config.google as Map ?: Map.of() ) + final result = new GoogleOpts( config.google as Map ?: Collections.emptyMap() ) if( result.enableRequesterPaysBuckets && !result.projectId ) throw new IllegalArgumentException("Config option 'google.enableRequesterPaysBuckets' cannot be honoured because the Google project Id has not been specified - Provide it by adding the option 'google.project' in the nextflow.config file") @@ -123,13 +160,13 @@ class GoogleOpts { def credsPath = env.get('GOOGLE_APPLICATION_CREDENTIALS') if( credsPath && (projectId = getProjectIdFromCreds(credsPath)) ) { config.credsFile = new File(credsPath) - if( !config.projectId ) - config.projectId = projectId - else if( config.projectId != projectId ) - throw new AbortOperationException("Project Id `$config.projectId` declared in the nextflow config file does not match the one expected by credentials file: $credsPath") + if( !config.project ) + config.project = projectId + else if( config.project != projectId ) + throw new AbortOperationException("Project Id `$config.project` declared in the nextflow config file does not match the one expected by credentials file: $credsPath") } - if( !config.projectId ) { + if( !config.project ) { throw new AbortOperationException("Missing Google project Id -- Specify it adding the setting `google.project='your-project-id'` in the nextflow.config file") } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchExecutor.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchExecutor.groovy index 
17a06ae563..11063ad065 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchExecutor.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchExecutor.groovy @@ -25,6 +25,7 @@ import com.google.cloud.storage.contrib.nio.CloudStoragePath import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.SysEnv +import nextflow.cloud.google.GoogleOpts import nextflow.cloud.google.batch.client.BatchClient import nextflow.cloud.google.batch.client.BatchConfig import nextflow.cloud.google.batch.logging.BatchLogging @@ -52,14 +53,15 @@ import org.pf4j.ExtensionPoint class GoogleBatchExecutor extends Executor implements ExtensionPoint, TaskArrayExecutor { private BatchClient client - private BatchConfig config + private GoogleOpts googleOpts private Path remoteBinDir private BatchLogging logging private final Set deletedJobs = new HashSet<>() BatchClient getClient() { return client } - BatchConfig getConfig() { return config } + GoogleOpts getGoogleOpts() { return googleOpts } + BatchConfig getBatchConfig() { return googleOpts.batch } Path getRemoteBinDir() { return remoteBinDir } BatchLogging getLogging() { logging } @@ -94,13 +96,13 @@ class GoogleBatchExecutor extends Executor implements ExtensionPoint, TaskArrayE } protected void createConfig() { - this.config = BatchConfig.create(session) - log.debug "[GOOGLE BATCH] Executor config=$config" + this.googleOpts = GoogleOpts.create(session) + log.debug "[GOOGLE BATCH] Executor config=$googleOpts" } protected void createClient() { - this.client = new BatchClient(config) - this.logging = new BatchLogging(config) + this.client = new BatchClient(googleOpts) + this.logging = new BatchLogging(googleOpts) } @Override @@ -114,7 +116,7 @@ class GoogleBatchExecutor extends Executor implements ExtensionPoint, TaskArrayE @Override protected TaskMonitor createTaskMonitor() { - TaskPollingMonitor.create(session, name, 1000, Duration.of('10 sec')) + 
TaskPollingMonitor.create(session, config, name, 1000, Duration.of('10 sec')) } @Override diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchScriptLauncher.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchScriptLauncher.groovy index 74d3ed9899..176ad46123 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchScriptLauncher.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchScriptLauncher.groovy @@ -25,7 +25,7 @@ import com.google.cloud.batch.v1.Volume import com.google.cloud.storage.contrib.nio.CloudStoragePath import groovy.transform.CompileStatic import groovy.util.logging.Slf4j -import nextflow.cloud.google.batch.client.BatchConfig +import nextflow.cloud.google.GoogleOpts import nextflow.executor.BashWrapperBuilder import nextflow.extension.FilesEx import nextflow.processor.TaskBean @@ -45,7 +45,7 @@ class GoogleBatchScriptLauncher extends BashWrapperBuilder implements GoogleBatc private static final String MOUNT_ROOT = '/mnt/disks' - private BatchConfig config + private GoogleOpts config private CloudStoragePath remoteWorkDir private Path remoteBinDir private Set buckets = new HashSet<>() @@ -146,10 +146,10 @@ class GoogleBatchScriptLauncher extends BashWrapperBuilder implements GoogleBatc final result = new ArrayList(10) for( String it : buckets ) { final mountOptions = new LinkedList() - if( config && config.gcsfuseOptions ) - mountOptions.addAll(config.gcsfuseOptions) - if( config && config.googleOpts.enableRequesterPaysBuckets ) - mountOptions << "--billing-project ${config.googleOpts.projectId}".toString() + if( config && config.batch.gcsfuseOptions ) + mountOptions.addAll(config.batch.gcsfuseOptions) + if( config && config.enableRequesterPaysBuckets ) + mountOptions.add("--billing-project ${config.projectId}".toString()) result.add( Volume.newBuilder() @@ -184,7 +184,7 @@ class GoogleBatchScriptLauncher extends BashWrapperBuilder implements 
GoogleBatc return remoteWorkDir.resolve(TaskRun.CMD_INFILE) } - GoogleBatchScriptLauncher withConfig(BatchConfig config) { + GoogleBatchScriptLauncher withConfig(GoogleOpts config) { this.config = config return this } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy index 1cb3a0e4e1..2b67dfd75b 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/GoogleBatchTaskHandler.groovy @@ -37,6 +37,7 @@ import groovy.transform.CompileStatic import groovy.transform.PackageScope import groovy.util.logging.Slf4j import nextflow.cloud.google.batch.client.BatchClient +import nextflow.cloud.google.batch.client.BatchConfig import nextflow.cloud.types.CloudMachineInfo import nextflow.cloud.types.PriceModel import nextflow.exception.ProcessException @@ -68,6 +69,8 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { private GoogleBatchExecutor executor + private BatchConfig batchConfig + private Path exitFile private Path outputFile @@ -111,6 +114,7 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { super(task) this.client = executor.getClient() this.executor = executor + this.batchConfig = executor.batchConfig this.jobId = customJobName(task) ?: "nf-${task.hashLog.replace('/','')}-${System.currentTimeMillis()}" // those files are access via NF runtime, keep based on CloudStoragePath this.outputFile = task.workDir.resolve(TaskRun.CMD_OUTFILE) @@ -129,7 +133,7 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { */ protected String customJobName(TaskRun task) { try { - final custom = (Closure)executor.session?.getExecConfigProp(executor.name, 'jobName', null) + final custom = (Closure) executor.config.getExecConfigProp(executor.name, 'jobName', null) if( !custom 
) return null @@ -149,7 +153,7 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { else { final taskBean = task.toTaskBean() return new GoogleBatchScriptLauncher(taskBean, executor.remoteBinDir) - .withConfig(executor.config) + .withConfig(executor.googleOpts) .withIsArray(task.isArray()) } } @@ -219,8 +223,8 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { if( disk && !disk.type ) computeResource.setBootDiskMib( disk.request.getMega() ) // otherwise use config setting - else if( executor.config.bootDiskSize ) - computeResource.setBootDiskMib( executor.config.bootDiskSize.getMega() ) + else if( batchConfig.bootDiskSize ) + computeResource.setBootDiskMib( batchConfig.bootDiskSize.getMega() ) // container if( !task.container ) @@ -257,17 +261,17 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { .addAllVolumes( launcher.getVolumes() ) // retry on spot reclaim - if( executor.config.maxSpotAttempts ) { + if( batchConfig.maxSpotAttempts ) { // Note: Google Batch uses the special exit status 50001 to signal // the execution was terminated due a spot reclaim. 
When this happens // The policy re-execute the jobs automatically up to `maxSpotAttempts` times taskSpec - .setMaxRetryCount( executor.config.maxSpotAttempts ) + .setMaxRetryCount( batchConfig.maxSpotAttempts ) .addLifecyclePolicies( LifecyclePolicy.newBuilder() .setActionCondition( LifecyclePolicy.ActionCondition.newBuilder() - .addAllExitCodes(executor.config.autoRetryExitCodes) + .addAllExitCodes(batchConfig.autoRetryExitCodes) ) .setAction(LifecyclePolicy.Action.RETRY_TASK) ) @@ -278,23 +282,23 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { final allocationPolicy = AllocationPolicy.newBuilder() final instancePolicyOrTemplate = AllocationPolicy.InstancePolicyOrTemplate.newBuilder() - if( executor.config.getAllowedLocations() ) + if( batchConfig.getAllowedLocations() ) allocationPolicy.setLocation( AllocationPolicy.LocationPolicy.newBuilder() - .addAllAllowedLocations( executor.config.getAllowedLocations() ) + .addAllAllowedLocations( batchConfig.getAllowedLocations() ) ) - if( executor.config.serviceAccountEmail ) + if( batchConfig.serviceAccountEmail ) allocationPolicy.setServiceAccount( ServiceAccount.newBuilder() - .setEmail( executor.config.serviceAccountEmail ) + .setEmail( batchConfig.serviceAccountEmail ) ) allocationPolicy.putAllLabels( task.config.getResourceLabels() ) // Add network tags if configured - if( executor.config.networkTags ) - allocationPolicy.addAllTags( executor.config.networkTags ) + if( batchConfig.networkTags ) + allocationPolicy.addAllTags( batchConfig.networkTags ) // use instance template if specified if( task.config.getMachineType()?.startsWith('template://') ) { @@ -304,23 +308,23 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { if( task.config.getDisk() ) log.warn1 'Process directive `disk` ignored because an instance template was specified' - if( executor.config.getBootDiskImage() ) + if( batchConfig.getBootDiskImage() ) log.warn1 'Config option 
`google.batch.bootDiskImage` ignored because an instance template was specified' - if( executor.config.cpuPlatform ) + if( batchConfig.cpuPlatform ) log.warn1 'Config option `google.batch.cpuPlatform` ignored because an instance template was specified' - if( executor.config.networkTags ) + if( batchConfig.networkTags ) log.warn1 'Config option `google.batch.networkTags` ignored because an instance template was specified' - if( executor.config.preemptible ) + if( batchConfig.preemptible ) log.warn1 'Config option `google.batch.premptible` ignored because an instance template was specified' - if( executor.config.spot ) + if( batchConfig.spot ) log.warn1 'Config option `google.batch.spot` ignored because an instance template was specified' instancePolicyOrTemplate - .setInstallGpuDrivers( executor.config.getInstallGpuDrivers() ) + .setInstallGpuDrivers( batchConfig.getInstallGpuDrivers() ) .setInstanceTemplate( task.config.getMachineType().minus('template://') ) } @@ -328,8 +332,8 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { else { final instancePolicy = AllocationPolicy.InstancePolicy.newBuilder() - if( executor.config.getBootDiskImage() ) - instancePolicy.setBootDisk( AllocationPolicy.Disk.newBuilder().setImage( executor.config.getBootDiskImage() ) ) + if( batchConfig.getBootDiskImage() ) + instancePolicy.setBootDisk( AllocationPolicy.Disk.newBuilder().setImage( batchConfig.getBootDiskImage() ) ) if( fusionEnabled() && !disk ) { disk = new DiskResource(request: '375 GB', type: 'local-ssd') @@ -393,13 +397,13 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { ) } - if( executor.config.cpuPlatform ) - instancePolicy.setMinCpuPlatform( executor.config.cpuPlatform ) + if( batchConfig.cpuPlatform ) + instancePolicy.setMinCpuPlatform( batchConfig.cpuPlatform ) - if( executor.config.preemptible ) + if( batchConfig.preemptible ) instancePolicy.setProvisioningModel( 
AllocationPolicy.ProvisioningModel.PREEMPTIBLE ) - if( executor.config.spot ) + if( batchConfig.spot ) instancePolicy.setProvisioningModel( AllocationPolicy.ProvisioningModel.SPOT ) instancePolicyOrTemplate.setPolicy( instancePolicy ) @@ -411,15 +415,15 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { final networkInterface = AllocationPolicy.NetworkInterface.newBuilder() def hasNetworkPolicy = false - if( executor.config.network ) { + if( batchConfig.network ) { hasNetworkPolicy = true - networkInterface.setNetwork( executor.config.network ) + networkInterface.setNetwork( batchConfig.network ) } - if( executor.config.subnetwork ) { + if( batchConfig.subnetwork ) { hasNetworkPolicy = true - networkInterface.setSubnetwork( executor.config.subnetwork ) + networkInterface.setSubnetwork( batchConfig.subnetwork ) } - if( executor.config.usePrivateAddress ) { + if( batchConfig.usePrivateAddress ) { hasNetworkPolicy = true networkInterface.setNoExternalIpAddress( true ) } @@ -625,7 +629,7 @@ class GoogleBatchTaskHandler extends TaskHandler implements FusionAwareTask { final location = client.location final cpus = config.getCpus() final memory = config.getMemory() ? config.getMemory().toMega().toInteger() : 1024 - final spot = executor.config.spot ?: executor.config.preemptible + final spot = batchConfig.spot ?: batchConfig.preemptible final machineType = config.getMachineType() final families = machineType ? machineType.tokenize(',') : List.of() final priceModel = spot ? 
PriceModel.spot : PriceModel.standard diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy index 02850ef5b6..46f6353615 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchClient.groovy @@ -44,6 +44,7 @@ import dev.failsafe.event.ExecutionAttemptedEvent import dev.failsafe.function.CheckedSupplier import groovy.transform.CompileStatic import groovy.util.logging.Slf4j +import nextflow.cloud.google.GoogleOpts import nextflow.util.TestOnly /** * Implements Google Batch HTTP client @@ -57,20 +58,20 @@ class BatchClient { protected String projectId protected String location protected BatchServiceClient batchServiceClient - protected BatchConfig config + protected GoogleOpts config private Map arrayTaskStatus = new ConcurrentHashMap() - BatchClient(BatchConfig config) { + BatchClient(GoogleOpts config) { this.config = config - this.projectId = config.googleOpts.projectId - this.location = config.googleOpts.location + this.projectId = config.projectId + this.location = config.location this.batchServiceClient = createBatchService(config) } @TestOnly protected BatchClient() {} - protected CredentialsProvider createCredentialsProvider(BatchConfig config) { + protected CredentialsProvider createCredentialsProvider(GoogleOpts config) { if( !config.getCredentials() ) return null return new CredentialsProvider() { @@ -81,7 +82,7 @@ class BatchClient { } } - protected BatchServiceClient createBatchService(BatchConfig config) { + protected BatchServiceClient createBatchService(GoogleOpts config) { final provider = createCredentialsProvider(config) if( provider ) { log.debug "[GOOGLE BATCH] Creating service client with config credentials" @@ -156,7 +157,7 @@ class BatchClient { * @return The {@link dev.failsafe.RetryPolicy} instance */ protected 
RetryPolicy retryPolicy(Predicate cond) { - final cfg = config.getRetryConfig() + final cfg = config.batch.getRetryConfig() final listener = new EventListener>() { @Override void accept(ExecutionAttemptedEvent event) throws Throwable { diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy index d8285e0646..984fa5930f 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/client/BatchConfig.groovy @@ -21,6 +21,9 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.Session import nextflow.cloud.google.GoogleOpts +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.MemoryUnit /** * Model Google Batch config settings @@ -29,7 +32,7 @@ import nextflow.util.MemoryUnit */ @Slf4j @CompileStatic -class BatchConfig { +class BatchConfig implements ConfigScope { static final private int DEFAULT_MAX_SPOT_ATTEMPTS = 0 @@ -37,70 +40,119 @@ class BatchConfig { static final private List DEFAULT_GCSFUSE_OPTS = List.of('-o rw', '-implicit-dirs') - private GoogleOpts googleOpts - private GoogleCredentials credentials - private List allowedLocations - private String bootDiskImage - private MemoryUnit bootDiskSize - private String cpuPlatform - private int maxSpotAttempts - private boolean installGpuDrivers - private boolean preemptible - private boolean spot - private boolean usePrivateAddress - private String network - private String subnetwork - private String serviceAccountEmail - private List networkTags - private BatchRetryConfig retryConfig - private List autoRetryExitCodes - private List gcsfuseOptions - - GoogleOpts getGoogleOpts() { return googleOpts } - GoogleCredentials getCredentials() { return credentials } - List 
getAllowedLocations() { allowedLocations } - String getBootDiskImage() { bootDiskImage } - MemoryUnit getBootDiskSize() { bootDiskSize } - String getCpuPlatform() { cpuPlatform } - int getMaxSpotAttempts() { maxSpotAttempts } - boolean getInstallGpuDrivers() { installGpuDrivers } - boolean getPreemptible() { preemptible } - boolean getSpot() { spot } - boolean getUsePrivateAddress() { usePrivateAddress } - String getNetwork() { network } - String getSubnetwork() { subnetwork } - String getServiceAccountEmail() { serviceAccountEmail } - List getNetworkTags() { networkTags } - BatchRetryConfig getRetryConfig() { retryConfig } - List getAutoRetryExitCodes() { autoRetryExitCodes } - List getGcsfuseOptions() { gcsfuseOptions } - - static BatchConfig create(Session session) { - final result = new BatchConfig() - result.googleOpts = GoogleOpts.create(session) - result.credentials = result.googleOpts.credentials - result.allowedLocations = session.config.navigate('google.batch.allowedLocations', List.of()) as List - result.bootDiskImage = session.config.navigate('google.batch.bootDiskImage') - result.bootDiskSize = session.config.navigate('google.batch.bootDiskSize') as MemoryUnit - result.cpuPlatform = session.config.navigate('google.batch.cpuPlatform') - result.maxSpotAttempts = session.config.navigate('google.batch.maxSpotAttempts', DEFAULT_MAX_SPOT_ATTEMPTS) as int - result.installGpuDrivers = session.config.navigate('google.batch.installGpuDrivers',false) - result.preemptible = session.config.navigate('google.batch.preemptible',false) - result.spot = session.config.navigate('google.batch.spot',false) - result.usePrivateAddress = session.config.navigate('google.batch.usePrivateAddress',false) - result.network = session.config.navigate('google.batch.network') - result.subnetwork = session.config.navigate('google.batch.subnetwork') - result.serviceAccountEmail = session.config.navigate('google.batch.serviceAccountEmail') - result.networkTags = 
session.config.navigate('google.batch.networkTags', List.of()) as List - result.retryConfig = new BatchRetryConfig( session.config.navigate('google.batch.retryPolicy') as Map ?: Map.of() ) - result.autoRetryExitCodes = session.config.navigate('google.batch.autoRetryExitCodes', DEFAULT_RETRY_LIST) as List - result.gcsfuseOptions = session.config.navigate('google.batch.gcsfuseOptions', DEFAULT_GCSFUSE_OPTS) as List - return result - } + @ConfigOption + @Description(""" + The set of [allowed locations](https://cloud.google.com/batch/docs/reference/rest/v1/projects.locations.jobs#locationpolicy) for VMs to be provisioned (default: no restriction). + """) + final List allowedLocations + + @ConfigOption + @Description(""" + The list of exit codes that should be automatically retried by Google Batch when `google.batch.maxSpotAttempts` is greater than 0 (default: `[50001]`). + + [Read more](https://cloud.google.com/batch/docs/troubleshooting#reserved-exit-codes) + """) + final List autoRetryExitCodes + + @ConfigOption + @Description(""" + The image URI of the virtual machine boot disk, e.g `batch-debian` (default: none). + + [Read more](https://cloud.google.com/batch/docs/vm-os-environment-overview#vm-os-image-options) + """) + final String bootDiskImage + + @ConfigOption + @Description(""" + The size of the virtual machine boot disk, e.g `50.GB` (default: none). + """) + final MemoryUnit bootDiskSize + + @ConfigOption + @Description(""" + The [minimum CPU Platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#specifications), e.g. `'Intel Skylake'` (default: none). + """) + final String cpuPlatform + + @ConfigOption + @Description(""" + List of custom mount options for `gcsfuse` (default: `['-o rw', '-implicit-dirs']`). 
+ """) + final List gcsfuseOptions + + @ConfigOption + @Description(""" + """) + final boolean installGpuDrivers + + @ConfigOption + @Description(""" + Max number of execution attempts of a job interrupted by a Compute Engine Spot reclaim event (default: `0`). + """) + final int maxSpotAttempts - @Override - String toString(){ - return "BatchConfig[googleOpts=$googleOpts" + @ConfigOption + @Description(""" + The URL of an existing network resource to which the VM will be attached. + """) + final String network + + @ConfigOption + @Description(""" + The [network tags](https://cloud.google.com/vpc/docs/add-remove-network-tags) to be applied to the instances created by Google Batch jobs (e.g., `['allow-ssh', 'allow-http']`). + """) + final List networkTags + + @ConfigOption + @Description(""" + """) + final boolean preemptible + + final BatchRetryConfig retry + + @ConfigOption + @Description(""" + The Google service account email to use for the pipeline execution. If not specified, the default Compute Engine service account for the project will be used. + """) + final String serviceAccountEmail + + @ConfigOption + @Description(""" + Enable the use of spot virtual machines (default: `false`). + """) + final boolean spot + + @ConfigOption + @Description(""" + The URL of an existing subnetwork resource in the network to which the VM will be attached. + """) + final String subnetwork + + @ConfigOption + @Description(""" + Do not provision public IP addresses for VMs, such that they only have an internal IP address (default: `false`). 
+ """) + final boolean usePrivateAddress + + BatchConfig(Map opts) { + allowedLocations = opts.allowedLocations as List ?: Collections.emptyList() + autoRetryExitCodes = opts.autoRetryExitCodes as List ?: DEFAULT_RETRY_LIST + bootDiskImage = opts.bootDiskImage + bootDiskSize = opts.bootDiskSize as MemoryUnit + cpuPlatform = opts.cpuPlatform + gcsfuseOptions = opts.gcsfuseOptions as List ?: DEFAULT_GCSFUSE_OPTS + installGpuDrivers = opts.installGpuDrivers as boolean + maxSpotAttempts = opts.maxSpotAttempts != null ? opts.maxSpotAttempts as int : DEFAULT_MAX_SPOT_ATTEMPTS + network = opts.network + networkTags = opts.networkTags as List ?: Collections.emptyList() + preemptible = opts.preemptible as boolean + retry = new BatchRetryConfig( opts.retryPolicy as Map ?: Collections.emptyMap() ) + serviceAccountEmail = opts.serviceAccountEmail + spot = opts.spot as boolean + subnetwork = opts.subnetwork + usePrivateAddress = opts.usePrivateAddress as boolean } + BatchRetryConfig getRetryConfig() { retry } + } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/batch/logging/BatchLogging.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/batch/logging/BatchLogging.groovy index f00be651db..66f10efbdb 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/batch/logging/BatchLogging.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/batch/logging/BatchLogging.groovy @@ -21,24 +21,26 @@ import com.google.cloud.logging.LogEntry import com.google.cloud.logging.Logging import com.google.cloud.logging.LoggingOptions import com.google.cloud.logging.Severity +import groovy.transform.CompileStatic import groovy.transform.Memoized import groovy.transform.PackageScope import groovy.util.logging.Slf4j -import nextflow.cloud.google.batch.client.BatchConfig +import nextflow.cloud.google.GoogleOpts /** * Batch logging client * * @author Paolo Di Tommaso */ @Slf4j +@CompileStatic class BatchLogging implements Closeable { private LoggingOptions opts private 
String projectId private volatile Logging logging0 - BatchLogging(BatchConfig config) { - final creds = config.googleOpts.credentials - this.projectId = config.googleOpts.projectId + BatchLogging(GoogleOpts config) { + final creds = config.credentials + this.projectId = config.projectId this.opts = LoggingOptions .newBuilder() .setCredentials(creds) .setProjectId(this.projectId) .build() } @@ -85,7 +87,7 @@ class BatchLogging implements Closeable { } } - synchronized protected loggingService() { + synchronized protected Logging loggingService() { if( logging0==null ) { logging0 = opts.getService() } diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleRetryOpts.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleRetryOpts.groovy index ba385622f5..b384940429 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleRetryOpts.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleRetryOpts.groovy @@ -18,6 +18,9 @@ package nextflow.cloud.google.config import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.Duration /** @@ -26,10 +29,24 @@ import nextflow.util.Duration * @author Paolo Di Tommaso */ @CompileStatic -class GoogleRetryOpts { +class GoogleRetryOpts implements ConfigScope { + @ConfigOption + @Description(""" + Max attempts when retrying failed API requests to Cloud Storage (default: `10`). + """) final int maxAttempts + + @ConfigOption + @Description(""" + Delay multiplier when retrying failed API requests to Cloud Storage (default: `2.0`). + """) final double multiplier + + @ConfigOption + @Description(""" + Max delay when retrying failed API requests to Cloud Storage (default: `'90s'`). 
+ """) final Duration maxDelay GoogleRetryOpts(Map opts) { diff --git a/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleStorageOpts.groovy b/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleStorageOpts.groovy index 303f6e2c68..f5f8316846 100644 --- a/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleStorageOpts.groovy +++ b/plugins/nf-google/src/main/nextflow/cloud/google/config/GoogleStorageOpts.groovy @@ -17,16 +17,19 @@ package nextflow.cloud.google.config +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigScope /** * * @author Paolo Di Tommaso */ -class GoogleStorageOpts { +@CompileStatic +class GoogleStorageOpts implements ConfigScope { final GoogleRetryOpts retryPolicy GoogleStorageOpts(Map opts) { - retryPolicy = new GoogleRetryOpts( opts.retryPolicy as Map ?: Map.of() ) + retryPolicy = new GoogleRetryOpts( opts.retryPolicy as Map ?: Collections.emptyMap() ) } } diff --git a/plugins/nf-google/src/resources/META-INF/extensions.idx b/plugins/nf-google/src/resources/META-INF/extensions.idx index 952dbc38a2..001785f1bc 100644 --- a/plugins/nf-google/src/resources/META-INF/extensions.idx +++ b/plugins/nf-google/src/resources/META-INF/extensions.idx @@ -14,6 +14,7 @@ # limitations under the License. 
# +nextflow.cloud.google.GoogleOpts nextflow.cloud.google.batch.GoogleBatchExecutor nextflow.cloud.google.util.GsPathSerializer nextflow.cloud.google.util.GsPathFactory diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchScriptLauncherTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchScriptLauncherTest.groovy index 6a8888c86f..86776b174e 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchScriptLauncherTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchScriptLauncherTest.groovy @@ -51,12 +51,12 @@ class GoogleBatchScriptLauncherTest extends Specification{ def 'should compute volume mounts' () { given: def launcher = new GoogleBatchScriptLauncher() - launcher.config = Mock(BatchConfig) { - getGoogleOpts() >> Mock(GoogleOpts) { - getProjectId() >> 'my-project' - getEnableRequesterPaysBuckets() >> true + launcher.config = Mock(GoogleOpts) { + getBatch() >> Mock(BatchConfig) { + getGcsfuseOptions() >> ['-o rw', '-implicit-dirs', '-o allow_other', '--uid=1000'] } - getGcsfuseOptions() >> ['-o rw', '-implicit-dirs', '-o allow_other', '--uid=1000'] + getProjectId() >> 'my-project' + enableRequesterPaysBuckets >> true } and: def PATH1 = CloudStorageFileSystem.forBucket('alpha').getPath('/data/sample1.bam') diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy index e578365cf1..aa27e0e81e 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/batch/GoogleBatchTaskHandlerTest.groovy @@ -36,6 +36,7 @@ import nextflow.cloud.google.batch.client.BatchClient import nextflow.cloud.google.batch.client.BatchConfig import nextflow.cloud.types.PriceModel import nextflow.executor.Executor +import nextflow.executor.ExecutorConfig 
import nextflow.executor.res.AcceleratorResource import nextflow.executor.res.DiskResource import nextflow.processor.TaskBean @@ -59,7 +60,7 @@ class GoogleBatchTaskHandlerTest extends Specification { def WORK_DIR = CloudStorageFileSystem.forBucket('foo').getPath('/scratch') def CONTAINER_IMAGE = 'debian:latest' def exec = Mock(GoogleBatchExecutor) { - getConfig() >> Mock(BatchConfig) + getBatchConfig() >> Mock(BatchConfig) } and: def bean = new TaskBean(workDir: WORK_DIR, inputFiles: [:]) @@ -140,19 +141,19 @@ class GoogleBatchTaskHandlerTest extends Specification { def TIMEOUT = Duration.of('1 hour') and: def exec = Mock(GoogleBatchExecutor) { - getConfig() >> Mock(BatchConfig) { + getBatchConfig() >> Mock(BatchConfig) { getAllowedLocations() >> ['zones/us-central1-a', 'zones/us-central1-c'] getBootDiskSize() >> BOOT_DISK getBootDiskImage() >> BOOT_IMAGE getCpuPlatform() >> CPU_PLATFORM getMaxSpotAttempts() >> 5 getAutoRetryExitCodes() >> [50001,50002] - getSpot() >> true + spot >> true getNetwork() >> 'net-1' getNetworkTags() >> ['tag1', 'tag2'] getServiceAccountEmail() >> 'foo@bar.baz' getSubnetwork() >> 'subnet-1' - getUsePrivateAddress() >> true + usePrivateAddress >> true } } and: @@ -257,8 +258,11 @@ class GoogleBatchTaskHandlerTest extends Specification { def CONTAINER_IMAGE = 'debian:latest' and: def bean = new TaskBean(workDir: WORK_DIR, inputFiles: [:]) - def sess = Mock(Session) - def exec = new GoogleBatchExecutor(session: sess) + def config = Mock(ExecutorConfig) + def exec = Mock(GoogleBatchExecutor) { + getConfig() >> config + getBatchConfig() >> Mock(BatchConfig) + } and: def task = Mock(TaskRun) { @@ -274,14 +278,14 @@ class GoogleBatchTaskHandlerTest extends Specification { when: def result = handler.customJobName(task) then: - 1 * sess.getExecConfigProp(_,'jobName', null) >> null + 1 * config.getExecConfigProp(_, 'jobName', null) >> null and: result == null when: result = handler.customJobName(task) then: - 1 * 
sess.getExecConfigProp(_,'jobName', null) >> { return { "foo-${task.hashLog}" } } + 1 * config.getExecConfigProp(_, 'jobName', null) >> { return { "foo-${task.hashLog}" } } and: result == 'foo-abcd1234' @@ -294,7 +298,7 @@ class GoogleBatchTaskHandlerTest extends Specification { def CONTAINER_IMAGE = 'debian:latest' def INSTANCE_TEMPLATE = 'instance-template' def exec = Mock(GoogleBatchExecutor) { - getConfig() >> Mock(BatchConfig) { + getBatchConfig() >> Mock(BatchConfig) { getInstallGpuDrivers() >> true } } @@ -381,7 +385,7 @@ class GoogleBatchTaskHandlerTest extends Specification { def WORK_DIR = CloudStorageFileSystem.forBucket('foo').getPath('/scratch') def CONTAINER_IMAGE = 'debian:latest' def exec = Mock(GoogleBatchExecutor) { - getConfig() >> Mock(BatchConfig) + getBatchConfig() >> Mock(BatchConfig) } and: def bean = new TaskBean(workDir: WORK_DIR, inputFiles: [:]) @@ -441,7 +445,7 @@ class GoogleBatchTaskHandlerTest extends Specification { def WORK_DIR = CloudStorageFileSystem.forBucket('foo').getPath('/scratch') def CONTAINER_IMAGE = 'debian:latest' def exec = Mock(GoogleBatchExecutor) { - getConfig() >> Mock(BatchConfig) + getBatchConfig() >> Mock(BatchConfig) } def bean = new TaskBean(workDir: WORK_DIR, inputFiles: [:]) def task = Mock(TaskRun) { @@ -511,7 +515,7 @@ class GoogleBatchTaskHandlerTest extends Specification { } def exec = Mock(GoogleBatchExecutor) { getClient() >> client - getConfig() >> Mock(BatchConfig) { getSpot()>>false } + getBatchConfig() >> Mock(BatchConfig) { getSpot()>>false } isCloudinfoEnabled() >> true } def handler = Spy(new GoogleBatchTaskHandler(task, exec)) @@ -539,7 +543,7 @@ class GoogleBatchTaskHandlerTest extends Specification { } def exec = Mock(GoogleBatchExecutor) { getClient() >> client - getConfig() >> Mock(BatchConfig) { getSpot()>>false } + getBatchConfig() >> Mock(BatchConfig) { getSpot()>>false } isCloudinfoEnabled() >> false } def handler = Spy(new GoogleBatchTaskHandler(task, exec)) diff --git 
a/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy index c6fc08f104..6a2e043221 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/batch/client/BatchConfigTest.groovy @@ -29,12 +29,8 @@ class BatchConfigTest extends Specification { @Requires({System.getenv('GOOGLE_APPLICATION_CREDENTIALS')}) def 'should create batch config' () { - given: - def CONFIG = [:] - def session = Mock(Session) { getConfig()>>CONFIG } - when: - def config = BatchConfig.create(session) + def config = new BatchConfig([:]) then: !config.getSpot() and: @@ -49,20 +45,17 @@ class BatchConfigTest extends Specification { @Requires({System.getenv('GOOGLE_APPLICATION_CREDENTIALS')}) def 'should create batch config with custom settings' () { given: - def CONFIG = [google: [ - batch: [ - spot: true, - maxSpotAttempts: 8, - autoRetryExitCodes: [50001, 50003, 50005], - retryPolicy: [maxAttempts: 10], - bootDiskImage: 'batch-foo', - bootDiskSize: '100GB' - ] - ] ] - def session = Mock(Session) { getConfig()>>CONFIG } + def opts = [ + spot: true, + maxSpotAttempts: 8, + autoRetryExitCodes: [50001, 50003, 50005], + retryPolicy: [maxAttempts: 10], + bootDiskImage: 'batch-foo', + bootDiskSize: '100GB' + ] when: - def config = BatchConfig.create(session) + def config = new BatchConfig(opts) then: config.getSpot() and: diff --git a/plugins/nf-google/src/test/nextflow/cloud/google/batch/logging/BatchLoggingTest.groovy b/plugins/nf-google/src/test/nextflow/cloud/google/batch/logging/BatchLoggingTest.groovy index 395dbe98e5..c7370c88fa 100644 --- a/plugins/nf-google/src/test/nextflow/cloud/google/batch/logging/BatchLoggingTest.groovy +++ b/plugins/nf-google/src/test/nextflow/cloud/google/batch/logging/BatchLoggingTest.groovy @@ -29,8 +29,8 @@ import 
com.google.cloud.logging.Payload.StringPayload import com.google.cloud.logging.Severity import groovy.util.logging.Slf4j import nextflow.Session +import nextflow.cloud.google.GoogleOpts import nextflow.cloud.google.batch.client.BatchClient -import nextflow.cloud.google.batch.client.BatchConfig import spock.lang.IgnoreIf import spock.lang.Requires import spock.lang.Specification @@ -90,7 +90,7 @@ class BatchLoggingTest extends Specification { def 'should fetch logs' () { given: def sess = Mock(Session) { getConfig() >> [:] } - def config = BatchConfig.create(sess) + def config = GoogleOpts.create(sess) and: def batchClient = new BatchClient(config) def logClient = new BatchLogging(config) diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy index b55027a22f..d237b3229c 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy +++ b/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy @@ -25,6 +25,11 @@ import groovy.transform.Memoized import groovy.transform.PackageScope import groovy.util.logging.Slf4j import nextflow.BuildInfo +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.container.ContainerHelper +import nextflow.script.dsl.Description import nextflow.exception.AbortOperationException import nextflow.k8s.client.ClientConfig import nextflow.k8s.client.K8sClient @@ -40,39 +45,193 @@ import nextflow.util.Duration * * @author Paolo Di Tommaso */ +@ScopeName("k8s") +@Description(""" + The `k8s` scope controls the deployment and execution of workflow applications in a Kubernetes cluster. 
+""") @Slf4j @CompileStatic -class K8sConfig implements Map { +class K8sConfig implements ConfigScope { static final private Map DEFAULT_FUSE_PLUGIN = Map.of('nextflow.io/fuse', 1) - @Delegate - private Map target + @ConfigOption + @Description(""" + Automatically mount host paths into the task pods (default: `false`). Only intended for development purposes when using a single node. + """) + final boolean autoMountHostPaths + + @ConfigOption + @Description(""" + Whether to use Kubernetes `Pod` or `Job` resource type to carry out Nextflow tasks (default: `Pod`). + """) + final String computeResourceType + + @ConfigOption + @Description(""" + The Kubernetes [configuration context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) to use. + """) + final String context + + @ConfigOption + @Description(""" + When `true`, set both the pod CPUs `request` and `limit` to the value specified by the `cpus` directive, otherwise set only the `request` (default: `false`). + """) + final boolean cpuLimits + + final K8sDebug debug + + @ConfigOption + @Description(""" + Include the hostname of each task in the execution trace (default: `false`). + """) + final boolean fetchNodeName + + @ConfigOption + @Description(""" + The FUSE device plugin to be used when enabling Fusion in unprivileged mode (default: `['nextflow.io/fuse': 1]`). + """) + final Map fuseDevicePlugin + + @ConfigOption + @Description(""" + The Kubernetes HTTP client request connection timeout e.g. `'60s'`. + """) + final Duration httpConnectTimeout + + @ConfigOption + @Description(""" + The Kubernetes HTTP client request connection read timeout e.g. `'60s'`. + """) + final Duration httpReadTimeout + + @ConfigOption + @Description(""" + The strategy for pulling container images. Can be `IfNotPresent`, `Always`, `Never`. 
+ + [Read more](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) + """) + final String imagePullPolicy + + @ConfigOption + @Description(""" + The path where the workflow is launched and the user data is stored (default: `/`). Must be a path in a shared K8s persistent volume. + """) + final String launchDir + + @ConfigOption + @Description(""" + The Kubernetes namespace to use (default: `default`). + """) + final String namespace + + @ConfigOption(types=[List, Map]) + @Description(""" + Additional pod configuration options such as environment variables, config maps, secrets, etc. Allows the same settings as the [pod](https://nextflow.io/docs/latest/process.html#pod) process directive. + """) + final PodOptions pod + + @ConfigOption + @Description(""" + The path where Nextflow projects are downloaded (default: `/projects`). Must be a path in a shared K8s persistent volume. + """) + final String projectDir + + @Deprecated + @ConfigOption + @Description(""" + """) + final String pullPolicy + + final K8sRetryConfig retryPolicy + + @ConfigOption(types=[Integer, String]) + @Description(""" + The user ID to be used to run the containers. Shortcut for the `securityContext` option. + """) + final Object runAsUser + + @ConfigOption + @Description(""" + The [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) to use for all pods. + """) + final Map securityContext + + @ConfigOption + @Description(""" + The Kubernetes [service account name](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to use. + """) + final String serviceAccount + + @ConfigOption + @Description(""" + The name of the persistent volume claim where the shared work directory is stored. + """) + final String storageClaimName + + @ConfigOption + @Description(""" + The mount path for the persistent volume claim (default: `/workspace`). 
+ """) + final String storageMountPath + + @ConfigOption + @Description(""" + The path in the persistent volume to be mounted (default: `/`). + """) + final String storageSubPath + + @ConfigOption + @Description(""" + The path of the shared work directory (default: `/work`). Must be a path in a shared K8s persistent volume. + """) + final String workDir + + /* required by extension point -- do not remove */ + K8sConfig() { + this(Collections.emptyMap()) + } - private PodOptions podOptions + K8sConfig(Map opts) { + autoMountHostPaths = opts.autoMountHostPaths as boolean + computeResourceType = opts.computeResourceType + context = opts.context + cpuLimits = opts.cpuLimits as boolean + debug = new K8sDebug(opts.debug as Map ?: Collections.emptyMap()) + fetchNodeName = opts.fetchNodeName as boolean + fuseDevicePlugin = parseFuseDevicePlugin(opts.fuseDevicePlugin) + httpConnectTimeout = opts.httpConnectTimeout as Duration + httpReadTimeout = opts.httpReadTimeout as Duration + imagePullPolicy = opts.pullPolicy ?: opts.imagePullPolicy + namespace = opts.namespace + pod = createPodOptions(opts.pod) + retryPolicy = new K8sRetryConfig(opts.retryPolicy as Map ?: Collections.emptyMap()) + runAsUser = opts.runAsUser + securityContext = opts.securityContext as Map + serviceAccount = opts.serviceAccount + storageClaimName = opts.storageClaimName + storageMountPath = opts.storageMountPath ?: '/workspace' + storageSubPath = opts.storageSubPath + + launchDir = opts.launchDir ?: "${storageMountPath}/${getUserName()}" + projectDir = opts.projectDir ?: "${storageMountPath}/projects" + workDir = opts.workDir ?: "${launchDir}/work" - K8sConfig(Map config) { - target = config ?: Collections.emptyMap() + // -- shortcut to pod image pull-policy + if( imagePullPolicy ) + pod.imagePullPolicy = imagePullPolicy - this.podOptions = createPodOptions(target.pod) - if( getStorageClaimName() ) { - final name = getStorageClaimName() - final mount = getStorageMountPath() - final subPath = 
getStorageSubPath() - this.podOptions.volumeClaims.add(new PodVolumeClaim(name, mount, subPath)) + // -- shortcut to pod volume claim + if( storageClaimName ) { + final volumeClaim = new PodVolumeClaim(storageClaimName, storageMountPath, storageSubPath) + pod.volumeClaims.add(volumeClaim) } - // -- shortcut to pod image pull-policy - if( target.pullPolicy ) - podOptions.imagePullPolicy = target.pullPolicy.toString() - else if( target.imagePullPolicy ) - podOptions.imagePullPolicy = target.imagePullPolicy.toString() - // -- shortcut to pod security context - if( target.runAsUser != null ) - podOptions.securityContext = new PodSecurityContext(target.runAsUser) - else if( target.securityContext instanceof Map ) - podOptions.securityContext = new PodSecurityContext(target.securityContext as Map) + if( runAsUser ) + pod.securityContext = new PodSecurityContext(runAsUser) + else if( securityContext ) + pod.securityContext = new PodSecurityContext(securityContext) } private PodOptions createPodOptions( value ) { @@ -89,43 +248,30 @@ class K8sConfig implements Map { } Map getLabels() { - podOptions.getLabels() + pod.getLabels() } Map getAnnotations() { - podOptions.getAnnotations() - } - - K8sDebug getDebug() { - new K8sDebug( (Map)get('debug') ) + pod.getAnnotations() } boolean getCleanup(boolean defValue=true) { - target.cleanup == null ? 
defValue : Boolean.valueOf( target.cleanup as String ) + defValue } String getUserName() { - target.userName ?: System.properties.get('user.name') + System.properties.get('user.name') } - String getStorageClaimName() { - target.storageClaimName as String - } - - String getStorageMountPath() { - target.storageMountPath ?: '/workspace' as String - } - - String getStorageSubPath() { - target.storageSubPath + Map fuseDevicePlugin() { + fuseDevicePlugin } - Map fuseDevicePlugin() { - final result = target.fuseDevicePlugin - if( result instanceof Map && result.size()==1 ) - return result as Map - if( result ) - log.warn1 "Setting 'fuseDevicePlugin' should be a map object providing exactly one entry - offending value: $result" + Map parseFuseDevicePlugin(Object value) { + if( value instanceof Map && value.size()==1 ) + return value as Map + if( value != null ) + log.warn1 "Setting 'k8s.fuseDevicePlugin' should be a map containing exactly one entry - offending value: $value" return DEFAULT_FUSE_PLUGIN } @@ -140,74 +286,36 @@ class K8sConfig implements Map { * */ boolean entrypointOverride() { - def result = target.entrypointOverride - if( result == null ) - result = System.getenv('NXF_CONTAINER_ENTRYPOINT_OVERRIDE') - return result - } - - /** - * @return the path where the workflow is launched and the user data is stored - */ - String getLaunchDir() { - if( target.userDir ) { - log.warn "K8s `userDir` has been deprecated -- Use `launchDir` instead" - return target.userDir - } - target.launchDir ?: "${getStorageMountPath()}/${getUserName()}" as String - } - - /** - * @return Defines the path where the workflow temporary data is stored. This must be a path in a shared K8s persistent volume (default:/work). - */ - String getWorkDir() { - target.workDir ?: "${getLaunchDir()}/work" as String - } - - /** - * @return Defines the path where Nextflow projects are downloaded. This must be a path in a shared K8s persistent volume (default: /projects). 
- */ - String getProjectDir() { - target.projectDir ?: "${getStorageMountPath()}/projects" as String + return ContainerHelper.entrypointOverride() } - String getNamespace() { target.namespace } - - boolean useJobResource() { ResourceType.Job.name() == target.computeResourceType?.toString() } - - String getServiceAccount() { target.serviceAccount } + boolean useJobResource() { ResourceType.Job.name() == computeResourceType } String getNextflowImageName() { - final defImage = "nextflow/nextflow:${BuildInfo.version}" - return target.navigate('nextflow.image', defImage) - } - - boolean getAutoMountHostPaths() { - Boolean.valueOf( target.autoMountHostPaths as String ) + return "nextflow/nextflow:${BuildInfo.version}" } PodOptions getPodOptions() { - podOptions + pod } - @Memoized boolean fetchNodeName() { - Boolean.valueOf( target.fetchNodeName as String ) + fetchNodeName } /** * @return the collection of defined volume claim names */ Collection getClaimNames() { - podOptions.volumeClaims.collect { it.claimName } + pod.volumeClaims.collect { it.claimName } } Collection getClaimPaths() { - podOptions.volumeClaims.collect { it.mountPath } + pod.volumeClaims.collect { it.mountPath } } boolean cpuLimitsEnabled() { - target.cpuLimits ?: false + cpuLimits } /** @@ -217,50 +325,27 @@ class K8sConfig implements Map { * @return The volume claim name for the given mount path */ String findVolumeClaimByPath(String path) { - def result = podOptions.volumeClaims.find { path.startsWith(it.mountPath) } + final result = pod.volumeClaims.find { path.startsWith(it.mountPath) } return result ? result.claimName : null } @Memoized ClientConfig getClient() { - final result = ( target.client instanceof Map - ? 
clientFromNextflow(target.client as Map, target.namespace as String, target.serviceAccount as String) - : clientDiscovery(target.context as String, target.namespace as String, target.serviceAccount as String) - ) - - if( target.httpConnectTimeout ) - result.httpConnectTimeout = target.httpConnectTimeout as Duration + final result = clientDiscovery(context, namespace, serviceAccount) - if( target.httpReadTimeout ) - result.httpReadTimeout = target.httpReadTimeout as Duration + if( httpConnectTimeout ) + result.httpConnectTimeout = httpConnectTimeout - if( target.retryPolicy ) - result.retryConfig = new K8sRetryConfig(target.retryPolicy as Map) + if( httpReadTimeout ) + result.httpReadTimeout = httpReadTimeout - if( target.maxErrorRetry ) - log.warn("Config setting 'k8s.maxErrorRetry' is deprecated. Change it to 'k8s.retryPolicy.maxAttempts'") + if( retryPolicy ) + result.retryConfig = retryPolicy return result } - /** - * Get the K8s client config from the declaration made in the Nextflow config file - * - * @param map - * A map representing the clint configuration options define in the nextflow - * config file - * @param namespace - * The K8s namespace to be used. If omitted {@code default} is used. - * @param serviceAccount - * The K8s service account to be used. If omitted {@code default} is used. 
- * @return - * The Kubernetes {@link ClientConfig} object - */ - @PackageScope ClientConfig clientFromNextflow(Map map, @Nullable String namespace, @Nullable String serviceAccount ) { - ClientConfig.fromNextflowConfig(map,namespace,serviceAccount) - } - /** * Discover the K8s client config from the execution environment * that can be either a `.kube/config` file or service meta file @@ -280,7 +365,7 @@ class K8sConfig implements Map { } void checkStorageAndPaths(K8sClient client) { - if( !getStorageClaimName() ) + if( !storageClaimName ) throw new AbortOperationException("Missing K8s storage volume claim -- The name of a persistence volume claim needs to be provided in the nextflow configuration file") log.debug "Kubernetes workDir=$workDir; projectDir=$projectDir; volumeClaims=${getClaimNames()}" @@ -297,29 +382,28 @@ class K8sConfig implements Map { } } - if( !findVolumeClaimByPath(getLaunchDir()) ) + if( !findVolumeClaimByPath(launchDir) ) throw new AbortOperationException("Kubernetes `launchDir` must be a path mounted as a persistent volume -- launchDir=$launchDir; volumes=${getClaimPaths().join(', ')}") - if( !findVolumeClaimByPath(getWorkDir()) ) + if( !findVolumeClaimByPath(workDir) ) throw new AbortOperationException("Kubernetes `workDir` must be a path mounted as a persistent volume -- workDir=$workDir; volumes=${getClaimPaths().join(', ')}") - if( !findVolumeClaimByPath(getProjectDir()) ) + if( !findVolumeClaimByPath(projectDir) ) throw new AbortOperationException("Kubernetes `projectDir` must be a path mounted as a persistent volume -- projectDir=$projectDir; volumes=${getClaimPaths().join(', ')}") - } - @CompileStatic - static class K8sDebug { + static class K8sDebug implements ConfigScope { - @Delegate - Map target + @ConfigOption + @Description(""" + Save the pod spec for each task to `.command.yaml` in the task directory (default: `false`). 
+ """) + final boolean yaml - K8sDebug(Map debug) { - this.target = debug ?: Collections.emptyMap() + K8sDebug(Map opts) { + yaml = opts.yaml as boolean } - - boolean getYaml() { Boolean.valueOf( target.yaml as String ) } } } diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/K8sExecutor.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/K8sExecutor.groovy index 1a0b1d94e8..12cda83110 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/K8sExecutor.groovy +++ b/plugins/nf-k8s/src/main/nextflow/k8s/K8sExecutor.groovy @@ -86,7 +86,7 @@ class K8sExecutor extends Executor implements ExtensionPoint { */ @Override protected TaskMonitor createTaskMonitor() { - TaskPollingMonitor.create(session, name, 100, Duration.of('5 sec')) + TaskPollingMonitor.create(session, config, name, 100, Duration.of('5 sec')) } /** diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/K8sTaskHandler.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/K8sTaskHandler.groovy index 26c105a18f..25df16362c 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/K8sTaskHandler.groovy +++ b/plugins/nf-k8s/src/main/nextflow/k8s/K8sTaskHandler.groovy @@ -25,6 +25,7 @@ import groovy.transform.CompileDynamic import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import nextflow.SysEnv +import nextflow.container.ContainerHelper import nextflow.container.DockerBuilder import nextflow.exception.NodeTerminationException import nextflow.k8s.client.PodUnschedulableException @@ -165,7 +166,7 @@ class K8sTaskHandler extends TaskHandler implements FusionAwareTask { protected String getOwner() { OWNER } protected Boolean fixOwnership() { - task.containerConfig.fixOwnership + ContainerHelper.fixOwnership(task.containerConfig) } /** diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy index 78986e516d..1cb70942a5 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy +++ 
b/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy @@ -100,46 +100,6 @@ class ClientConfig { new ConfigDiscovery().discover(context, namespace, serviceAccount) } - static ClientConfig fromNextflowConfig(Map opts, String namespace, String serviceAccount) { - final result = new ClientConfig() - - if( opts.server ) - result.server = opts.server - - if( opts.token ) - result.token = opts.token - else if( opts.tokenFile ) - result.token = Paths.get(opts.tokenFile.toString()).getText('UTF-8') - - result.namespace = namespace ?: opts.namespace ?: 'default' - - result.serviceAccount = serviceAccount ?: 'default' - - if( opts.verifySsl ) - result.verifySsl = opts.verifySsl as boolean - - if( opts.sslCert ) - result.sslCert = opts.sslCert.toString().decodeBase64() - else if( opts.sslCertFile ) - result.sslCert = Paths.get(opts.sslCertFile.toString()).bytes - - if( opts.clientCert ) - result.clientCert = opts.clientCert.toString().decodeBase64() - else if( opts.clientCertFile ) - result.clientCert = Paths.get(opts.clientCertFile.toString()).bytes - - if( opts.clientKey ) - result.clientKey = opts.clientKey.toString().decodeBase64() - else if( opts.clientKeyFile ) - result.clientKey = Paths.get(opts.clientKeyFile.toString()).bytes - if( opts.retryPolicy ) - result.retryConfig = new K8sRetryConfig(opts.retryPolicy as Map) - if( opts.maxErrorRetry ) - log.warn("Config setting 'k8s.maxErrorRetry' is deprecated - change it to 'k8s.retryPolicy.maxAttempts'") - - return result - } - static ClientConfig fromUserAndCluster(Map user, Map cluster, Path location) { final base = location.isDirectory() ? 
location : location.parent final result = new ClientConfig() diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/client/K8sRetryConfig.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/client/K8sRetryConfig.groovy index d6d658433e..ac02251537 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/client/K8sRetryConfig.groovy +++ b/plugins/nf-k8s/src/main/nextflow/k8s/client/K8sRetryConfig.groovy @@ -20,6 +20,9 @@ package nextflow.k8s.client import groovy.transform.CompileStatic import groovy.transform.EqualsAndHashCode import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.Duration /** @@ -30,10 +33,30 @@ import nextflow.util.Duration @ToString(includePackage = false, includeNames = true) @EqualsAndHashCode @CompileStatic -class K8sRetryConfig { +class K8sRetryConfig implements ConfigScope { + + @ConfigOption + @Description(""" + Delay when retrying failed API requests (default: `250ms`). + """) Duration delay = Duration.of('250ms') + + @ConfigOption + @Description(""" + Max delay when retrying failed API requests (default: `90s`). + """) Duration maxDelay = Duration.of('90s') + + @ConfigOption + @Description(""" + Max attempts when retrying failed API requests (default: `4`). + """) int maxAttempts = 4 + + @ConfigOption + @Description(""" + Jitter value when retrying failed API requests (default: `0.25`). + """) double jitter = 0.25 K8sRetryConfig() { diff --git a/plugins/nf-k8s/src/resources/META-INF/extensions.idx b/plugins/nf-k8s/src/resources/META-INF/extensions.idx index a0488c8771..be9016a816 100644 --- a/plugins/nf-k8s/src/resources/META-INF/extensions.idx +++ b/plugins/nf-k8s/src/resources/META-INF/extensions.idx @@ -14,5 +14,6 @@ # limitations under the License. 
# +nextflow.k8s.K8sConfig nextflow.k8s.K8sExecutor nextflow.k8s.cli.KubeCommandImpl diff --git a/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy b/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy index 60ab604403..bb0f8b42c4 100644 --- a/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy +++ b/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy @@ -17,6 +17,7 @@ package nextflow.k8s import nextflow.BuildInfo +import nextflow.SysEnv import nextflow.k8s.client.ClientConfig import nextflow.k8s.model.PodEnv import nextflow.k8s.model.PodSecurityContext @@ -62,36 +63,6 @@ class K8sConfigTest extends Specification { cfg = new K8sConfig() then: 'it should return false specified as default' !cfg.getCleanup(false) - - when: - cfg = new K8sConfig(cleanup:false) - then: 'it should return false' - !cfg.getCleanup() - - when: - cfg = new K8sConfig(cleanup:'false') - then: 'it should return false' - !cfg.getCleanup() - - when: - cfg = new K8sConfig(cleanup:true) - then: 'it should return true' - cfg.getCleanup() - - when: - cfg = new K8sConfig(cleanup:'true') - then: 'it should return true' - cfg.getCleanup() - - when: - cfg = new K8sConfig(cleanup:true) - then: 'the default value should be ignored' - cfg.getCleanup(false) - - when: - cfg = new K8sConfig(cleanup:'true') - then: 'the default value should be ignored' - cfg.getCleanup(false) } def 'should create config with storage claims' () { @@ -141,62 +112,16 @@ class K8sConfigTest extends Specification { cfg.fuseDevicePlugin() == ['foo/fuse':10] } + @Unroll def 'should create client config' () { - given: - def CONFIG = [namespace: 'this', serviceAccount: 'that', client: [server: 'http://foo']] - - when: - def config = new K8sConfig(CONFIG) - def client = config.getClient() - then: - client.server == 'http://foo' - client.namespace == 'this' - client.serviceAccount == 'that' - client.httpConnectTimeout == null // testing default null - client.httpReadTimeout == null // testing default null - 
client.retryConfig.maxAttempts == 4 - - } - - def 'should set maxErrorRetry' () { - given: - def CONFIG = [retryPolicy: [ maxAttempts: 10], namespace: 'this', serviceAccount: 'that', client: [server: 'http://foo']] - - when: - def config = new K8sConfig(CONFIG) - def client = config.getClient() - then: - client.retryConfig.maxAttempts == 10 - } - - def 'should create client config with http request timeouts' () { - given: def CONFIG = [ - namespace: 'this', - serviceAccount: 'that', - client: [server: 'http://foo'], + context: CONTEXT, + namespace: NAMESPACE, + serviceAccount: SERVICE_ACCOUNT, httpReadTimeout: '20s', httpConnectTimeout: '25s' ] - - when: - def config = new K8sConfig(CONFIG) - def client = config.getClient() - then: - client.server == 'http://foo' - client.namespace == 'this' - client.serviceAccount == 'that' - client.httpConnectTimeout == Duration.of('25s') - client.httpReadTimeout == Duration.of('20s') - - } - - @Unroll - def 'should create client config with discovery' () { - - given: - def CONFIG = [context: CONTEXT, namespace: NAMESPACE, serviceAccount: SERVICE_ACCOUNT] K8sConfig config = Spy(K8sConfig, constructorArgs: [ CONFIG ]) when: @@ -207,6 +132,8 @@ class K8sConfigTest extends Specification { client.server == SERVER client.namespace == NAMESPACE ?: 'default' client.serviceAccount == SERVICE_ACCOUNT ?: 'default' + client.httpConnectTimeout == Duration.of('25s') + client.httpReadTimeout == Duration.of('20s') where: CONTEXT | SERVER | NAMESPACE | SERVICE_ACCOUNT @@ -222,11 +149,6 @@ class K8sConfigTest extends Specification { then: cfg.getNextflowImageName() == "nextflow/nextflow:${BuildInfo.version}" - when: - cfg = new K8sConfig(nextflow: [image: 'foo/bar:1.0']) - then: - cfg.getNextflowImageName() == 'foo/bar:1.0' - } def 'should get autoMountHostPaths' () { @@ -241,20 +163,10 @@ class K8sConfigTest extends Specification { then: cfg.getAutoMountHostPaths() - when: - cfg = new K8sConfig(autoMountHostPaths: 'true') - then: - 
cfg.getAutoMountHostPaths() - when: cfg = new K8sConfig(autoMountHostPaths: false) then: !cfg.getAutoMountHostPaths() - - when: - cfg = new K8sConfig(autoMountHostPaths: 'false') - then: - !cfg.getAutoMountHostPaths() } @@ -283,11 +195,6 @@ class K8sConfigTest extends Specification { def cfg = new K8sConfig() then: cfg.getUserName() == System.properties.get('user.name') - - when: - cfg = new K8sConfig(userName: 'foo') - then: - cfg.getUserName() == 'foo' } def 'should return user dir' () { @@ -297,12 +204,12 @@ class K8sConfigTest extends Specification { cfg.getLaunchDir() == '/workspace/' + System.properties.get('user.name') when: - cfg = new K8sConfig(storageMountPath: '/this/path', userName: 'foo') + cfg = new K8sConfig(storageMountPath: '/this/path') then: - cfg.getLaunchDir() == '/this/path/foo' + cfg.getLaunchDir() == '/this/path/' + System.properties.get('user.name') when: - cfg = new K8sConfig(storageMountPath: '/this/path', userName: 'foo', launchDir: '/my/path') + cfg = new K8sConfig(storageMountPath: '/this/path', launchDir: '/my/path') then: cfg.getLaunchDir() == '/my/path' @@ -459,28 +366,21 @@ class K8sConfigTest extends Specification { !cfg.entrypointOverride() when: - cfg = new K8sConfig( entrypointOverride: true ) + SysEnv.push(NXF_CONTAINER_ENTRYPOINT_OVERRIDE: 'true') + cfg = new K8sConfig() + def result = cfg.entrypointOverride() + SysEnv.pop() then: - cfg.entrypointOverride() + result } def 'should set debug.yaml' () { when: - def cfg = new K8sConfig( debug: [yaml: 'true'] ) + def cfg = new K8sConfig( debug: [yaml: true] ) then: cfg.getDebug().getYaml() - when: - cfg = new K8sConfig( debug: [yaml: true] ) - then: - cfg.getDebug().getYaml() - - when: - cfg = new K8sConfig( debug: [yaml: 'false'] ) - then: - !cfg.getDebug().getYaml() - when: cfg = new K8sConfig( debug: [yaml: false] ) then: @@ -495,29 +395,14 @@ class K8sConfigTest extends Specification { cfg = new K8sConfig( debug: [:] ) then: !cfg.getDebug().getYaml() - - when: - cfg = new 
K8sConfig( null ) - then: - !cfg.getDebug().getYaml() } def 'should set fetchNodeName' () { when: - def cfg = new K8sConfig( fetchNodeName: 'true' ) - then: - cfg.fetchNodeName() == true - - when: - cfg = new K8sConfig( fetchNodeName: true ) + def cfg = new K8sConfig( fetchNodeName: true ) then: cfg.fetchNodeName() == true - when: - cfg = new K8sConfig( fetchNodeName: 'false' ) - then: - cfg.fetchNodeName() == false - when: cfg = new K8sConfig( fetchNodeName: false ) then: diff --git a/plugins/nf-k8s/src/test/nextflow/k8s/K8sDriverLauncherTest.groovy b/plugins/nf-k8s/src/test/nextflow/k8s/K8sDriverLauncherTest.groovy index 19b14916ab..3ba962c2ab 100644 --- a/plugins/nf-k8s/src/test/nextflow/k8s/K8sDriverLauncherTest.groovy +++ b/plugins/nf-k8s/src/test/nextflow/k8s/K8sDriverLauncherTest.groovy @@ -75,7 +75,7 @@ class K8sDriverLauncherTest extends Specification { when: k8sConfig = driver.makeK8sConfig([:]) then: - k8sConfig == new K8sConfig() + k8sConfig != null when: k8sConfig = driver.makeK8sConfig(k8s: [storageClaimName: 'foo', storageMountPath: '/mnt']) diff --git a/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy b/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy index 5993921a64..d7e46bfb61 100644 --- a/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy +++ b/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy @@ -42,79 +42,4 @@ class ClientConfigTest extends Specification { } - def 'should create a client config from a map' () { - - given: - def MAP = [ - server:'foo.com', - token: 'blah-blah', - namespace: 'my-namespace', - verifySsl: true, - sslCert: 'fizzbuzz'.bytes.encodeBase64().toString(), - clientCert: 'hello'.bytes.encodeBase64().toString(), - clientKey: 'world'.bytes.encodeBase64().toString() ] - - when: - def result = ClientConfig.fromNextflowConfig(MAP, null, null) - - then: - result.server == 'foo.com' - result.token == 'blah-blah' - result.namespace == 'my-namespace' - 
result.serviceAccount == 'default' - result.verifySsl - result.clientCert == 'hello'.bytes - result.clientKey == 'world'.bytes - result.sslCert == 'fizzbuzz'.bytes - - when: - result = ClientConfig.fromNextflowConfig(MAP, 'ns1', 'sa2') - then: - result.server == 'foo.com' - result.token == 'blah-blah' - result.namespace == 'ns1' - result.serviceAccount == 'sa2' - result.verifySsl - result.clientCert == 'hello'.bytes - result.clientKey == 'world'.bytes - result.sslCert == 'fizzbuzz'.bytes - } - - def 'should create a client config from a map with files' () { - - given: - def folder = Files.createTempDirectory('test') - def file1 = folder.resolve('file1') - def file2 = folder.resolve('file2') - def file3 = folder.resolve('file3') - file1.text = 'fizzbuzz'.bytes.encodeBase64().toString() - file2.text = 'hello'.bytes.encodeBase64().toString() - file3.text = 'world'.bytes.encodeBase64().toString() - - def MAP = [ - server:'foo.com', - token: 'blah-blah', - namespace: 'my-namespace', - verifySsl: false, - sslCertFile: file1, - clientCertFile: file2, - clientKeyFile: file3 ] - - when: - def result = ClientConfig.fromNextflowConfig(MAP, null, null) - - then: - result.server == 'foo.com' - result.token == 'blah-blah' - result.namespace == 'my-namespace' - result.serviceAccount == 'default' - !result.verifySsl - result.sslCert == file1.text.bytes - result.clientCert == file2.text.bytes - result.clientKey == file3.text.bytes - - cleanup: - folder?.deleteDir() - } - } diff --git a/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerClient.groovy b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerClient.groovy index 9601b93b2f..977bab595f 100644 --- a/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerClient.groovy +++ b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerClient.groovy @@ -137,6 +137,8 @@ class TowerClient implements TraceObserverV2 { private boolean towerLaunch + private String accessToken + private String refreshToken private String workspaceId @@ 
-145,14 +147,11 @@ class TowerClient implements TraceObserverV2 { private Map allContainers = new ConcurrentHashMap<>() - /** - * Constructor that consumes a URL and creates - * a basic HTTP client. - * @param endpoint The target address for sending messages to - */ - TowerClient(Session session, String endpoint) { + TowerClient(Session session, TowerConfig config) { this.session = session - this.endpoint = checkUrl(endpoint) + this.endpoint = checkUrl(config.endpoint) + this.accessToken = config.accessToken + this.workspaceId = config.workspaceId this.schema = loadSchema() this.generator = TowerJsonGenerator.create(schema) this.reports = new TowerReports(session) @@ -201,10 +200,6 @@ class TowerClient implements TraceObserverV2 { this.backOffDelay = value } - void setWorkspaceId( String workspaceId ) { - this.workspaceId = workspaceId - } - String getWorkspaceId() { workspaceId } /** @@ -381,15 +376,9 @@ class TowerClient implements TraceObserverV2 { } String getAccessToken() { - // when 'TOWER_WORKFLOW_ID' is provided in the env, it's a tower made launch - // therefore the access token should only be taken from the env - // otherwise check into the config file and fallback in the env - def token = env.get('TOWER_WORKFLOW_ID') - ? 
env.get('TOWER_ACCESS_TOKEN') - : session.config.navigate('tower.accessToken', env.get('TOWER_ACCESS_TOKEN')) - if( !token ) + if( !accessToken ) throw new AbortOperationException("Missing Seqera Platform access token -- Make sure there's a variable TOWER_ACCESS_TOKEN in your environment") - return token + return accessToken } /** diff --git a/modules/nf-lang/src/main/java/nextflow/config/scopes/TowerConfig.java b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerConfig.groovy similarity index 50% rename from modules/nf-lang/src/main/java/nextflow/config/scopes/TowerConfig.java rename to plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerConfig.groovy index c1a2644b49..f65003eaba 100644 --- a/modules/nf-lang/src/main/java/nextflow/config/scopes/TowerConfig.java +++ b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerConfig.groovy @@ -1,5 +1,5 @@ /* - * Copyright 2024-2025, Seqera Labs + * Copyright 2013-2024, Seqera Labs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -12,37 +12,61 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
+ * */ -package nextflow.config.scopes; -import nextflow.config.schema.ConfigOption; -import nextflow.config.schema.ConfigScope; -import nextflow.script.dsl.Description; +package io.seqera.tower.plugin -public class TowerConfig implements ConfigScope { +import groovy.transform.CompileStatic +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description +import nextflow.platform.PlatformHelper + +/** + * Model Seqera Platform configuration + * + * @author Paolo Di Tommaso + */ +@ScopeName("tower") +@Description(""" + The `tower` scope controls the settings for [Seqera Platform](https://seqera.io) (formerly Tower Cloud). +""") +@CompileStatic +class TowerConfig implements ConfigScope { @ConfigOption @Description(""" The unique access token for your Seqera Platform account. """) - public String accessToken; + final String accessToken @ConfigOption @Description(""" Enable workflow monitoring with Seqera Platform (default: `false`). """) - public boolean enabled; + final boolean enabled @ConfigOption @Description(""" The endpoint of your Seqera Platform instance (default: `https://api.cloud.seqera.io`). """) - public String endpoint; + final String endpoint @ConfigOption @Description(""" The workspace ID in Seqera Platform in which to save the run (default: the launching user's personal workspace). 
""") - public String workspaceId; + final String workspaceId + + /* required by extension point -- do not remove */ + TowerConfig() {} + TowerConfig(Map opts, Map env) { + this.accessToken = PlatformHelper.getAccessToken(opts, env) + this.enabled = opts.enabled as boolean + this.endpoint = PlatformHelper.getEndpoint(opts, env) + this.workspaceId = PlatformHelper.getWorkspaceId(opts, env) + } } diff --git a/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerFactory.groovy b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerFactory.groovy index b40ab0d5e8..5845dd5c91 100644 --- a/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerFactory.groovy +++ b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerFactory.groovy @@ -59,34 +59,26 @@ class TowerFactory implements TraceObserverFactoryV2 { return result } - static protected TowerClient createTowerClient0(Session session, Map config, Map env) { - String endpoint = config.navigate('tower.endpoint') as String - Duration requestInterval = config.navigate('tower.requestInterval') as Duration - Duration aliveInterval = config.navigate('tower.aliveInterval') as Duration + static protected TowerClient createTowerClient0(Session session, TowerConfig config, Map env) { + final opts = session.config.tower as Map ?: Collections.emptyMap() - if ( !endpoint || endpoint=='-' ) - endpoint = env.get('TOWER_API_ENDPOINT') ?: TowerClient.DEF_ENDPOINT_URL + Duration requestInterval = opts.requestInterval as Duration + Duration aliveInterval = opts.aliveInterval as Duration - final tower = new TowerClient(session, endpoint).withEnvironment(env) + final tower = new TowerClient(session, config).withEnvironment(env) if( aliveInterval ) tower.aliveInterval = aliveInterval if( requestInterval ) tower.requestInterval = requestInterval // error handling settings - tower.maxRetries = config.navigate('tower.maxRetries', 5) as int - tower.backOffBase = config.navigate('tower.backOffBase', SimpleHttpClient.DEFAULT_BACK_OFF_BASE) as int 
- tower.backOffDelay = config.navigate('tower.backOffDelay', SimpleHttpClient.DEFAULT_BACK_OFF_DELAY ) as int - // when 'TOWER_WORKFLOW_ID' is provided in the env, it's a tower made launch - // therefore the workspace should only be taken from the env - // otherwise check into the config file and fallback in the env - tower.workspaceId = env.get('TOWER_WORKFLOW_ID') - ? env.get('TOWER_WORKSPACE_ID') - : config.navigate('tower.workspaceId', env.get('TOWER_WORKSPACE_ID')) + tower.maxRetries = opts.maxRetries != null ? opts.maxRetries as int : 5 + tower.backOffBase = opts.backOffBase != null ? opts.backOffBase as int : SimpleHttpClient.DEFAULT_BACK_OFF_BASE + tower.backOffDelay = opts.backOffDelay != null ? opts.backOffDelay as int : SimpleHttpClient.DEFAULT_BACK_OFF_DELAY // register auth provider // note: this is needed to authorize access to resources via XFileSystemProvider used by NF // it's not needed by the tower client logic - XAuthRegistry.instance.register(provider(tower.endpoint, tower.accessToken)) + XAuthRegistry.instance.register(provider(config.endpoint, config.accessToken)) return tower } @@ -99,8 +91,9 @@ class TowerFactory implements TraceObserverFactoryV2 { @Memoized static TowerClient client(Session session, Map env) { - final config = session.config - Boolean isEnabled = config.navigate('tower.enabled') as Boolean || env.get('TOWER_WORKFLOW_ID') || config.navigate('fusion.enabled') as Boolean + final opts = session.config.tower as Map ?: Collections.emptyMap() + final config = new TowerConfig(opts, env) + Boolean isEnabled = config.enabled || env.get('TOWER_WORKFLOW_ID') || session.config.navigate('fusion.enabled') as Boolean return isEnabled ? 
createTowerClient0(session, config, env) : null diff --git a/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerReports.groovy b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerReports.groovy index 1296743644..5770f2ced1 100644 --- a/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerReports.groovy +++ b/plugins/nf-tower/src/main/io/seqera/tower/plugin/TowerReports.groovy @@ -32,10 +32,10 @@ import groovy.yaml.YamlSlurper import groovyx.gpars.agent.Agent import nextflow.Session import nextflow.file.FileHelper -import nextflow.trace.GraphObserver -import nextflow.trace.ReportObserver -import nextflow.trace.TimelineObserver -import nextflow.trace.TraceFileObserver +import nextflow.trace.config.DagConfig +import nextflow.trace.config.ReportConfig +import nextflow.trace.config.TimelineConfig +import nextflow.trace.config.TraceConfig /** * If reports are defined at `nf--tower.yml`, collects all published files * that are reports and writes `nf--reports.tsv` file with all the paths. 
@@ -233,16 +233,16 @@ class TowerReports { final files = [] if( config.navigate('report.enabled') ) - files << config.navigate('report.file', ReportObserver.DEF_FILE_NAME) + files << config.navigate('report.file', ReportConfig.defaultFileName()) if( config.navigate('timeline.enabled') ) - files << config.navigate('timeline.file', TimelineObserver.DEF_FILE_NAME) + files << config.navigate('timeline.file', TimelineConfig.defaultFileName()) if( config.navigate('trace.enabled') ) - files << config.navigate('trace.file', TraceFileObserver.DEF_FILE_NAME) + files << config.navigate('trace.file', TraceConfig.defaultFileName()) if( config.navigate('dag.enabled') ) - files << config.navigate('dag.file', GraphObserver.DEF_FILE_NAME) + files << config.navigate('dag.file', DagConfig.defaultFileName()) for( def file : files ) filePublish( (file as Path).complete() ) diff --git a/plugins/nf-tower/src/resources/META-INF/extensions.idx b/plugins/nf-tower/src/resources/META-INF/extensions.idx index d1b8e1597a..02de3e64e7 100644 --- a/plugins/nf-tower/src/resources/META-INF/extensions.idx +++ b/plugins/nf-tower/src/resources/META-INF/extensions.idx @@ -9,5 +9,6 @@ # defined by the Mozilla Public License, v. 2.0. 
# +io.seqera.tower.plugin.TowerConfig io.seqera.tower.plugin.TowerFactory io.seqera.tower.plugin.TowerFusionToken diff --git a/plugins/nf-tower/src/test/io/seqera/tower/plugin/CacheManagerTest.groovy b/plugins/nf-tower/src/test/io/seqera/tower/plugin/CacheManagerTest.groovy index fde5ff63e5..02790c6225 100644 --- a/plugins/nf-tower/src/test/io/seqera/tower/plugin/CacheManagerTest.groovy +++ b/plugins/nf-tower/src/test/io/seqera/tower/plugin/CacheManagerTest.groovy @@ -20,6 +20,7 @@ package io.seqera.tower.plugin import java.nio.file.Files import java.nio.file.Paths +import nextflow.SysEnv import nextflow.exception.AbortOperationException import spock.lang.Specification /** @@ -28,6 +29,14 @@ import spock.lang.Specification */ class CacheManagerTest extends Specification { + def setupSpec() { + SysEnv.push([:]) + } + + def cleanupSpec() { + SysEnv.pop() + } + def 'should init empty files' () { when: new CacheManager([:]) diff --git a/plugins/nf-tower/src/test/io/seqera/tower/plugin/TowerClientTest.groovy b/plugins/nf-tower/src/test/io/seqera/tower/plugin/TowerClientTest.groovy index 5e364f9c88..cab9b4c897 100644 --- a/plugins/nf-tower/src/test/io/seqera/tower/plugin/TowerClientTest.groovy +++ b/plugins/nf-tower/src/test/io/seqera/tower/plugin/TowerClientTest.groovy @@ -25,7 +25,7 @@ import java.time.ZoneId import nextflow.Session import nextflow.cloud.types.CloudMachineInfo import nextflow.cloud.types.PriceModel -import nextflow.container.ContainerConfig +import nextflow.container.DockerConfig import nextflow.container.resolver.ContainerMeta import nextflow.exception.AbortOperationException import nextflow.script.ScriptBinding @@ -156,37 +156,30 @@ class TowerClientTest extends Specification { def session = Mock(Session) when: - def observer = new TowerClient(session: session, env: [TOWER_ACCESS_TOKEN: 'xyz']) - def result = observer.getAccessToken() + def config = new TowerConfig([accessToken: 'abc'], [TOWER_ACCESS_TOKEN: 'xyz']) + def observer = new 
TowerClient(session, config) then: - session.getConfig() >> [tower:[accessToken: 'abc'] ] - and: // the token in the config overrides the one in the env - result == 'abc' + observer.getAccessToken() == 'abc' when: - observer = new TowerClient(session: session, env: [TOWER_ACCESS_TOKEN: 'xyz', TOWER_WORKFLOW_ID: '111222333']) - result = observer.getAccessToken() + config = new TowerConfig([accessToken: 'abc'], [TOWER_ACCESS_TOKEN: 'xyz', TOWER_WORKFLOW_ID: '111222333']) + observer = new TowerClient(session, config) then: - session.getConfig() >> [tower:[accessToken: 'abc'] ] - and: // the token from the env is taken because is a tower launch aka TOWER_WORKFLOW_ID is set - result == 'xyz' + observer.getAccessToken() == 'xyz' when: - observer = new TowerClient(session: session, env: [TOWER_ACCESS_TOKEN: 'xyz']) - result = observer.getAccessToken() + config = new TowerConfig([:], [TOWER_ACCESS_TOKEN: 'xyz']) + observer = new TowerClient(session, config) then: - session.getConfig() >> [:] - and: - result == 'xyz' + observer.getAccessToken() == 'xyz' when: - observer = new TowerClient(session: session, env:[:]) + config = new TowerConfig([:], [:]) + observer = new TowerClient(session, config) observer.getAccessToken() then: - session.getConfig() >> [:] - then: thrown(AbortOperationException) } @@ -283,7 +276,7 @@ class TowerClientTest extends Specification { session.getUniqueId() >> sessionId session.getRunName() >> 'foo' session.config >> [:] - session.containerConfig >> new ContainerConfig() + session.containerConfig >> new DockerConfig([:]) session.getParams() >> new ScriptBinding.ParamsMap([foo:'Hello', bar:'World']) def meta = new WorkflowMetadata( @@ -394,15 +387,16 @@ class TowerClientTest extends Specification { getRunName() >> 'foo_bar' getWorkflowMetadata() >> meta } + def config = new TowerConfig([:], [:]) - TowerClient client = Spy(TowerClient, constructorArgs: [session, 'https://tower.nf']) + def client = Spy(new TowerClient(session, config)) when: 
client.onFlowCreate(session) then: 1 * client.getAccessToken() >> 'secret' 1 * client.makeCreateReq(session) >> [runName: 'foo'] - 1 * client.sendHttpMessage('https://tower.nf/trace/create', [runName: 'foo'], 'POST') >> new TowerClient.Response(200, '{"workflowId":"xyz123"}') + 1 * client.sendHttpMessage('https://api.cloud.seqera.io/trace/create', [runName: 'foo'], 'POST') >> new TowerClient.Response(200, '{"workflowId":"xyz123"}') and: client.runName == 'foo_bar' client.runId == uuid.toString() @@ -414,36 +408,38 @@ class TowerClientTest extends Specification { def 'should get trace endpoint' () { given: - def tower = new TowerClient(Mock(Session), 'https://tower.nf') + def config = new TowerConfig([:], [:]) + def tower = new TowerClient(Mock(Session), config) tower.workflowId = '12345' expect: - tower.getUrlTraceCreate() == 'https://tower.nf/trace/create' - tower.getUrlTraceBegin() == 'https://tower.nf/trace/12345/begin' - tower.getUrlTraceProgress() == 'https://tower.nf/trace/12345/progress' - tower.getUrlTraceHeartbeat() == 'https://tower.nf/trace/12345/heartbeat' - tower.getUrlTraceComplete() == 'https://tower.nf/trace/12345/complete' + tower.getUrlTraceCreate() == 'https://api.cloud.seqera.io/trace/create' + tower.getUrlTraceBegin() == 'https://api.cloud.seqera.io/trace/12345/begin' + tower.getUrlTraceProgress() == 'https://api.cloud.seqera.io/trace/12345/progress' + tower.getUrlTraceHeartbeat() == 'https://api.cloud.seqera.io/trace/12345/heartbeat' + tower.getUrlTraceComplete() == 'https://api.cloud.seqera.io/trace/12345/complete' } def 'should get trace endpoint with workspace' () { given: - def tower = new TowerClient(Mock(Session),'https://tower.nf') + def config = new TowerConfig([workspaceId: '300'], [:]) + def tower = new TowerClient(Mock(Session), config) tower.workflowId = '12345' - tower.workspaceId = '300' expect: - tower.getUrlTraceCreate() == 'https://tower.nf/trace/create?workspaceId=300' - tower.getUrlTraceBegin() == 
'https://tower.nf/trace/12345/begin?workspaceId=300' - tower.getUrlTraceProgress() == 'https://tower.nf/trace/12345/progress?workspaceId=300' - tower.getUrlTraceHeartbeat() == 'https://tower.nf/trace/12345/heartbeat?workspaceId=300' - tower.getUrlTraceComplete() == 'https://tower.nf/trace/12345/complete?workspaceId=300' + tower.getUrlTraceCreate() == 'https://api.cloud.seqera.io/trace/create?workspaceId=300' + tower.getUrlTraceBegin() == 'https://api.cloud.seqera.io/trace/12345/begin?workspaceId=300' + tower.getUrlTraceProgress() == 'https://api.cloud.seqera.io/trace/12345/progress?workspaceId=300' + tower.getUrlTraceHeartbeat() == 'https://api.cloud.seqera.io/trace/12345/heartbeat?workspaceId=300' + tower.getUrlTraceComplete() == 'https://api.cloud.seqera.io/trace/12345/complete?workspaceId=300' } def 'should set the auth token' () { given: def http = Mock(SimpleHttpClient) def session = Mock(Session) - TowerClient client = Spy(TowerClient, constructorArgs: [session, 'https://tower.nf']) + def config = new TowerConfig([:], [:]) + def client = Spy(new TowerClient(session, config)) and: def SIMPLE = '4ffbf1009ebabea77db3d72efefa836dfbb71271' def BEARER = 'eyJ0aWQiOiA1fS5jZmM1YjVhOThjZjM2MTk1NjBjZWU1YmMwODUxYzA1ZjkzMDdmN2Iz' diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/cli/WaveRunCmd.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/cli/WaveRunCmd.groovy index ae94bf2461..4edff020cb 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/cli/WaveRunCmd.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/cli/WaveRunCmd.groovy @@ -23,7 +23,7 @@ import groovy.transform.CompileStatic import groovy.util.logging.Slf4j import io.seqera.wave.plugin.WaveClient import nextflow.Session -import nextflow.container.DockerBuilder +import nextflow.container.ContainerBuilder import nextflow.exception.AbortOperationException /** @@ -71,11 +71,10 @@ class WaveRunCmd { void runContainer(String image, List args=Collections.emptyList()) { final 
containerConfig = session.getContainerConfig() - final containerBuilder = new DockerBuilder(image) + final containerBuilder = ContainerBuilder.create(containerConfig, image) .addMountWorkDir(false) .addRunOptions('--rm') .addMounts(containerMounts) - .params(containerConfig) .params(containerParams) // add env variables diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/HttpOpts.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/HttpOpts.groovy index 1e6b55da73..269b604be0 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/HttpOpts.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/HttpOpts.groovy @@ -19,6 +19,9 @@ package io.seqera.wave.plugin.config import groovy.transform.CompileStatic import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.Duration import nextflow.util.RateUnit @@ -29,10 +32,18 @@ import nextflow.util.RateUnit */ @ToString(includeNames = true, includePackage = false) @CompileStatic -class HttpOpts { +class HttpOpts implements ConfigScope { + @ConfigOption + @Description(""" + The connection timeout for the Wave HTTP client (default: `30s`). + """) final private Duration connectTimeout + @ConfigOption + @Description(""" + The maximum request rate for the Wave HTTP client (default: `1/sec`). 
+ """) final private RateUnit maxRate HttpOpts(Map opts) { diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/RetryOpts.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/RetryOpts.groovy index 47f003014e..65cdb5dc17 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/RetryOpts.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/RetryOpts.groovy @@ -19,6 +19,9 @@ package io.seqera.wave.plugin.config import groovy.transform.CompileStatic import groovy.transform.ToString +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.script.dsl.Description import nextflow.util.Duration /** @@ -26,10 +29,30 @@ import nextflow.util.Duration */ @ToString(includeNames = true, includePackage = false) @CompileStatic -class RetryOpts { +class RetryOpts implements ConfigScope { + + @ConfigOption + @Description(""" + The initial delay when a failing HTTP request is retried (default: `450ms`). + """) Duration delay = Duration.of('450ms') + + @ConfigOption + @Description(""" + The max delay when a failing HTTP request is retried (default: `90s`). + """) Duration maxDelay = Duration.of('90s') + + @ConfigOption + @Description(""" + The max number of attempts a failing HTTP request is retried (default: `5`). + """) int maxAttempts = 5 + + @ConfigOption + @Description(""" + The jitter factor used to randomly vary retry delays (default: `0.25`). 
+ """) double jitter = 0.25 RetryOpts() { diff --git a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy index 32480b2e82..931e321ce7 100644 --- a/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy +++ b/plugins/nf-wave/src/main/io/seqera/wave/plugin/config/WaveConfig.groovy @@ -24,6 +24,10 @@ import io.seqera.wave.api.BuildCompression import io.seqera.wave.api.ScanLevel import io.seqera.wave.api.ScanMode import io.seqera.wave.config.CondaOpts +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName +import nextflow.script.dsl.Description import nextflow.file.FileHelper import nextflow.util.Duration /** @@ -31,51 +35,83 @@ import nextflow.util.Duration * * @author Paolo Di Tommaso */ +@ScopeName("wave") +@Description(""" + The `wave` scope provides advanced configuration for the use of [Wave containers](https://docs.seqera.io/wave). +""") @Slf4j @ToString(includeNames = true, includePackage = false, includeFields = true) @CompileStatic -class WaveConfig { +class WaveConfig implements ConfigScope { final private static String DEF_ENDPOINT = 'https://wave.seqera.io' final private static List DEF_STRATEGIES = List.of('container','dockerfile','conda') - final private Boolean enabled - final private String endpoint - final private List containerConfigUrl - final private Duration tokensCacheMaxDuration - final private CondaOpts condaOpts - final private List strategy + + final BuildOpts build + + @ConfigOption + @Description(""" + Enable the use of Wave containers (default: `false`). + """) + final boolean enabled + + @ConfigOption + @Description(""" + The Wave service endpoint (default: `https://wave.seqera.io`). + """) + final String endpoint + + @ConfigOption + @Description(""" + Enable Wave container freezing (default: `false`). 
Wave will provision a non-ephemeral container image that will be pushed to a container repository of your choice. + + See also: `wave.build.repository` and `wave.build.cacheRepository` + """) + final boolean freeze + + final HttpOpts httpClient + + @ConfigOption + @Description(""" + Enable Wave container mirroring (default: `false`). Wave will mirror (i.e. copy) the containers in your pipeline to a container registry of your choice, so that pipeline tasks can pull the containers from this registry instead of the original one. + + See also: `wave.build.repository` + """) + final boolean mirror + + final RetryOpts retryPolicy + + final ScanOpts scan + + @ConfigOption(types=[String]) + @Description(""" + The strategy to be used when resolving multiple Wave container requirements (default: `'container,dockerfile,conda'`). + """) + final List strategy + final private Boolean bundleProjectResources - final private String buildRepository - final private String cacheRepository - final private RetryOpts retryOpts - final private HttpOpts httpClientOpts - final private Boolean freezeMode + final private List containerConfigUrl final private Boolean preserveFileTimestamp - final private Duration buildMaxDuration - final private Boolean mirrorMode - final private ScanMode scanMode - final private List scanAllowedLevels - final private BuildCompression buildCompression + final private Duration tokensCacheMaxDuration + + /* required by extension point -- do not remove */ + WaveConfig() {} WaveConfig(Map opts, Map env=System.getenv()) { - this.enabled = opts.enabled + this.build = new BuildOpts(opts.build as Map ?: Collections.emptyMap()) + this.enabled = opts.enabled as boolean this.endpoint = (opts.endpoint?.toString() ?: env.get('WAVE_API_ENDPOINT') ?: DEF_ENDPOINT)?.stripEnd('/') - this.freezeMode = opts.freeze - this.mirrorMode = opts.mirror - this.preserveFileTimestamp = opts.preserveFileTimestamp as Boolean - this.containerConfigUrl = parseConfig(opts, env) - 
this.tokensCacheMaxDuration = opts.navigate('tokens.cache.maxDuration', '30m') as Duration - this.condaOpts = opts.navigate('build.conda', Collections.emptyMap()) as CondaOpts - this.buildRepository = opts.navigate('build.repository') as String - this.cacheRepository = opts.navigate('build.cacheRepository') as String + this.freeze = opts.freeze as boolean + this.httpClient = new HttpOpts(opts.httpClient as Map ?: Collections.emptyMap()) + this.mirror = opts.mirror as boolean + this.retryPolicy = retryOpts0(opts) + this.scan = new ScanOpts(opts.scan as Map ?: Collections.emptyMap()) this.strategy = parseStrategy(opts.strategy) + this.bundleProjectResources = opts.bundleProjectResources - this.retryOpts = retryOpts0(opts) - this.httpClientOpts = new HttpOpts(opts.httpClient as Map ?: Map.of()) - this.buildMaxDuration = opts.navigate('build.maxDuration', '40m') as Duration - this.scanMode = opts.navigate('scan.mode') as ScanMode - this.scanAllowedLevels = parseScanLevels(opts.navigate('scan.allowedLevels')) - this.buildCompression = parseCompression(opts.navigate('build.compression') as Map) - // some validation + this.containerConfigUrl = parseConfig(opts, env) + this.preserveFileTimestamp = opts.preserveFileTimestamp as Boolean + this.tokensCacheMaxDuration = opts.navigate('tokens.cache.maxDuration', '30m') as Duration + validateConfig() } @@ -83,38 +119,38 @@ class WaveConfig { String endpoint() { this.endpoint } - CondaOpts condaOpts() { this.condaOpts } + CondaOpts condaOpts() { this.build.conda } - RetryOpts retryOpts() { this.retryOpts } + RetryOpts retryOpts() { this.retryPolicy } - HttpOpts httpOpts() { this.httpClientOpts } + HttpOpts httpOpts() { this.httpClient } List strategy() { this.strategy } - boolean freezeMode() { return this.freezeMode } + boolean freezeMode() { this.freeze } - boolean mirrorMode() { return this.mirrorMode } + boolean mirrorMode() { this.mirror } boolean preserveFileTimestamp() { return this.preserveFileTimestamp } boolean 
bundleProjectResources() { bundleProjectResources } - String buildRepository() { buildRepository } + String buildRepository() { build.repository } - String cacheRepository() { cacheRepository } + String cacheRepository() { build.cacheRepository } - Duration buildMaxDuration() { buildMaxDuration } + Duration buildMaxDuration() { build.maxDuration } - BuildCompression buildCompression() { buildCompression } + BuildCompression buildCompression() { build.compression } private void validateConfig() { def scheme= FileHelper.getUrlProtocol(endpoint) if( scheme !in ['http','https'] ) throw new IllegalArgumentException("Endpoint URL should start with 'http:' or 'https:' protocol prefix - offending value: '$endpoint'") - if( FileHelper.getUrlProtocol(buildRepository) ) - throw new IllegalArgumentException("Config setting 'wave.build.repository' should not include any protocol prefix - offending value: '$buildRepository'") - if( FileHelper.getUrlProtocol(cacheRepository) ) - throw new IllegalArgumentException("Config setting 'wave.build.cacheRepository' should not include any protocol prefix - offending value: '$cacheRepository'") + if( FileHelper.getUrlProtocol(build.repository) ) + throw new IllegalArgumentException("Config setting 'wave.build.repository' should not include any protocol prefix - offending value: '$build.repository'") + if( FileHelper.getUrlProtocol(build.cacheRepository) ) + throw new IllegalArgumentException("Config setting 'wave.build.cacheRepository' should not include any protocol prefix - offending value: '$build.cacheRepository'") } private RetryOpts retryOpts0(Map opts) { @@ -124,8 +160,9 @@ class WaveConfig { log.warn "Configuration options 'wave.retry' has been deprecated - replace it with 'wave.retryPolicy'" return new RetryOpts(opts.retry as Map) } - return new RetryOpts(Map.of()) + return new RetryOpts(Collections.emptyMap()) } + protected List parseStrategy(value) { if( !value ) { log.debug "Wave strategy not specified - using default: 
$DEF_STRATEGIES" @@ -177,11 +214,34 @@ class WaveConfig { } ScanMode scanMode() { - return scanMode + return scan.mode } List scanAllowedLevels() { - return scanAllowedLevels + return scan.allowedLevels + } +} + + +@ToString(includeNames = true, includePackage = false, includeFields = true) +@CompileStatic +class ScanOpts implements ConfigScope { + + @ConfigOption(types=[String]) + @Description(""" + Comma-separated list of allowed vulnerability levels when scanning containers for security vulnerabilities in `required` mode. + """) + final List allowedLevels + + @ConfigOption(types=[String]) + @Description(""" + Enable Wave container security scanning. Wave will scan the containers in your pipeline for security vulnerabilities. + """) + final ScanMode mode + + ScanOpts(Map opts) { + allowedLevels = parseScanLevels(opts.allowedLevels) + mode = opts.mode as ScanMode } protected List parseScanLevels(value) { @@ -196,6 +256,41 @@ class WaveConfig { } throw new IllegalArgumentException("Invalid value for 'wave.scan.levels' setting - offending value: $value; type: ${value.getClass().getName()}") } +} + + +@ToString(includeNames = true, includePackage = false, includeFields = true) +@CompileStatic +class BuildOpts implements ConfigScope { + + @ConfigOption + @Description(""" + The container repository where images built by Wave are uploaded. + """) + final String repository + + @ConfigOption + @Description(""" + The container repository used to cache image layers built by the Wave service. 
+ """) + final String cacheRepository + + final CondaOpts conda + + final BuildCompression compression + + @ConfigOption + @Description(""" + """) + final Duration maxDuration + + BuildOpts(Map opts) { + repository = opts.repository + cacheRepository = opts.cacheRepository + conda = new CondaOpts(opts.conda as Map ?: Collections.emptyMap()) + compression = parseCompression(opts.compression as Map) + maxDuration = opts.maxDuration as Duration ?: Duration.of('40m') + } protected BuildCompression parseCompression(Map opts) { if( !opts ) diff --git a/plugins/nf-wave/src/resources/META-INF/extensions.idx b/plugins/nf-wave/src/resources/META-INF/extensions.idx index 35bfd61258..00d9b7d31e 100644 --- a/plugins/nf-wave/src/resources/META-INF/extensions.idx +++ b/plugins/nf-wave/src/resources/META-INF/extensions.idx @@ -10,4 +10,5 @@ # io.seqera.wave.plugin.WaveFactory +io.seqera.wave.plugin.config.WaveConfig io.seqera.wave.plugin.resolver.WaveContainerResolver diff --git a/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy b/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy index acd8c25d16..02f09923e5 100644 --- a/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy +++ b/plugins/nf-wave/src/test/io/seqera/wave/plugin/config/WaveConfigTest.groovy @@ -186,7 +186,7 @@ class WaveConfigTest extends Specification { given: def config = new WaveConfig([enabled: true]) expect: - config.toString() == 'WaveConfig(enabled:true, endpoint:https://wave.seqera.io, containerConfigUrl:[], tokensCacheMaxDuration:30m, condaOpts:CondaOpts(mambaImage=mambaorg/micromamba:1.5.10-noble; basePackages=conda-forge::procps-ng, commands=null), strategy:[container, dockerfile, conda], bundleProjectResources:null, buildRepository:null, cacheRepository:null, retryOpts:RetryOpts(delay:450ms, maxDelay:1m 30s, maxAttempts:5, jitter:0.25), httpClientOpts:HttpOpts(), freezeMode:null, preserveFileTimestamp:null, buildMaxDuration:40m, 
mirrorMode:null, scanMode:null, scanAllowedLevels:null, buildCompression:null)' + config.toString() == 'WaveConfig(build:BuildOpts(repository:null, cacheRepository:null, conda:CondaOpts(mambaImage=mambaorg/micromamba:1.5.10-noble; basePackages=conda-forge::procps-ng, commands=null), compression:null, maxDuration:40m), enabled:true, endpoint:https://wave.seqera.io, freeze:false, httpClient:HttpOpts(), mirror:false, retryPolicy:RetryOpts(delay:450ms, maxDelay:1m 30s, maxAttempts:5, jitter:0.25), scan:ScanOpts(allowedLevels:null, mode:null), strategy:[container, dockerfile, conda], bundleProjectResources:null, containerConfigUrl:[], preserveFileTimestamp:null, tokensCacheMaxDuration:30m)' } def 'should not allow invalid setting' () { From a5b2dede05de3b193ccee623ee693444624105dd Mon Sep 17 00:00:00 2001 From: Ben Sherman Date: Fri, 1 Aug 2025 12:52:08 -0500 Subject: [PATCH 2/7] Fix failing e2e tests Signed-off-by: Ben Sherman --- .../main/groovy/nextflow/container/DockerConfig.groovy | 2 +- .../src/main/nextflow/cloud/aws/config/AwsS3Config.groovy | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy index 7d3a095d62..693b30da19 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy @@ -36,7 +36,7 @@ class DockerConfig implements ConfigScope, ContainerConfig { @Description(""" Enable Docker execution (default: `false`). 
""") - final boolean enabled + boolean enabled @ConfigOption @Description(""" diff --git a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy index cbddc08f82..be45c3f40f 100644 --- a/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy +++ b/plugins/nf-amazon/src/main/nextflow/cloud/aws/config/AwsS3Config.groovy @@ -48,7 +48,7 @@ class AwsS3Config implements ConfigScope { @Description(""" The amount of time to wait (in milliseconds) when initially establishing a connection before timing out (default: `10000`). """) - final int connectionTimeout + final Integer connectionTimeout final Boolean debug @@ -146,7 +146,7 @@ class AwsS3Config implements ConfigScope { @Description(""" The amount of time to wait (in milliseconds) for data to be transferred over an established, open connection before the connection is timed out (default: `50000`). """) - final int socketTimeout + final Integer socketTimeout @ConfigOption @Description(""" @@ -217,7 +217,7 @@ class AwsS3Config implements ConfigScope { AwsS3Config(Map opts) { this.anonymous = opts.anonymous as Boolean - this.connectionTimeout = opts.connectionTimeout != null ? opts.connectionTimeout as int : 10000 + this.connectionTimeout = opts.connectionTimeout as Integer this.debug = opts.debug as Boolean this.endpoint = opts.endpoint ?: SysEnv.get('AWS_S3_ENDPOINT') if( endpoint && FileHelper.getUrlProtocol(endpoint) !in ['http','https'] ) @@ -236,7 +236,7 @@ class AwsS3Config implements ConfigScope { this.requesterPays = opts.requesterPays as Boolean this.s3Acl = parseS3Acl(opts.s3Acl as String) this.s3PathStyleAccess = opts.s3PathStyleAccess as Boolean - this.socketTimeout = opts.socketTimeout != null ? 
opts.socketTimeout as int : 50000 + this.socketTimeout = opts.socketTimeout as Integer this.storageClass = parseStorageClass((opts.storageClass ?: opts.uploadStorageClass) as String) // 'uploadStorageClass' is kept for legacy purposes this.storageEncryption = parseStorageEncryption(opts.storageEncryption as String) this.storageKmsKeyId = opts.storageKmsKeyId From 1630ef2a05cafb9b784d4a6e6cc9f6235f0ea8ee Mon Sep 17 00:00:00 2001 From: Ben Sherman Date: Fri, 1 Aug 2025 15:15:47 -0500 Subject: [PATCH 3/7] Add developer docs and JSON/markdown renderers Signed-off-by: Ben Sherman --- docs/developer/config-scopes.md | 176 ++++++++++++++++++ docs/developer/plugins.md | 5 +- docs/index.md | 1 + .../config/schema/JsonRenderer.groovy | 123 ++++++++++++ .../config/schema/MarkdownRenderer.groovy | 104 +++++++++++ 5 files changed, 408 insertions(+), 1 deletion(-) create mode 100644 docs/developer/config-scopes.md create mode 100644 modules/nextflow/src/main/groovy/nextflow/config/schema/JsonRenderer.groovy create mode 100644 modules/nextflow/src/main/groovy/nextflow/config/schema/MarkdownRenderer.groovy diff --git a/docs/developer/config-scopes.md b/docs/developer/config-scopes.md new file mode 100644 index 0000000000..8a8ece53f1 --- /dev/null +++ b/docs/developer/config-scopes.md @@ -0,0 +1,176 @@ +(config-scopes-page)= + +# Configuration scopes + +This page provides guidance on defining configuration scopes in the Nextflow runtime. + +## Overview + +The Nextflow configuration is defined as a collection of *scope classes*. Each scope class defines the set of available options, including their name, type, and an optional description for a specific configuration scope. 
+ +Scope classes are used to generate a configuration schema, which is in turn used for several purposes: + +- Validating config options at runtime (`nextflow run` and `nextflow config`) + +- Providing code intelligence in the language server (validation, hover hints, code completion) + +- Generating reference documentation (in progress) + +Scope classes are also used by the runtime itself as type-safe domain objects. This way, the construction of domain objects from the configuration map is isolated from the rest of the runtime. + +## Definition + +### Config scopes + +A *config scope* is defined as a class that implements the `ConfigScope` interface. Top-level scope classes must have the `@ScopeName` annotation, which defines the name of the config scope. + +For example: + +```groovy +package nextflow.hello + +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName + +@ScopeName('hello') +class HelloConfig implements ConfigScope { +} +``` + +A scope class must provide a default constructor, so that it can be instantiated as an extension point. If no such constructor is defined, the config scope will not be included in the schema. In the above example, this constructor is implicitly defined because no constructors were declared. + +The fully-qualified class name (in this case, `nextflow.hello.HelloConfig`) must be included in the list of extension points. + +### Config options + +A *config option* is defined as a field with the `@ConfigOption` annotation. The field name determines the name of the config option. + +For example: + +```groovy + @ConfigOption + String createMessage +``` + +The `@ConfigOption` annotation can specify an optional set of types that are valid in addition to the field type.
For example, the `fusion.tags` option, which accepts either a String or Boolean, is declared as follows: + +```groovy + @ConfigOption(types=[Boolean]) + String tags +``` + +The field type and any additional types are included in the schema, allowing them to be used for validation. + +The field type can be any Java or Groovy class, but in practice it should be a class that can be constructed from primitive values (numbers, booleans, strings). For example, `Duration` and `MemoryUnit` are standard Nextflow types that can each be constructed from an integer or string. + +### Nested scopes + +A *nested scope* is defined as a field whose type is an implementation of `ConfigScope`. The field name determines the name of the nested scope. + +The scope class referenced by the field type defines config options and scopes in the same manner as top-level scope classes. Unlike top-level scopes, nested scope classes do not need to use the `@ScopeName` annotation or provide a default constructor. + +See `ExecutorConfig` and `ExecutorRetryConfig` for an example of how a nested scope is defined and constructed. + +### Placeholder scopes + +A *placeholder scope* is a config scope that applies to a collection of user-defined names. + +For example, the `azure.batch.pools` scope allows the user to define a set of named pools, where each pool is configured with a standard set of options such as `autoScale`, `lowPriority`, `maxVmCount`, etc. These options are defined in a placeholder scope with a placeholder name of `<name>`. Thus, the generic name for the `autoScale` option is `azure.batch.pools.<name>.autoScale`. + +A placeholder scope is defined as a field with type `Map<String, P>`, where `P` is a nested scope class which defines the scope options. The field should have the `@PlaceholderName` annotation which defines the placeholder name (e.g. `<name>`). + +See `AzBatchOpts` and `AzPoolOpts` for an example of how placeholder scopes are defined and constructed.
+ +### Descriptions + +Top-level scope classes and config options should use the `@Description` annotation to provide a description of the scope or option. This description is included in the schema, which is in turn used by the language server to provide hover hints. + +For example: + +```groovy +@ScopeName('hello') +@Description(''' + The `hello` scope controls the behavior of the `nf-hello` plugin. +''') +class HelloConfig implements ConfigScope { + + @ConfigOption + @Description(''' + Message to print to standard output when a run is initialized. + ''') + String createMessage +} +``` + +Nested scopes and placeholder scopes may also use this annotation, but will inherit the description of the top-level scope by default. + +### Best practices + +The Nextflow runtime adheres to the following best practices where appropriate: + +- Config options should be declared as public and final, so that the scope class can be used as an immutable domain object. + +- Scope classes should define a constructor that initializes each field from a map, casting each map property to the required type and providing default values as needed. + +- In cases where an option defaults to an environment variable, the environment map should be provided as an additional constructor argument rather than accessing the system environment directly. + +- In cases where an option with a primitive type (e.g., `int`, `float`, `boolean`) can be unspecified without a default value, it should be declared with the equivalent reference type (e.g. `Integer`, `Float`, `Boolean`), otherwise it should use the primitive type. + +- In cases where an option represents a path, it should be declared as a `String` and allow clients to construct paths as needed, since path construction may depend on plugins which aren't yet loaded. 
+ +For example: + +```groovy +import nextflow.config.schema.ConfigOption +import nextflow.config.schema.ConfigScope +import nextflow.config.schema.ScopeName + +@ScopeName('hello') +class HelloConfig implements ConfigScope { + + @ConfigOption + final String createMessage + + @ConfigOption + final boolean verbose + + HelloConfig() {} + + HelloConfig(Map opts, Map env) { + this.createMessage = opts.createMessage ?: env.get('NXF_HELLO_CREATE_MESSAGE') + this.verbose = opts.verbose as boolean + } +} +``` + +## Usage + +### Runtime + +Nextflow validates the config map after it is loaded. Top-level config scopes are loaded by the plugin system as extension points and converted into a schema, which is used to validate the config map. + +Plugins are loaded after the config is loaded and before it is validated, since plugins can also define config scopes. If a third-party plugin declares a config scope, it must be explicitly enabled in order to validate config options from the plugin. Otherwise, Nextflow will report these options as unrecognized. + +Core plugins are loaded automatically based on other config options. Therefore, Nextflow only validates config from a core plugin when that plugin is loaded. Otherwise, any config options from the plugin are ignored -- they are neither validated nor reported as unrecognized. + +For example, when the `process.executor` config option is set to `'awsbatch'`, the `nf-amazon` plugin is automatically loaded. In this case, all options in the `aws` config scope will be validated. If the executor is not set to `'awsbatch'`, all `aws` options will be ignored. This way, config files can be validated appropriately without loading additional core plugins that won't be used by the run. + +The scope classes themselves can be used to construct domain objects on-demand from the config map. 
For example, an `ExecutorConfig` can be constructed from the `executor` config scope as follows: + +```groovy +new ExecutorConfig( Global.session.config.executor as Map ?: Collections.emptyMap() ) +``` + +:::{note} +In practice, it is better to avoid the use of `Global` and provide an instance of `Session` to the client class instead. +::: + +### JSON Schema + +Config scope classes can be converted into a schema with the `SchemaNode` class, which uses reflection to extract metadata such as scope names, option names, types, and descriptions. This schema is rendered to JSON and used by the language server at build-time to provide code intelligence such as code completion and hover hints. + +### Documentation + +The schema described above can also be rendered to Markdown using the `MarkdownRenderer` class. It produces a Markdown document approximating the {ref}`config-options` page. + +This approach to docs generation is not yet complete, and has not been incorporated into the build process yet. However, it can be used to check for discrepancies between the source code and docs when making changes. The documentation should match the `@Description` annotations as closely as possible, but may contain additional details such as version notes and extra paragraphs. diff --git a/docs/developer/plugins.md b/docs/developer/plugins.md index 9cd30aa599..24363bb75c 100644 --- a/docs/developer/plugins.md +++ b/docs/developer/plugins.md @@ -133,6 +133,9 @@ import nextflow.script.dsl.Description ''') class MyPluginConfig implements ConfigScope { + /* required by extension point -- do not remove */ + MyPluginConfig() {} + MyPluginConfig(Map opts) { this.createMessage = opts.createMessage } @@ -143,7 +146,7 @@ class MyPluginConfig implements ConfigScope { } ``` -While this approach is not required to support plugin config options, it allows Nextflow to recognize plugin definitions when validating the config. 
+While this approach is not required to support plugin config options, it allows Nextflow to recognize plugin definitions when validating the config. See {ref}`config-scopes-page` for more information. ### Executors diff --git a/docs/index.md b/docs/index.md index 92724f7b27..aabef901a4 100644 --- a/docs/index.md +++ b/docs/index.md @@ -152,6 +152,7 @@ migrations/index developer/index developer/diagram +developer/config-scopes developer/packages developer/plugins ``` diff --git a/modules/nextflow/src/main/groovy/nextflow/config/schema/JsonRenderer.groovy b/modules/nextflow/src/main/groovy/nextflow/config/schema/JsonRenderer.groovy new file mode 100644 index 0000000000..56390388e8 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/config/schema/JsonRenderer.groovy @@ -0,0 +1,123 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package nextflow.config.schema + +import groovy.json.JsonOutput +import groovy.transform.TypeChecked +import nextflow.plugin.Plugins +import nextflow.script.dsl.Description +import nextflow.script.types.Types +import org.codehaus.groovy.ast.ClassNode + +@TypeChecked +class JsonRenderer { + + String render() { + final schema = getSchema() + return JsonOutput.toJson(schema) + } + + private static Map getSchema() { + final result = new HashMap() + for( final scope : Plugins.getExtensions(ConfigScope) ) { + final clazz = scope.getClass() + final scopeName = clazz.getAnnotation(ScopeName)?.value() + final description = clazz.getAnnotation(Description)?.value() + if( scopeName == '' ) { + SchemaNode.Scope.of(clazz, '').children().each { name, node -> + result.put(name, fromNode(node)) + } + continue + } + if( !scopeName ) + continue + final node = SchemaNode.Scope.of(clazz, description) + result.put(scopeName, fromNode(node, scopeName)) + } + return result + } + + private static Map fromNode(SchemaNode node, String name=null) { + if( node instanceof SchemaNode.Option ) + return fromOption(node) + if( node instanceof SchemaNode.Placeholder ) + return fromPlaceholder(node) + if( node instanceof SchemaNode.Scope ) + return fromScope(node, name) + throw new IllegalStateException() + } + + private static Map fromOption(SchemaNode.Option node) { + final description = node.description().stripIndent(true).trim() + final type = fromType(new ClassNode(node.type())) + + return [ + type: 'Option', + spec: [ + description: description, + type: type + ] + ] + } + + private static Map fromPlaceholder(SchemaNode.Placeholder node) { + final description = node.description().stripIndent(true).trim() + final placeholderName = node.placeholderName() + final scope = fromScope(node.scope()) + + return [ + type: 'Placeholder', + spec: [ + description: description, + placeholderName: placeholderName, + scope: scope.spec + ] + ] + } + + private static Map fromScope(SchemaNode.Scope node, 
String scopeName=null) { + final description = node.description().stripIndent(true).trim() + final children = node.children().collectEntries { name, child -> + Map.entry(name, fromNode(child, name)) + } + + return [ + type: 'Scope', + spec: [ + description: withLink(scopeName, description), + children: children + ] + ] + } + + private static String withLink(String scopeName, String description) { + return scopeName + ? "$description\n\n[Read more](https://nextflow.io/docs/latest/reference/config.html#$scopeName)\n" + : description + } + + private static Object fromType(ClassNode cn) { + final name = Types.getName(cn.getTypeClass()) + if( cn.isUsingGenerics() ) { + final typeArguments = cn.getGenericsTypes().collect { gt -> fromType(gt.getType()) } + return [ name: name, typeArguments: typeArguments ] + } + else { + return name + } + } + +} diff --git a/modules/nextflow/src/main/groovy/nextflow/config/schema/MarkdownRenderer.groovy b/modules/nextflow/src/main/groovy/nextflow/config/schema/MarkdownRenderer.groovy new file mode 100644 index 0000000000..5968cfca25 --- /dev/null +++ b/modules/nextflow/src/main/groovy/nextflow/config/schema/MarkdownRenderer.groovy @@ -0,0 +1,104 @@ +/* + * Copyright 2024-2025, Seqera Labs + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package nextflow.config.schema + +import groovy.transform.TypeChecked +import nextflow.plugin.Plugins +import nextflow.script.dsl.Description + +@TypeChecked +class MarkdownRenderer { + + String render() { + final schema = getSchema() + final entries = schema.entrySet().sort { entry -> entry.key } + final result = new StringBuilder() + entries.each { entry -> + final scopeName = entry.key + + final anchor = scopeName == '' + ? '(config-unscoped)=' + : "(config-$scopeName)=" + result.append("\n$anchor\n") + + final title = scopeName == '' + ? 'Unscoped options' + : "`$scopeName`" + result.append("\n## $title\n") + + final scope = entry.value + final description = scope.description() + if( description ) + result.append("\n${fromDescription(description)}\n") + result.append("\nThe following settings are available:\n") + + final options = scope.children().findAll { name, node -> node instanceof SchemaNode.Option } + renderOptions(options, scopeName, result) + + final scopes = scope.children().findAll { name, node -> node instanceof SchemaNode.Scope } + renderOptions(scopes, scopeName, result) + } + return result.toString() + } + + private static Map getSchema() { + final result = new HashMap() + for( final scope : Plugins.getExtensions(ConfigScope) ) { + final clazz = scope.getClass() + final scopeName = clazz.getAnnotation(ScopeName)?.value() + final description = clazz.getAnnotation(Description)?.value() + if( scopeName == null ) + continue + final node = SchemaNode.Scope.of(clazz, description) + result.put(scopeName, node) + } + return result + } + + private static String fromDescription(String description) { + return description.stripIndent(true).trim() + } + + private static void renderOptions(Map nodes, String scopeName, StringBuilder result) { + final prefix = scopeName ? scopeName + '.' 
: '' + final entries = nodes.entrySet().sort { entry -> entry.key } + entries.each { entry -> + final name = entry.key + final node = entry.value + if( node instanceof SchemaNode.Option ) + renderOption("${prefix}${name}", node, result) + else if( node instanceof SchemaNode.Placeholder ) + renderOptions(node.scope().children(), "${prefix}${name}.${node.placeholderName()}", result) + else if( node instanceof SchemaNode.Scope ) + renderOptions(node.children(), "${prefix}${name}", result) + else + throw new IllegalStateException() + } + } + + private static void renderOption(String name, SchemaNode.Option node, StringBuilder result) { + final description = fromDescription(node.description()) + if( !description ) + return + result.append("\n`${name}`") + description.eachLine { line -> + if( line ) + result.append("\n: ${line}") + } + result.append('\n') + } + +} From f7954da0c088ce4ca5dd10491c4d8730fb619d10 Mon Sep 17 00:00:00 2001 From: Ben Sherman Date: Tue, 5 Aug 2025 11:29:49 -0500 Subject: [PATCH 4/7] Restore some hidden config options Signed-off-by: Ben Sherman --- docs/reference/config.md | 18 +++++ docs/reference/env-vars.md | 5 ++ .../nextflow/container/DockerConfig.groovy | 2 +- .../src/main/nextflow/k8s/K8sConfig.groovy | 47 +++++++++++- .../nextflow/k8s/client/ClientConfig.groovy | 36 +++++++++ .../test/nextflow/k8s/K8sConfigTest.groovy | 71 +++++++++++++++--- .../k8s/client/ClientConfigTest.groovy | 75 +++++++++++++++++++ 7 files changed, 241 insertions(+), 13 deletions(-) diff --git a/docs/reference/config.md b/docs/reference/config.md index 2746d0baf7..9b7cb50482 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -984,6 +984,24 @@ The following settings are available: `k8s.autoMountHostPaths` : Automatically mount host paths into the task pods (default: `false`). Only intended for development purposes when using a single node. +`k8s.cleanup` +: When `true`, successful pods are automatically deleted (default: `true`). 
+ +`k8s.client` +: Map of options for the Kubernetes HTTP client. +: If this option is specified, it will be used instead of `.kube/config`. +: The following options are available: + - `server` + - `token` + - `tokenFile` + - `verifySsl` + - `sslCert` + - `sslCertFile` + - `clientCert` + - `clientCertFile` + - `clientKey` + - `clientKeyFile` + `k8s.computeResourceType` : :::{versionadded} 22.05.0-edge ::: diff --git a/docs/reference/env-vars.md b/docs/reference/env-vars.md index ede3aafbfb..2c6210effa 100644 --- a/docs/reference/env-vars.md +++ b/docs/reference/env-vars.md @@ -49,6 +49,11 @@ The following environment variables control the configuration of the Nextflow ru ::: : Enable the use of Conda recipes defined by using the {ref}`process-conda` directive. (default: `false`). +`NXF_CONTAINER_ENTRYPOINT_OVERRIDE` +: :::{deprecated} 22.10.0 + ::: +: When `true`, override the container entrypoint with `/bin/bash` (default: `false`). + `NXF_DEFAULT_DSL` : :::{versionadded} 22.03.0-edge ::: diff --git a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy index 693b30da19..27c62dc642 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy @@ -56,7 +56,7 @@ class DockerConfig implements ConfigScope, ContainerConfig { """) final boolean fixOwnership - @ConfigOption + @ConfigOption(types=[String,Boolean]) @Description(""" """) final Object kill diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy index d237b3229c..f53c489985 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy +++ b/plugins/nf-k8s/src/main/nextflow/k8s/K8sConfig.groovy @@ -67,6 +67,20 @@ class K8sConfig implements ConfigScope { """) final String computeResourceType + @ConfigOption + @Description(""" + When `true`, 
successful pods are automatically deleted (default: `true`). + """) + final private Boolean cleanup + + @ConfigOption + @Description(""" + Map of options for the K8s client. + + If this option is specified, it will be used instead of `.kube/config`. + """) + final Map client + @ConfigOption @Description(""" The Kubernetes [configuration context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) to use. @@ -181,6 +195,11 @@ class K8sConfig implements ConfigScope { """) final String storageSubPath + @ConfigOption + @Description(""" + """) + final String userName + @ConfigOption @Description(""" The path of the shared work directory (default: `/work`). Must be a path in a shared K8s persistent volume. @@ -194,6 +213,8 @@ class K8sConfig implements ConfigScope { K8sConfig(Map opts) { autoMountHostPaths = opts.autoMountHostPaths as boolean + cleanup = opts.cleanup as Boolean + client = opts.client as Map computeResourceType = opts.computeResourceType context = opts.context cpuLimits = opts.cpuLimits as boolean @@ -212,6 +233,7 @@ class K8sConfig implements ConfigScope { storageClaimName = opts.storageClaimName storageMountPath = opts.storageMountPath ?: '/workspace' storageSubPath = opts.storageSubPath + userName = opts.userName launchDir = opts.launchDir ?: "${storageMountPath}/${getUserName()}" projectDir = opts.projectDir ?: "${storageMountPath}/projects" @@ -256,11 +278,11 @@ class K8sConfig implements ConfigScope { } boolean getCleanup(boolean defValue=true) { - defValue + cleanup == null ? defValue : cleanup } String getUserName() { - System.properties.get('user.name') + userName ?: System.properties.get('user.name') } Map fuseDevicePlugin() { @@ -332,7 +354,9 @@ class K8sConfig implements ConfigScope { @Memoized ClientConfig getClient() { - final result = clientDiscovery(context, namespace, serviceAccount) + final result = client != null + ? 
clientFromNextflow(client, namespace, serviceAccount) + : clientDiscovery(context, namespace, serviceAccount) if( httpConnectTimeout ) result.httpConnectTimeout = httpConnectTimeout @@ -346,6 +370,23 @@ class K8sConfig implements ConfigScope { return result } + /** + * Get the K8s client config from the declaration made in the Nextflow config file + * + * @param map + * A map representing the client configuration options defined in the Nextflow + * config file + * @param namespace + * The K8s namespace to be used. If omitted {@code default} is used. + * @param serviceAccount + * The K8s service account to be used. If omitted {@code default} is used. + * @return + * The Kubernetes {@link ClientConfig} object + */ + @PackageScope ClientConfig clientFromNextflow(Map map, @Nullable String namespace, @Nullable String serviceAccount ) { + ClientConfig.fromNextflowConfig(map,namespace,serviceAccount) + } + /** + * Discover the K8s client config from the execution environment + * that can be either a `.kube/config` file or service meta file diff --git a/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy b/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy index 1cb70942a5..4f8b0c4126 100644 --- a/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy +++ b/plugins/nf-k8s/src/main/nextflow/k8s/client/ClientConfig.groovy @@ -100,6 +100,42 @@ class ClientConfig { new ConfigDiscovery().discover(context, namespace, serviceAccount) } + static ClientConfig fromNextflowConfig(Map opts, String namespace, String serviceAccount) { + final result = new ClientConfig() + + if( opts.server ) + result.server = opts.server + + if( opts.token ) + result.token = opts.token + else if( opts.tokenFile ) + result.token = Paths.get(opts.tokenFile.toString()).getText('UTF-8') + + result.namespace = namespace ?: opts.namespace ?: 'default' + + result.serviceAccount = serviceAccount ?: 'default' + + if( opts.verifySsl ) + result.verifySsl = opts.verifySsl as boolean + + 
if( opts.sslCert ) + result.sslCert = opts.sslCert.toString().decodeBase64() + else if( opts.sslCertFile ) + result.sslCert = Paths.get(opts.sslCertFile.toString()).bytes + + if( opts.clientCert ) + result.clientCert = opts.clientCert.toString().decodeBase64() + else if( opts.clientCertFile ) + result.clientCert = Paths.get(opts.clientCertFile.toString()).bytes + + if( opts.clientKey ) + result.clientKey = opts.clientKey.toString().decodeBase64() + else if( opts.clientKeyFile ) + result.clientKey = Paths.get(opts.clientKeyFile.toString()).bytes + + return result + } + static ClientConfig fromUserAndCluster(Map user, Map cluster, Path location) { final base = location.isDirectory() ? location : location.parent final result = new ClientConfig() diff --git a/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy b/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy index bb0f8b42c4..a2f6956a2d 100644 --- a/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy +++ b/plugins/nf-k8s/src/test/nextflow/k8s/K8sConfigTest.groovy @@ -63,6 +63,21 @@ class K8sConfigTest extends Specification { cfg = new K8sConfig() then: 'it should return false specified as default' !cfg.getCleanup(false) + + when: + cfg = new K8sConfig(cleanup:false) + then: 'it should return false' + !cfg.getCleanup() + + when: + cfg = new K8sConfig(cleanup:true) + then: 'it should return true' + cfg.getCleanup() + + when: + cfg = new K8sConfig(cleanup:true) + then: 'the default value should be ignored' + cfg.getCleanup(false) } def 'should create config with storage claims' () { @@ -112,16 +127,51 @@ class K8sConfigTest extends Specification { cfg.fuseDevicePlugin() == ['foo/fuse':10] } - @Unroll def 'should create client config' () { + given: + def CONFIG = [namespace: 'this', serviceAccount: 'that', client: [server: 'http://foo']] + + when: + def config = new K8sConfig(CONFIG) + def client = config.getClient() + then: + client.server == 'http://foo' + client.namespace == 'this' + 
client.serviceAccount == 'that' + client.httpConnectTimeout == null // testing default null + client.httpReadTimeout == null // testing default null + client.retryConfig.maxAttempts == 4 + + } + + def 'should create client config with http request timeouts' () { + given: def CONFIG = [ - context: CONTEXT, - namespace: NAMESPACE, - serviceAccount: SERVICE_ACCOUNT, + namespace: 'this', + serviceAccount: 'that', + client: [server: 'http://foo'], httpReadTimeout: '20s', httpConnectTimeout: '25s' ] + + when: + def config = new K8sConfig(CONFIG) + def client = config.getClient() + then: + client.server == 'http://foo' + client.namespace == 'this' + client.serviceAccount == 'that' + client.httpConnectTimeout == Duration.of('25s') + client.httpReadTimeout == Duration.of('20s') + + } + + @Unroll + def 'should create client config with discovery' () { + + given: + def CONFIG = [context: CONTEXT, namespace: NAMESPACE, serviceAccount: SERVICE_ACCOUNT] K8sConfig config = Spy(K8sConfig, constructorArgs: [ CONFIG ]) when: @@ -132,8 +182,6 @@ class K8sConfigTest extends Specification { client.server == SERVER client.namespace == NAMESPACE ?: 'default' client.serviceAccount == SERVICE_ACCOUNT ?: 'default' - client.httpConnectTimeout == Duration.of('25s') - client.httpReadTimeout == Duration.of('20s') where: CONTEXT | SERVER | NAMESPACE | SERVICE_ACCOUNT @@ -195,6 +243,11 @@ class K8sConfigTest extends Specification { def cfg = new K8sConfig() then: cfg.getUserName() == System.properties.get('user.name') + + when: + cfg = new K8sConfig(userName: 'foo') + then: + cfg.getUserName() == 'foo' } def 'should return user dir' () { @@ -204,12 +257,12 @@ class K8sConfigTest extends Specification { cfg.getLaunchDir() == '/workspace/' + System.properties.get('user.name') when: - cfg = new K8sConfig(storageMountPath: '/this/path') + cfg = new K8sConfig(storageMountPath: '/this/path', userName: 'foo') then: - cfg.getLaunchDir() == '/this/path/' + System.properties.get('user.name') + 
cfg.getLaunchDir() == '/this/path/foo' when: - cfg = new K8sConfig(storageMountPath: '/this/path', launchDir: '/my/path') + cfg = new K8sConfig(storageMountPath: '/this/path', userName: 'foo', launchDir: '/my/path') then: cfg.getLaunchDir() == '/my/path' diff --git a/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy b/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy index d7e46bfb61..5993921a64 100644 --- a/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy +++ b/plugins/nf-k8s/src/test/nextflow/k8s/client/ClientConfigTest.groovy @@ -42,4 +42,79 @@ class ClientConfigTest extends Specification { } + def 'should create a client config from a map' () { + + given: + def MAP = [ + server:'foo.com', + token: 'blah-blah', + namespace: 'my-namespace', + verifySsl: true, + sslCert: 'fizzbuzz'.bytes.encodeBase64().toString(), + clientCert: 'hello'.bytes.encodeBase64().toString(), + clientKey: 'world'.bytes.encodeBase64().toString() ] + + when: + def result = ClientConfig.fromNextflowConfig(MAP, null, null) + + then: + result.server == 'foo.com' + result.token == 'blah-blah' + result.namespace == 'my-namespace' + result.serviceAccount == 'default' + result.verifySsl + result.clientCert == 'hello'.bytes + result.clientKey == 'world'.bytes + result.sslCert == 'fizzbuzz'.bytes + + when: + result = ClientConfig.fromNextflowConfig(MAP, 'ns1', 'sa2') + then: + result.server == 'foo.com' + result.token == 'blah-blah' + result.namespace == 'ns1' + result.serviceAccount == 'sa2' + result.verifySsl + result.clientCert == 'hello'.bytes + result.clientKey == 'world'.bytes + result.sslCert == 'fizzbuzz'.bytes + } + + def 'should create a client config from a map with files' () { + + given: + def folder = Files.createTempDirectory('test') + def file1 = folder.resolve('file1') + def file2 = folder.resolve('file2') + def file3 = folder.resolve('file3') + file1.text = 'fizzbuzz'.bytes.encodeBase64().toString() + file2.text = 
'hello'.bytes.encodeBase64().toString() + file3.text = 'world'.bytes.encodeBase64().toString() + + def MAP = [ + server:'foo.com', + token: 'blah-blah', + namespace: 'my-namespace', + verifySsl: false, + sslCertFile: file1, + clientCertFile: file2, + clientKeyFile: file3 ] + + when: + def result = ClientConfig.fromNextflowConfig(MAP, null, null) + + then: + result.server == 'foo.com' + result.token == 'blah-blah' + result.namespace == 'my-namespace' + result.serviceAccount == 'default' + !result.verifySsl + result.sslCert == file1.text.bytes + result.clientCert == file2.text.bytes + result.clientKey == file3.text.bytes + + cleanup: + folder?.deleteDir() + } + } From f51d3e28b3cfeaa7b96ddf9a039a5f5cfc92b2f0 Mon Sep 17 00:00:00 2001 From: Ben Sherman Date: Wed, 6 Aug 2025 16:05:11 -0500 Subject: [PATCH 5/7] Document writableInputMounts for charliecloud and docker Signed-off-by: Ben Sherman --- docs/reference/config.md | 6 ++++++ .../nextflow/container/CharliecloudBuilder.groovy | 6 +++--- .../nextflow/container/CharliecloudConfig.groovy | 7 +++++++ .../nextflow/container/ContainerConfig.groovy | 4 ---- .../groovy/nextflow/container/DockerBuilder.groovy | 6 +++--- .../groovy/nextflow/container/DockerConfig.groovy | 7 +++++++ .../nextflow/executor/BashWrapperBuilder.groovy | 3 --- .../container/CharliecloudBuilderTest.groovy | 14 ++++---------- .../nextflow/container/DockerBuilderTest.groovy | 4 +--- 9 files changed, 31 insertions(+), 26 deletions(-) diff --git a/docs/reference/config.md b/docs/reference/config.md index 9b7cb50482..a230f069f7 100644 --- a/docs/reference/config.md +++ b/docs/reference/config.md @@ -526,6 +526,9 @@ The following settings are available: `charliecloud.temp` : Mounts a path of your choice as the `/tmp` directory in the container. Use the special value `'auto'` to create a temporary directory each time a container is created. +`charliecloud.writableInputMounts` +: When `false`, mount input directories as read-only (default: `true`). 
+ `charliecloud.writeFake` : Run containers from storage in writeable mode using overlayfs (default: `true`). : This option requires unprivileged `overlayfs` (Linux kernel >= 5.11). For full support, tempfs with xattrs in the user namespace (Linux kernel >= 6.6) is required. See [charliecloud documentation](https://hpc.github.io/charliecloud/ch-run.html#ch-run-overlay) for details. @@ -646,6 +649,9 @@ The following settings are available: `docker.tty` : Allocates a pseudo-tty (default: `false`). +`docker.writableInputMounts` +: When `false`, mount input directories as read-only (default: `true`). + (config-env)= ## `env` diff --git a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy index 3c628699ab..e2e959291b 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudBuilder.groovy @@ -45,6 +45,9 @@ class CharliecloudBuilder extends ContainerBuilder { if( config.temp ) this.temp = config.temp + if( !config.writableInputMounts ) + this.readOnlyInputs = true + this.writeFake = config.writeFake } @@ -58,9 +61,6 @@ class CharliecloudBuilder extends ContainerBuilder { if( params.containsKey('entry') ) this.entryPoint = params.entry - if( params.containsKey('readOnlyInputs') ) - this.readOnlyInputs = params.readOnlyInputs - return this } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy index 9f817a462e..4633ba70f9 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/CharliecloudConfig.groovy @@ -71,6 +71,12 @@ class CharliecloudConfig implements ConfigScope, ContainerConfig { """) final String temp + @ConfigOption + @Description(""" + When 
`false`, mount input directories as read-only (default: `true`). + """) + final boolean writableInputMounts + @ConfigOption @Description(""" Run containers from storage in writeable mode using overlayfs (default: `true`). @@ -88,6 +94,7 @@ class CharliecloudConfig implements ConfigScope, ContainerConfig { registry = opts.registry runOptions = opts.runOptions temp = opts.temp + writableInputMounts = opts.writableInputMounts != null ? opts.writableInputMounts as boolean : true writeFake = opts.writeFake != null ? opts.writeFake as boolean : true } diff --git a/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy index 9d4c3b62ff..6d7525e630 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/ContainerConfig.groovy @@ -54,10 +54,6 @@ interface ContainerConfig { return null } - default boolean writableInputMounts() { - return true - } - String getEngine() List getEnvWhitelist() diff --git a/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy index bb99d10c9e..de392de22f 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/DockerBuilder.groovy @@ -74,6 +74,9 @@ class DockerBuilder extends ContainerBuilder { this.temp = config.temp this.tty = config.tty + + if( !config.writableInputMounts ) + this.readOnlyInputs = true } DockerBuilder(String name) { @@ -90,9 +93,6 @@ class DockerBuilder extends ContainerBuilder { if( params.containsKey('kill') ) this.kill = params.kill - if( params.containsKey('readOnlyInputs') ) - this.readOnlyInputs = params.readOnlyInputs - if( params.containsKey('privileged') ) this.privileged = params.privileged diff --git 
a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy index 27c62dc642..b4f0a32b01 100644 --- a/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/container/DockerConfig.groovy @@ -115,6 +115,12 @@ class DockerConfig implements ConfigScope, ContainerConfig { """) final boolean tty + @ConfigOption + @Description(""" + When `false`, mount input directories as read-only (default: `true`). + """) + final boolean writableInputMounts + /* required by extension point -- do not remove */ DockerConfig() {} @@ -133,6 +139,7 @@ class DockerConfig implements ConfigScope, ContainerConfig { sudo = opts.sudo as boolean temp = opts.temp tty = opts.tty as boolean + writableInputMounts = opts.writableInputMounts != null ? opts.writableInputMounts as boolean : true if( opts.userEmulation ) log.warn1("Config setting `docker.userEmulation` is not supported anymore") diff --git a/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy b/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy index d6e9a8bb7d..580fa952cb 100644 --- a/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy +++ b/modules/nextflow/src/main/groovy/nextflow/executor/BashWrapperBuilder.groovy @@ -740,9 +740,6 @@ class BashWrapperBuilder { if( containerConfig.getKill() != null ) builder.params(kill: containerConfig.getKill()) - if( containerConfig.writableInputMounts()==false ) - builder.params(readOnlyInputs: true) - if( containerConfig.entrypointOverride() ) builder.params(entry: '/bin/bash') diff --git a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy index d948021405..ce4acd81ff 100644 --- 
a/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/CharliecloudBuilderTest.groovy @@ -44,8 +44,7 @@ class CharliecloudBuilderTest extends Specification { .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' - new CharliecloudBuilder('/cacheDir/busybox', new CharliecloudConfig(writeFake: false)) - .params(readOnlyInputs: true) + new CharliecloudBuilder('/cacheDir/busybox', new CharliecloudConfig(writableInputMounts: false, writeFake: false)) .build() .runCommand == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -b "$NXF_TASK_WORKDIR" /cacheDir/busybox --' @@ -110,18 +109,16 @@ class CharliecloudBuilderTest extends Specification { cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu', new CharliecloudConfig(writableInputMounts: false)) .params(entry:'/bin/sh') - .params(readOnlyInputs: true) .build() .getRunCommand('bwa --this --that file.fastq') then: cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env --write-fake -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - cmd = new CharliecloudBuilder('/cacheDir/ubuntu') + cmd = new CharliecloudBuilder('/cacheDir/ubuntu', new CharliecloudConfig(writableInputMounts: false)) .params(entry:'/bin/sh') - .params(readOnlyInputs: false) .build() .getRunCommand('bwa --this --that file.fastq') then: @@ -131,17 +128,15 @@ class CharliecloudBuilderTest extends Specification { config = new CharliecloudConfig(writeFake: false) cmd = new CharliecloudBuilder('/cacheDir/ubuntu', config) .params(entry:'/bin/sh') - .params(readOnlyInputs: false) .build() .getRunCommand('bwa --this --that 
file.fastq') then: cmd == 'ch-run --unset-env="*" -c "$NXF_TASK_WORKDIR" --set-env -w -b "$NXF_TASK_WORKDIR" /cacheDir/ubuntu -- /bin/sh -c "bwa --this --that file.fastq"' when: - config = new CharliecloudConfig(writeFake: false) + config = new CharliecloudConfig(writableInputMounts: false, writeFake: false) cmd = new CharliecloudBuilder('/cacheDir/ubuntu', config) .params(entry:'/bin/sh') - .params(readOnlyInputs: true) .addMount(db_file) .addMount(db_file) .build().getRunCommand('bwa --this --that file.fastq') @@ -151,7 +146,6 @@ class CharliecloudBuilderTest extends Specification { when: cmd = new CharliecloudBuilder('/cacheDir/ubuntu') .params(entry:'/bin/sh') - .params(readOnlyInputs: false) .addMount(db_file) .addMount(db_file) .build() diff --git a/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy b/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy index eaf2742443..594e18fde0 100644 --- a/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy +++ b/modules/nextflow/src/test/groovy/nextflow/container/DockerBuilderTest.groovy @@ -110,13 +110,11 @@ class DockerBuilderTest extends Specification { .build() .runCommand == 'docker run -i -e "FOO=1" -e "BAR=hello world" -v /home/db:/home/db -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new DockerBuilder('fedora') - .params(readOnlyInputs: true) + new DockerBuilder('fedora', new DockerConfig(writableInputMounts: false)) .addMount(db_file) .build() .runCommand == 'docker run -i -v /home/db:/home/db:ro -v "$NXF_TASK_WORKDIR":"$NXF_TASK_WORKDIR" -w "$NXF_TASK_WORKDIR" fedora' - new DockerBuilder('fedora', new DockerConfig(mountFlags: 'Z')) .addMount(db_file) .build() From 618130126976310a565874d84ef1da380f546ec7 Mon Sep 17 00:00:00 2001 From: Paolo Di Tommaso Date: Thu, 7 Aug 2025 17:22:16 +0200 Subject: [PATCH 6/7] [e2e prod] run tests Signed-off-by: Paolo Di Tommaso From 68355c7a2e5c365e989d3f165fed420e1d09ae65 
Mon Sep 17 00:00:00 2001 From: Ben Sherman Date: Thu, 7 Aug 2025 12:02:03 -0500 Subject: [PATCH 7/7] Disable plugin-registry.nf e2e test Signed-off-by: Ben Sherman --- tests/checks/.IGNORE | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/checks/.IGNORE b/tests/checks/.IGNORE index 0144b46cc1..2f46354ac7 100644 --- a/tests/checks/.IGNORE +++ b/tests/checks/.IGNORE @@ -1 +1,2 @@ -# TEST TO BE IGNORED \ No newline at end of file +# TEST TO BE IGNORED +plugin-registry.nf \ No newline at end of file