Merge branch 'CW-4398-base-config' into 'dev'
Initialise base.config

Closes CW-4398

See merge request epi2melabs/workflows/wf-basecalling!92
mattdmem committed Oct 9, 2024
2 parents 7057ec5 + 70215ac commit e837d86
Showing 2 changed files with 126 additions and 130 deletions.
123 changes: 123 additions & 0 deletions base.config
@@ -0,0 +1,123 @@
params {
wf {
basecaller_container = "ontresearch/dorado"
container_sha = "shae36d1b49fe470a60e006afad90bedd2fc2774a89"
bonito_container = "ontresearch/bonito"
bonito_sha = "shaea43ca2333f91fa78a823f640ba158e4268f1f98"
common_sha = "shad28e55140f75a68f59bbecc74e880aeab16ab158"
}
}


// used by default for "standard" (docker) and singularity profiles,
// other profiles may override.
process {
withLabel:wf_basecalling {
container = "${params.wf.basecaller_container}:${params.wf.container_sha}"
}
withLabel:wf_common {
container = "ontresearch/wf-common:${params.wf.common_sha}"
}

shell = ['/bin/bash', '-euo', 'pipefail']

// by default GPU tasks run in serial so that concurrent tasks do not contend for the same GPU.
// cluster and cloud users can remove this limit with -profile discrete_gpus.
// we use profiles to handle this as maxForks cannot be set dynamically
// see https://github.com/nextflow-io/nextflow/discussions/3806 and CW-1857
withLabel:gpu {
maxForks = 1
}
}

profiles {
// the "standard" profile is used implicitely by nextflow
// if no other profile is given on the CLI
standard {
docker {
enabled = true
// this ensures the container runs as the host user and group, but
// also adds the host user to the within-container group
runOptions = "--user \$(id -u):\$(id -g) --group-add 100"
}
process."withLabel:gpu".containerOptions = "--gpus all"
}

// using singularity instead of docker
singularity {
singularity {
enabled = true
autoMounts = true
//envWhitelist = "" // if your cluster sets a variable to indicate which GPU has been assigned, allow it here
}
process."withLabel:gpu".containerOptions = "--nv"
}


// keep a stub conda profile to prevent an unknown-profile warning, so users get a clearer error
conda {
conda.enabled = true
}


// Using AWS Batch.
// You may need to set aws.region and aws.batch.cliPath
awsbatch {
process {
executor = 'awsbatch'
queue = "${params.aws_queue}"
memory = "16 GB" // likely not enough!
withLabel:wf_common {
container = "${params.aws_image_prefix}-wf-common:${params.wf.common_sha}"
}
shell = ['/bin/bash', '-euo', 'pipefail']

// lift the limit on simultaneous GPU jobs for cloud,
// and ensure the host mounts the relevant driver libraries inside the container
withLabel:gpu {
maxForks = null
containerOptions = "-e NVIDIA_DRIVER_CAPABILITIES=compute,utility --gpus all"
}
withLabel:wf_basecalling {
container = "${params.aws_image_prefix}-dorado:${params.wf.container_sha}"
}
withLabel:wf_bonito {
container = "${params.aws_image_prefix}-bonito:${params.wf.bonito_sha}"
}
}
}

// local profile for simplified development testing
local {
process.executor = 'local'
}

// lift limit on simultaneous gpu jobs
discrete_gpus {
process."withLabel:gpu".maxForks = null
}
}


timeline {
enabled = true
file = "${params.out_dir}/execution/timeline.html"
overwrite = true
}
report {
enabled = true
file = "${params.out_dir}/execution/report.html"
overwrite = true
}
trace {
enabled = true
file = "${params.out_dir}/execution/trace.txt"
overwrite = true
}

env {
PYTHONNOUSERSITE = 1
JAVA_TOOL_OPTIONS = "-Xlog:disable -Xlog:all=warning:stderr"
}

cleanup = true
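
With the shared settings now in base.config, a downstream config only needs to include it and override the few values that differ, which is exactly what the nextflow.config change below does. As a minimal sketch, assuming standard Nextflow includeConfig semantics (later assignments win), a deployment-specific override could look like this; the SHA and memory value are illustrative, not taken from this commit:

// hypothetical override config: pull in the shared core,
// then change only what this deployment needs
includeConfig "base.config"

// pin a different Dorado build (illustrative SHA, not a real digest)
params.wf.container_sha = "sha0000000000000000000000000000000000000000"

// give every task a site-wide default memory limit
process.memory = "8 GB"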
133 changes: 3 additions & 130 deletions nextflow.config
@@ -1,15 +1,8 @@
//
// Notes to End Users.
//
// The workflow should run without editing this configuration file,
// however there may be instances in which you wish to edit this
// file for compute performance or other reasons. Please see:
//
// https://nextflow.io/docs/latest/config.html#configuration
//
// for further help editing this file.
// import profiles and workflow SHA from core
includeConfig "base.config"


// define workflow params
params {
help = false
version = false
@@ -65,11 +58,6 @@ params {
igv = false

wf {
basecaller_container = "ontresearch/dorado"
container_sha = "shae36d1b49fe470a60e006afad90bedd2fc2774a89"
bonito_container = "ontresearch/bonito"
bonito_sha = "shaea43ca2333f91fa78a823f640ba158e4268f1f98"
common_sha = "shad28e55140f75a68f59bbecc74e880aeab16ab158"
example_cmd = [
"--basecaller_cfg '[email protected]'",
"--dorado_ext 'pod5'",
@@ -95,118 +83,3 @@ epi2melabs {
tags = "basecalling,utility"
icon = "faTty"
}


// used by default for "standard" (docker) and singularity profiles,
// other profiles may override.
process {
withLabel:wf_basecalling {
container = "${params.wf.basecaller_container}:${params.wf.container_sha}"
}
withLabel:wf_common {
container = "ontresearch/wf-common:${params.wf.common_sha}"
}

shell = ['/bin/bash', '-euo', 'pipefail']

// by default GPU tasks run in serial so that concurrent tasks do not contend for the same GPU.
// cluster and cloud users can remove this limit with -profile discrete_gpus.
// we use profiles to handle this as maxForks cannot be set dynamically
// see https://github.com/nextflow-io/nextflow/discussions/3806 and CW-1857
withLabel:gpu {
maxForks = 1
}
}


profiles {
// the "standard" profile is used implicitely by nextflow
// if no other profile is given on the CLI
standard {
docker {
enabled = true
// this ensures the container runs as the host user and group, but
// also adds the host user to the within-container group
runOptions = "--user \$(id -u):\$(id -g) --group-add 100"
}
process."withLabel:gpu".containerOptions = "--gpus all"
}

// using singularity instead of docker
singularity {
singularity {
enabled = true
autoMounts = true
//envWhitelist = "" // if your cluster sets a variable to indicate which GPU has been assigned, allow it here
}
process."withLabel:gpu".containerOptions = "--nv"
}


// keep a stub conda profile to prevent an unknown-profile warning, so users get a clearer error
conda {
conda.enabled = true
}


// Using AWS Batch.
// You may need to set aws.region and aws.batch.cliPath
awsbatch {
process {
executor = 'awsbatch'
queue = "${params.aws_queue}"
memory = "16 GB" // likely not enough!
withLabel:wf_common {
container = "${params.aws_image_prefix}-wf-common:${params.wf.common_sha}"
}
shell = ['/bin/bash', '-euo', 'pipefail']

// lift the limit on simultaneous GPU jobs for cloud,
// and ensure the host mounts the relevant driver libraries inside the container
withLabel:gpu {
maxForks = null
containerOptions = "-e NVIDIA_DRIVER_CAPABILITIES=compute,utility --gpus all"
}
withLabel:wf_basecalling {
container = "${params.aws_image_prefix}-dorado:${params.wf.container_sha}"
}
withLabel:wf_bonito {
container = "${params.aws_image_prefix}-bonito:${params.wf.bonito_sha}"
}
}
}

// local profile for simplified development testing
local {
process.executor = 'local'
}

// lift limit on simultaneous gpu jobs
discrete_gpus {
process."withLabel:gpu".maxForks = null
}
}


timeline {
enabled = true
file = "${params.out_dir}/execution/timeline.html"
overwrite = true
}
report {
enabled = true
file = "${params.out_dir}/execution/report.html"
overwrite = true
}
trace {
enabled = true
file = "${params.out_dir}/execution/trace.txt"
overwrite = true
}

env {
PYTHONNOUSERSITE = 1
JAVA_TOOL_OPTIONS = "-Xlog:disable -Xlog:all=warning:stderr"
}

cleanup = true
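
The envWhitelist hook left commented out in the singularity profile is the intended place for cluster-specific GPU handling. As a hedged sketch of a site-level override (for example in ~/.nextflow/config), assuming a scheduler that exports CUDA_VISIBLE_DEVICES or similar; the variable names are examples, not something this commit sets:

// hypothetical site override for a GPU cluster
singularity {
    enabled = true
    autoMounts = true
    // whitelist whatever variable your scheduler sets to mark the
    // assigned GPU; CUDA_VISIBLE_DEVICES and SLURM_JOB_GPUS are examples
    envWhitelist = "CUDA_VISIBLE_DEVICES,SLURM_JOB_GPUS"
}

// let the scheduler, rather than maxForks = 1, arbitrate GPU access,
// mirroring what -profile discrete_gpus does
process."withLabel:gpu".maxForks = null

With such a whitelist in place, GPU tasks launched under -profile singularity each see only the device the scheduler assigned, so the serial maxForks guard can be lifted safely.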
