-
Notifications
You must be signed in to change notification settings - Fork 2
/
nextflow.config
120 lines (107 loc) · 3.15 KB
/
nextflow.config
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
/*
* -------------------------------------------------
* h3achipimputation Nextflow config file
* -------------------------------------------------
* Default config options for all environments.
* Cluster-specific config options should be saved
* in the conf folder and imported under a profile
* name here.
*/
// Global default params, used in configs
params {
    version = '1.0'
    // Container slug. Stable releases should specify a release tag instead
    // of 'latest'. NOTE: a plain registry path (no 'docker://' scheme) is
    // used because Docker cannot pull URIs with the Singularity-style
    // 'docker://' prefix, while Singularity resolves plain slugs fine.
    container = 'quay.io/h3abionet_org/imputation_tools:latest'
    clusterOptions = false
    help = false
    chromosomes = "ALL" // Impute all chromosomes by default
    project_name = "h3achipimputation" // Default project name
    // Minimac4 option
    minRatio = '0.01'
    chunk = ''
    outdir = "./"
    // Resource-cap defaults consumed by check_max() below. Without these,
    // runs that skip conf/base.config (which normally overrides them via a
    // profile) would fall into check_max()'s error path on every task.
    max_memory = 128.GB
    max_cpus = 16
    max_time = 240.h
}
// Execution profiles: select with `nextflow run -profile <name>`.
profiles {
    standard {
        includeConfig 'conf/base.config'
    }
    conda { process.conda = "$baseDir/environment.yml" }
    docker {
        docker.enabled = true
        // BUG FIX: Docker needs a plain registry path here. The previous
        // 'docker://' prefix is Singularity URI syntax and makes
        // `docker pull` fail with an invalid-reference error.
        process.container = 'quay.io/h3abionet_org/imputation_tools:latest'
    }
    singularity {
        singularity.enabled = true
        singularity.autoMounts = true
        // Singularity explicitly supports the docker:// URI scheme.
        process.container = 'docker://quay.io/h3abionet_org/imputation_tools:latest'
    }
    test {
        includeConfig 'conf/base.config'
        includeConfig 'conf/test.config'
    }
    slurm {
        includeConfig 'conf/base.config'
        process.executor = 'slurm'
    }
}
// Capture exit codes from upstream processes when piping.
// -e: abort on any error; -u: error on unset variables;
// -o pipefail: a pipeline fails if ANY stage fails, not just the last.
process.shell = ['/bin/bash', '-euo', 'pipefail']
// Pipeline metadata shown by `nextflow info` and used for version checks.
manifest {
    name = 'h3achipimputation_evaluate_chips'
    description = 'imputation'
    homePage = 'https://github.com/h3abionet/chipimputation_evaluate_chips'
    mainScript = 'main.nf'
    // BUG FIX: '>=19.04.1' is a Nextflow version *requirement*, not the
    // pipeline version — it belongs in 'nextflowVersion'. 'version' is the
    // pipeline's own release, kept in sync with params.version.
    version = '1.0'
    nextflowVersion = '>=19.04.1'
}
// Interactive HTML timeline of task execution for each run.
timeline {
enabled = true
file = "${params.outdir}/execution_timeline.html"
}
// Aggregate HTML report (resource usage, task status) for each run.
report {
enabled = true
file = "${params.outdir}/execution_report.html"
}
// Tab-separated per-task trace (exit status, memory, CPU, duration).
trace {
enabled = true
file = "${params.outdir}/execution_trace.txt"
}
// Workflow DAG rendered as SVG.
dag {
enabled = true
file = "${params.outdir}/pipeline_dag.svg"
}
// Function to ensure that resource requirements don't go beyond
// a maximum limit.
//
// obj  : the requested resource (MemoryUnit, Duration, or int for cpus)
// type : one of 'memory', 'time', 'cpus'
//
// Returns the request clamped to params.max_memory / max_time / max_cpus.
// If the configured maximum cannot be parsed, logs an error and returns
// the original request unchanged. An unknown 'type' passes through as-is.
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            def max_mem = params.max_memory as nextflow.util.MemoryUnit
            // compareTo() only guarantees the SIGN of its result, not the
            // exact value 1 — test '> 0', not '== 1'.
            return obj.compareTo(max_mem) > 0 ? max_mem : obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            def max_time = params.max_time as nextflow.util.Duration
            return obj.compareTo(max_time) > 0 ? max_time : obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min(obj as int, params.max_cpus as int)
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
    // Unknown resource type: previously fell through returning null, which
    // would wipe out the caller's request — pass it through unchanged.
    return obj
}
// Completion handler: prints a short run summary (end time and overall
// success/failure) to stdout when the workflow finishes.
workflow.onComplete = {
println "========================================="
println "Pipeline completed at: $workflow.complete"
println "Execution status: ${ workflow.success ? 'OK' : 'failed' }"
}