diff --git a/.travis.yml b/.travis.yml index c7b117c85..ba99d8215 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,28 +1,33 @@ sudo: required -language: java +language: python jdk: openjdk8 services: docker +python: '3.6' +cache: pip +matrix: + fast_finish: true -before_install: docker pull nfcore/rnaseq:latest +before_install: + # PRs to master are only ok if coming from dev branch + - '[ $TRAVIS_PULL_REQUEST = "false" ] || [ $TRAVIS_BRANCH != "master" ] || ([ $TRAVIS_PULL_REQUEST_SLUG = $TRAVIS_REPO_SLUG ] && [ $TRAVIS_PULL_REQUEST_BRANCH = "dev" ])' + # Pull the docker image first so the test doesn't wait for this + - docker pull nfcore/rnaseq + # Fake the tag locally so that the pipeline runs properly + - docker tag nfcore/rnaseq nfcore/rnaseq:1.0 install: # Install Nextflow - - mkdir /tmp/nextflow - - cd /tmp/nextflow + - mkdir /tmp/nextflow && cd /tmp/nextflow - wget -qO- get.nextflow.io | bash - sudo ln -s /tmp/nextflow/nextflow /usr/local/bin/nextflow # Install nf-core/tools - - git clone https://github.com/nf-core/tools.git /tmp/nf-core-tools - - cd /tmp/nf-core-tools - - pip install --user -e . - # Make test directories - - mkdir ${TRAVIS_BUILD_DIR}/tests - - cd ${TRAVIS_BUILD_DIR}/tests - + - pip install nf-core + # Reset + - mkdir ${TRAVIS_BUILD_DIR}/tests && cd ${TRAVIS_BUILD_DIR}/tests env: - - NXF_VER=0.30.1 - - '' + - NXF_VER='0.31.1' # Specify a minimum NF version that should be tested and work + - NXF_VER='' # Plus: get the latest NF version and check that it works script: # Lint the pipeline code diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c2a40096..8b41e1d39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,42 @@ -## nf-core/rnaseq v1.0dev -This release marks the point where the pipeline was moved from SciLifeLab/NGI-RNAseq -over to the new nf-core community, at nf-core/rnaseq. 
+# nf-core/rnaseq + +## [Version 1.0](https://github.com/nf-core/rnaseq/releases/tag/1.0) - 2018-08-20 + +This release marks the point where the pipeline was moved from [SciLifeLab/NGI-RNAseq](https://github.com/SciLifeLab/NGI-RNAseq) +over to the new [nf-core](http://nf-co.re/) community, at [nf-core/rnaseq](https://github.com/nf-core/rnaseq). View the previous changelog at [SciLifeLab/NGI-RNAseq/CHANGELOG.md](https://github.com/SciLifeLab/NGI-RNAseq/blob/master/CHANGELOG.md) + +In addition to porting to the new nf-core community, the pipeline has had a number of major changes in this version. +There have been 157 commits by 16 different contributors covering 70 different files in the pipeline: 7,357 additions and 8,236 deletions! + +In summary, the main changes are: + +* Rebranding and renaming throughout the pipeline to nf-core +* Updating many parts of the pipeline config and style to meet nf-core standards +* Support for GFF files in addition to GTF files + * Just use `--gff` instead of `--gtf` when specifying a file path +* New command line options to skip various quality control steps +* More safety checks when launching a pipeline + * Several new sanity checks - for example, that the specified reference genome exists +* Improved performance with memory usage (especially STAR and Picard) +* New BigWig file outputs for plotting coverage across the genome +* Refactored gene body coverage calculation, now much faster and using much less memory +* Bugfixes in the MultiQC process to avoid edge cases where it wouldn't run +* MultiQC report now automatically attached to the email sent when the pipeline completes +* New testing method, with data on GitHub + * Now run pipeline with `-profile test` instead of using bash scripts +* Rewritten continuous integration tests with Travis CI +* New explicit support for Singularity containers +* Improved MultiQC support for DupRadar and featureCounts + * Now works for all users instead of just NGI Stockholm +* New 
configuration for use on AWS batch +* Updated config syntax to support latest versions of Nextflow +* Built-in support for a number of new local HPC systems + * CCGA, GIS, UCT HEX, updates to UPPMAX, CFC, BINAC, Hebbe, c3se +* Slightly improved documentation (more updates to come) +* Updated software packages + +...and many more minor tweaks. + +Thanks to everyone who has worked on this release! diff --git a/Dockerfile b/Dockerfile index d4fdb9492..1c7574a5d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,4 +4,5 @@ LABEL authors="phil.ewels@scilifelab.se" \ description="Docker image containing all requirements for the nfcore/rnaseq pipeline" COPY environment.yml / -RUN conda env update -n root -f /environment.yml && conda clean -a +RUN conda env create -f /environment.yml && conda clean -a +ENV PATH /opt/conda/envs/nf-core-rnaseq-1.0/bin:$PATH diff --git a/README.md b/README.md index 2cedeabd0..3a769ec03 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # ![nfcore/rnaseq](docs/images/nfcore-rnaseq_logo.png) [![Build Status](https://travis-ci.org/nf-core/rnaseq.svg?branch=master)](https://travis-ci.org/nf-core/rnaseq) -[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.30.1-brightgreen.svg)](https://www.nextflow.io/) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.31.1-brightgreen.svg)](https://www.nextflow.io/) [![Gitter](https://img.shields.io/badge/gitter-%20join%20chat%20%E2%86%92-4fb99a.svg)](https://gitter.im/nf-core/Lobby) [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](http://bioconda.github.io/) diff --git a/Singularity b/Singularity index d286cbf35..fa004de48 100644 --- a/Singularity +++ b/Singularity @@ -3,12 +3,16 @@ Bootstrap:docker %labels MAINTAINER Phil Ewels - DESCRIPTION Container image containing all requirements for the nf-core/rnaseq pipeline - VERSION 1.0dev + DESCRIPTION Singularity image containing all requirements for the nf-core/rnaseq pipeline + VERSION 1.0 + 
+%environment + PATH=/opt/conda/envs/nf-core-rnaseq-1.0/bin:$PATH + export PATH %files environment.yml / %post - /opt/conda/bin/conda env update -n root -f /environment.yml + /opt/conda/bin/conda env create -f /environment.yml /opt/conda/bin/conda clean -a diff --git a/bin/scrape_software_versions.py b/bin/scrape_software_versions.py index ba6e83ed5..eb7eb55c5 100755 --- a/bin/scrape_software_versions.py +++ b/bin/scrape_software_versions.py @@ -14,6 +14,7 @@ 'Picard MarkDuplicates': ['v_markduplicates.txt', r"([\d\.]+)-SNAPSHOT"], 'Samtools': ['v_samtools.txt', r"samtools (\S+)"], 'featureCounts': ['v_featurecounts.txt', r"featureCounts v(\S+)"], + 'deepTools': ['v_deeptools.txt', r"bamCoverage (\S+)"], 'StringTie': ['v_stringtie.txt', r"(\S+)"], 'Preseq': ['v_preseq.txt', r"Version: (\S+)"], 'RSeQC': ['v_rseqc.txt', r"read_duplication.py ([\d\.]+)"], @@ -32,6 +33,7 @@ results['featureCounts'] = 'N/A' results['StringTie'] = 'N/A' results['Preseq'] = 'N/A' +results['deepTools'] = 'N/A' results['RSeQC'] = 'N/A' results['MultiQC'] = 'N/A' diff --git a/conf/base.config b/conf/base.config index 24fa6dce0..c78239b5b 100644 --- a/conf/base.config +++ b/conf/base.config @@ -13,78 +13,76 @@ process { container = params.container - cpus = { check_max( 1 * task.attempt, 'cpus' ) } + cpus = { check_max( 2, 'cpus' ) } memory = { check_max( 8.GB * task.attempt, 'memory' ) } time = { check_max( 2.h * task.attempt, 'time' ) } - errorStrategy = { task.exitStatus in [143,137] ? 'retry' : 'terminate' } - maxRetries = 1 + errorStrategy = { task.exitStatus in [1,143,137,104,134,139] ? 
'retry' : 'terminate' } + maxRetries = 3 maxErrors = '-1' // Process-specific resource requirements withName:makeSTARindex { - cpus = { check_max( 10 * task.attempt, 'cpus' ) } + cpus = { check_max( 10, 'cpus' ) } memory = { check_max( 80.GB * task.attempt, 'memory' ) } time = { check_max( 5.h * task.attempt, 'time' ) } } withName:makeHISATindex { - cpus = { check_max( 10 * task.attempt, 'cpus' ) } + cpus = { check_max( 10, 'cpus' ) } memory = { check_max( 80.GB * task.attempt, 'memory' ) } time = { check_max( 5.h * task.attempt, 'time' ) } } - withName:fastqc { - errorStrategy = { task.exitStatus in [143,137] ? 'retry' : 'ignore' } - } withName:trim_galore { - cpus = { check_max( 2 * task.attempt, 'cpus' ) } memory = { check_max( 16.GB * task.attempt, 'memory' ) } time = { check_max( 8.h * task.attempt, 'time' ) } } withName:star { - cpus = { check_max( 10 * task.attempt, 'cpus' ) } + cpus = { check_max (10, 'cpus')} memory = { check_max( 80.GB * task.attempt, 'memory' ) } time = { check_max( 8.h * task.attempt, 'time' ) } } withName:hisat2Align { - cpus = { check_max( 8 * task.attempt, 'cpus' ) } + cpus = { check_max( 8, 'cpus' ) } memory = { check_max( 64.GB * task.attempt, 'memory' ) } time = { check_max( 8.h * task.attempt, 'time' ) } } withName:hisat2_sortOutput { - cpus = { check_max( 4 * task.attempt, 'cpus' ) } + cpus = { check_max( 4, 'cpus' ) } memory = { check_max( 32.GB * task.attempt, 'memory' ) } time = { check_max( 8.h * task.attempt, 'time' ) } } withName:rseqc { + cpus = { check_max( 8, 'cpus' ) } + memory = { check_max( 32.GB * task.attempt, 'memory' ) } + time = { check_max( 7.h * task.attempt, 'time' ) } + errorStrategy = 'ignore' + } + withName:createBigWig { cpus = { check_max( 8 * task.attempt, 'cpus' ) } memory = { check_max( 32.GB * task.attempt, 'memory' ) } time = { check_max( 7.h * task.attempt, 'time' ) } - errorStrategy = { task.exitStatus in [143,137] ? 
'retry' : 'ignore' } } withName:genebody_coverage { - cpus = { check_max( 1 * task.attempt, 'cpus' ) } + cpus = { check_max( 1, 'cpus' ) } memory = { check_max( 32.GB * task.attempt, 'memory' ) } time = { check_max( 7.h * task.attempt, 'time' ) } - errorStrategy = { task.exitStatus in [143,137] ? 'retry' : 'ignore' } - } - withName:preseq { - errorStrategy = { task.exitStatus in [143,137] ? 'retry' : 'ignore' } } withName:markDuplicates { - cpus = { check_max( 2 * task.attempt, 'cpus' ) } memory = { check_max( 16.GB * task.attempt, 'memory' ) } } withName:dupradar { - cpus = { check_max( 2 * task.attempt, 'cpus' ) } + cpus = { check_max( 1, 'cpus' ) } + memory = { check_max( 16.GB * task.attempt, 'memory' ) } + } + withName:featureCounts { memory = { check_max( 16.GB * task.attempt, 'memory' ) } } withName:sample_correlation { - cpus = { check_max( 2 * task.attempt, 'cpus' ) } memory = { check_max( 16.GB * task.attempt, 'memory' ) } } withName:multiqc { memory = { check_max( 2.GB * task.attempt, 'memory' ) } - errorStrategy = { task.exitStatus in [143,137] ? 
'retry' : 'ignore' } + errorStrategy = 'ignore' } withName:get_software_versions { memory = { check_max( 2.GB, 'memory' ) } @@ -94,7 +92,6 @@ process { withName:workflow_summary_mqc { memory = { check_max( 2.GB, 'memory' ) } cache = false - executor = 'local' errorStrategy = 'ignore' } } @@ -105,5 +102,4 @@ params { max_cpus = 16 max_time = 240.h igenomes_base = 's3://ngi-igenomes/igenomes/' - maxMultiqcEmailFileSize = 25.MB } diff --git a/environment.yml b/environment.yml index c205a77ac..6f78e33ea 100644 --- a/environment.yml +++ b/environment.yml @@ -1,6 +1,6 @@ # You can use this file to create a conda environment for this pipeline: # conda env create -f environment.yml -name: nfcore-rnaseq-1.0dev +name: nf-core-rnaseq-1.0 channels: - bioconda - conda-forge @@ -8,10 +8,10 @@ channels: dependencies: - conda-forge::openjdk=8.0.144 # Needed for FastQC - conda build hangs without this - fastqc=0.11.7 - - trim-galore=0.5 - - star=2.6.0c + - trim-galore=0.5.0 + - star=2.6.1a - hisat2=2.1.0 - - picard=2.18.7 + - picard=2.18.11 - bioconductor-dupradar=1.8.0 - conda-forge::r-data.table=1.11.4 - conda-forge::r-gplots=3.0.1 @@ -21,7 +21,7 @@ dependencies: - rseqc=2.6.4 - samtools=1.9 - stringtie=1.3.4 - - subread=1.6.1 + - subread=1.6.2 - gffread=0.9.9 + - deeptools=3.1.1 - multiqc=1.6 - - deeptools=3.1.1 \ No newline at end of file diff --git a/main.nf b/main.nf index 9098207b2..a1abc371c 100644 --- a/main.nf +++ b/main.nf @@ -13,10 +13,17 @@ */ def helpMessage() { - log.info""" - =================================== - nfcore/rnaseq ~ version ${params.version} - =================================== + log.info """ + ======================================================= + ,--./,-. 
+ ___ __ __ __ ___ /,-._.--~\' + |\\ | |__ __ / ` / \\ |__) |__ } { + | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-, + `._,._,\' + + nf-core/rnaseq : RNA-Seq Best Practice v${params.pipelineVersion} + ======================================================= + Usage: The typical command for running the pipeline is as follows: @@ -65,6 +72,7 @@ def helpMessage() { --clusterOptions Extra SLURM options, used in conjunction with Uppmax.config --maxMultiqcEmailFileSize Threshold size for MultiQC report to be attached in notification email. If file generated by pipeline exceeds the threshold, it will not be attached (Default: 25MB) -name Name for the pipeline run. If not specified, Nextflow will automatically generate a random mnemonic. + --seqCenter Add sequencing center in @RG line of output BAM header QC options: --skip_qc Skip all QC steps aside from MultiQC @@ -94,8 +102,8 @@ if (params.help){ } // Check if genome exists in the config file -if (!params.genomes.containsKey(params.genome) && params.genome) { - exit 1, "The provided genome '${params.genome}' is not available in the iGenomes file. Currently the available genomes are ${params.genomes.keySet().join(", ")}" +if (params.genomes && params.genome && !params.genomes.containsKey(params.genome)) { - exit 1, "The provided genome '${params.genome}' is not available in the iGenomes file. Currently the available genomes are ${params.genomes.keySet().join(", ")}" } // Configurable variables @@ -111,6 +119,7 @@ params.hisat2_index = params.genome ?
params.genomes[ params.genome ].hisat2 ?: params.multiqc_config = "$baseDir/assets/multiqc_config.yaml" params.email = false params.plaintext_email = false +params.seqCenter = false params.skip_qc = false params.skip_fastqc = false params.skip_rseqc = false @@ -244,7 +253,7 @@ log.info """======================================================= | \\| | \\__, \\__/ | \\ |___ \\`-._,-`-, `._,._,\' - nf-core/rnaseq : RNA-Seq Best Practice v${params.version} + nf-core/rnaseq : RNA-Seq Best Practice v${params.pipelineVersion} =======================================================""" def summary = [:] summary['Run Name'] = custom_runName ?: workflow.runName @@ -294,20 +303,6 @@ log.info summary.collect { k,v -> "${k.padRight(15)}: $v" }.join("\n") log.info "=========================================" -// Check that Nextflow version is up to date enough -// try / throw / catch works for NF versions < 0.25 when this was implemented -try { - if( ! nextflow.version.matches(">= $params.nf_required_version") ){ - throw GroovyException('Nextflow version too old') - } -} catch (all) { - log.error "====================================================\n" + - " Nextflow version $params.nf_required_version required! You are running v$workflow.nextflow.version.\n" + - " Pipeline execution will continue, but things may break.\n" + - " Please run `nextflow self-update` to update Nextflow.\n" + - "============================================================" -} - // Show a big error message if we're running on the base config and an uppmax cluster if( workflow.profile == 'standard'){ if ( "hostname".execute().text.contains('.uppmax.uu.se') ) { @@ -575,6 +570,7 @@ if(params.aligner == 'star'){ script: prefix = reads[0].toString() - ~/(_R1)?(_trimmed)?(_val_1)?(\.fq)?(\.fastq)?(\.gz)?$/ def avail_mem = task.memory == null ? '' : "--limitBAMsortRAM ${task.memory.toBytes() - 100000000}" + seqCenter = params.seqCenter ? 
"--outSAMattrRGline ID:$prefix 'CN:$params.seqCenter'" : '' """ STAR --genomeDir $index \\ --sjdbGTFfile $gtf \\ @@ -585,14 +581,14 @@ if(params.aligner == 'star'){ --outSAMtype BAM SortedByCoordinate $avail_mem \\ --readFilesCommand zcat \\ --runDirPerm All_RWX \\ - --outFileNamePrefix $prefix + --outFileNamePrefix $prefix $seqCenter \\ """ } // Filter removes all 'aligned' channels that fail the check star_aligned .filter { logs, bams -> check_log(logs) } .flatMap { logs, bams -> bams } - .into { bam_count; bam_rseqc; bam_preseq; bam_markduplicates; bam_featurecounts; bam_stringtieFPKM; bam_forSubsamp; bam_skipSubsamp } + .into { bam_count; bam_rseqc; bam_preseq; bam_markduplicates; bam_featurecounts; bam_stringtieFPKM; bam_for_genebody } } @@ -625,6 +621,7 @@ if(params.aligner == 'hisat2'){ script: index_base = hs2_indices[0].toString() - ~/.\d.ht2/ prefix = reads[0].toString() - ~/(_R1)?(_trimmed)?(_val_1)?(\.fq)?(\.fastq)?(\.gz)?$/ + seqCenter = params.seqCenter ? "--rg-id ${prefix} --rg CN:${params.seqCenter.replaceAll('\\s','_')}" : '' def rnastrandness = '' if (forward_stranded && !unstranded){ rnastrandness = params.singleEnd ? 
'--rna-strandness F' : '--rna-strandness FR' @@ -640,7 +637,7 @@ if(params.aligner == 'hisat2'){ -p ${task.cpus} \\ --met-stderr \\ --new-summary \\ - --summary-file ${prefix}.hisat2_summary.txt \\ + --summary-file ${prefix}.hisat2_summary.txt $seqCenter \\ | samtools view -bS -F 4 -F 256 - > ${prefix}.bam """ } else { @@ -655,7 +652,7 @@ if(params.aligner == 'hisat2'){ -p ${task.cpus} \\ --met-stderr \\ --new-summary \\ - --summary-file ${prefix}.hisat2_summary.txt \\ + --summary-file ${prefix}.hisat2_summary.txt $seqCenter \\ | samtools view -bS -F 4 -F 8 -F 256 - > ${prefix}.bam """ } @@ -675,7 +672,7 @@ if(params.aligner == 'hisat2'){ file wherearemyfiles output: - file "${hisat2_bam.baseName}.sorted.bam" into bam_count, bam_rseqc, bam_preseq, bam_markduplicates, bam_featurecounts, bam_stringtieFPKM, bam_forSubsamp, bam_skipSubsamp + file "${hisat2_bam.baseName}.sorted.bam" into bam_count, bam_rseqc, bam_preseq, bam_markduplicates, bam_featurecounts, bam_stringtieFPKM, bam_for_genebody file "where_are_my_files.txt" script: @@ -748,36 +745,35 @@ process rseqc { """ } + /* - * Step 4.1 Subsample the BAM files if necessary + * Step 4.1 Rseqc create BigWig coverage */ -bam_forSubsamp - .filter { it.size() > params.subsampFilesizeThreshold } - .map { [it, params.subsampFilesizeThreshold / it.size() ] } - .set{ bam_forSubsampFiltered } -bam_skipSubsamp - .filter { it.size() <= params.subsampFilesizeThreshold } - .set{ bam_skipSubsampFiltered } -process bam_subsample { - tag "${bam.baseName - '.sorted'}" + +process createBigWig { + tag "${bam.baseName - 'sortedByCoord.out'}" + publishDir "${params.outdir}/bigwig", mode: 'copy' + + when: + !params.skip_qc && !params.skip_genebody_coverage input: - set file(bam), val(fraction) from bam_forSubsampFiltered + file bam from bam_for_genebody output: - file "*_subsamp.bam" into bam_subsampled + file "*.bigwig" into bigwig_for_genebody script: """ - samtools view -s $fraction -b $bam | samtools sort -o 
${bam.baseName}_subsamp.bam + samtools index $bam + bamCoverage -b $bam -p ${task.cpus} -o ${bam.baseName}.bigwig """ } - /* * Step 4.2 Rseqc genebody_coverage */ process genebody_coverage { - tag "${bam.baseName - '.sorted'}" + tag "${bigwig.baseName}" publishDir "${params.outdir}/rseqc" , mode: 'copy', saveAs: {filename -> if (filename.indexOf("geneBodyCoverage.curves.pdf") > 0) "geneBodyCoverage/$filename" @@ -791,7 +787,7 @@ process genebody_coverage { !params.skip_qc && !params.skip_genebody_coverage input: - file bam from bam_subsampled.concat(bam_skipSubsampFiltered) + file bigwig from bigwig_for_genebody file bed12 from bed_genebody_coverage.collect() output: @@ -799,12 +795,10 @@ process genebody_coverage { script: """ - samtools index $bam - geneBody_coverage.py \\ - -i $bam \\ - -o ${bam.baseName}.rseqc \\ + geneBody_coverage2.py \\ + -i $bigwig \\ + -o ${bigwig.baseName}.rseqc.txt \\ -r $bed12 - mv log.txt ${bam.baseName}.rseqc.log.txt """ } @@ -855,7 +849,7 @@ process markDuplicates { avail_mem = task.memory.toGiga() } """ - picard MarkDuplicates \\ + picard -Xmx${avail_mem}g MarkDuplicates \\ INPUT=$bam \\ OUTPUT=${bam.baseName}.markDups.bam \\ METRICS_FILE=${bam.baseName}.markDups_metrics.txt \\ @@ -1051,7 +1045,7 @@ process get_software_versions { script: """ - echo $params.version &> v_ngi_rnaseq.txt + echo $params.pipelineVersion &> v_ngi_rnaseq.txt echo $workflow.nextflow.version &> v_nextflow.txt fastqc --version &> v_fastqc.txt cutadapt --version &> v_cutadapt.txt @@ -1061,6 +1055,7 @@ process get_software_versions { stringtie --version &> v_stringtie.txt preseq &> v_preseq.txt read_duplication.py --version &> v_rseqc.txt + echo \$(bamCoverage --version 2>&1) > v_deeptools.txt featureCounts -v &> v_featurecounts.txt picard MarkDuplicates --version &> v_markduplicates.txt || true samtools --version &> v_samtools.txt @@ -1166,7 +1161,7 @@ workflow.onComplete { subject = "[nfcore/rnaseq] FAILED: $workflow.runName" } def email_fields = [:] - 
email_fields['version'] = params.version + email_fields['version'] = params.pipelineVersion email_fields['runName'] = custom_runName ?: workflow.runName email_fields['success'] = workflow.success email_fields['dateComplete'] = workflow.complete @@ -1252,18 +1247,6 @@ workflow.onComplete { log.info "[nfcore/rnaseq] Pipeline Complete" - try { - if( ! nextflow.version.matches(">= $params.nf_required_version") ){ - throw GroovyException('Nextflow version too old') - } - } catch (all) { - log.error "====================================================\n" + - " Nextflow version $params.nf_required_version required! You are running v$workflow.nextflow.version.\n" + - " Please be extra careful with pipeline results.\n" + - " Run `nextflow self-update` to update Nextflow.\n" + - "============================================================" - } - if(!workflow.success){ if( workflow.profile == 'standard'){ if ( "hostname".execute().text.contains('.uppmax.uu.se') ) { diff --git a/nextflow.config b/nextflow.config index 9de09a9b6..50aa65607 100644 --- a/nextflow.config +++ b/nextflow.config @@ -10,35 +10,43 @@ // Global default params, used in configs params { - version = '1.0dev' //Pipeline version - nf_required_version = '0.30.1' //Minimum version of Nextflow required - container = 'nfcore/rnaseq:latest' // Container slug. Stable releases should specify release tag! + container = 'nfcore/rnaseq:1.0' // Container slug. Stable releases should specify release tag! 
// Pipeline Options aligner = 'star' + genome = false forward_stranded = false reverse_stranded = false unstranded = false splicesites = false outdir = './results' - hisatBuildMemory = 200 // Required amount of memory in GB to build HISAT2 index with splice sites - subsampFilesizeThreshold = 10000000000 // Don't subsample BAMs for RSeQC gene_body_coverage if less than this saveReference = false saveTrimmed = false saveAlignedIntermediates = false singleEnd = false reads = "data/*{1,2}.fastq.gz" outdir = './results' + // Custom trimming options clip_r1 = 0 clip_r2 = 0 three_prime_clip_r1 = 0 three_prime_clip_r2 = 0 - sampleLevel = false - clusterOptions = false + + // AWS Batch awsqueue = false awsregion = 'eu-west-1' + + // Defaults + sampleLevel = false + clusterOptions = false + hisatBuildMemory = 200 // Required amount of memory in GB to build HISAT2 index with splice sites + subsampFilesizeThreshold = 10000000000 // Don't subsample BAMs for RSeQC gene_body_coverage if less than this + maxMultiqcEmailFileSize = 25.MB + readPaths = null tracedir = "${params.outdir}/pipeline_info" + // TODO: Remove this if/when we can. See https://github.com/nextflow-io/nextflow/issues/840 + pipelineVersion = '1.0' } profiles { @@ -130,9 +138,14 @@ dag { } manifest { - homePage = 'https://github.com/nf-core/rnaseq' + name = 'nf-core/rnaseq' description = 'Nextflow RNA-Seq analysis pipeline, part of the nf-core community.' + homePage = 'https://github.com/nf-core/rnaseq' + author = 'Phil Ewels, Rickard Hammarén' + // TODO: Define only here if/when we can. See https://github.com/nextflow-io/nextflow/issues/840 + pipelineVersion = params.pipelineVersion mainScript = 'main.nf' + nextflowVersion = '>=0.31.1' } // Function to ensure that resource requirements don't go beyond