diff --git a/README.md b/README.md index fb74ba8..524f64b 100644 --- a/README.md +++ b/README.md @@ -22,14 +22,18 @@ **nf-core/rangeland** is a geographical best-practice analysis pipeline for remotely sensed imagery. The pipeline processes satellite imagery alongside auxiliary data in multiple steps to arrive at a set of trend files related to land-cover changes. The main pipeline steps are: -1. Read satellite imagery, digital elevation model, endmember definition, water vapor database and area of interest definition +1. Read satellite imagery, digital elevation model (DEM), endmember definition, water vapor database (WVDB), datacube definition, and area of interest (AOI) definition 2. Generate allow list and analysis mask to determine which pixels from the satellite data can be used -3. Preprocess data to obtain atmospherically corrected images alongside quality assurance information -4. Classify pixels by applying linear spectral unmixing -5. Time series analyses to obtain trends in vegetation dynamics -6. Create mosaic and pyramid visualizations of the results +3. Preprocess data to obtain atmospherically corrected images alongside quality assurance information (a.k.a. Level 2 Analysis Ready Data) +4. Merge spatially and temporally overlapping preprocessed data +5. Classify pixels by applying linear spectral unmixing +6. Time series analyses to obtain trends in vegetation dynamics, deriving Level 3 data +7. Create mosaic and pyramid visualizations of the results +8. Version reporting ([`MultiQC`](http://multiqc.info/)) -7. Present QC results ([`MultiQC`](http://multiqc.info/)) +
+ +
## Usage @@ -37,8 +41,7 @@ The pipeline processes satellite imagery alongside auxiliary data in multiple st > If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. > Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data. -To run the pipeline on real data, input data needs to be acquired. -Concretely, satellite imagery, water vapor data, a digital elevation model, endmember definitions, a datacube specification, and a area-of-interest specification are required. +To run the pipeline, satellite imagery, water vapor data, a digital elevation model, endmember definitions, a datacube specification, and an area-of-interest specification are required as input data. Please refer to the [usage documentation](https://nf-co.re/rangeland/usage) for details on the input structure. Now, you can run the pipeline using: diff --git a/docs/images/rangeland_diagram.png b/docs/images/rangeland_diagram.png new file mode 100644 index 0000000..84374ee Binary files /dev/null and b/docs/images/rangeland_diagram.png differ diff --git a/nextflow_schema.json b/nextflow_schema.json index 173a047..13a0303 100644 --- a/nextflow_schema.json +++ b/nextflow_schema.json @@ -34,7 +34,7 @@ "fa_icon": "fas fa-burn", "exists": true, "description": "Water vapor dataset.", - "help_text": "Directory containing a number text files describing global water vapor data at different timestamps, and a coordinate order (`.coo`-)file containing the reference system of the water vapor data. See [usage documentation](https://nf-co.re/rangeland/docs/usage) for details regarding the required structure.\n\nAlternatively, a tarball can be supplied. The same directory structure needs to be in place in that tarball.", + "help_text": "Directory containing a number of text files describing global water vapor data at different timestamps, and a coordinate order (`.coo`-)file containing the reference system of the water vapor data. See [usage documentation](https://nf-co.re/rangeland/docs/usage) for details regarding the required structure.\n\nAlternatively, a tarball can be supplied. The same directory structure needs to be in place in that tarball.", "format": "path" }, "data_cube": { @@ -137,7 +137,7 @@ "indexes": { "type": "string", "default": "NDVI BLUE GREEN RED NIR SWIR1 SWIR2", - "help_text": "Space-separated list of indexes and bands that should be considered in time series analyses. They are indicated by using their established abbreviations. The full list of available indexes is available at https://force-eo.readthedocs.io/en/latest/components/higher-level/tsa/param.html under the 'INDEX' parameter. Spectral unmixing is a special index and always activated.", + "help_text": "Space-separated list of indexes and bands that should be considered in time series analyses. They are indicated by using their established abbreviations. The full list of available indexes is available at [https://force-eo.readthedocs.io/en/latest/components/higher-level/tsa/param.html](https://force-eo.readthedocs.io/en/latest/components/higher-level/tsa/param.html) under the 'INDEX' parameter. 
Spectral unmixing is a special index and always activated.", "description": "Select which bands and indexes should be considered in time series analyses.", "fa_icon": "fas fa-satellite", "pattern": "((BLUE|GREEN|RED|NIR|SWIR1|SWIR2|RE1|RE2|RE3|BNIR|NDVI|EVI|NBR|NDTI|ARVI|SAVI|SARVI|TC-BRIGHT|TC-GREEN|TC-WET|TC-DI|NDBI|NDWI|MNDWI|NDMI|NDSI|SMA|kNDVI|NDRE1|NDRE2|CIre|NDVIre1|NDVIre2|NDVIre3|NDVIre1n|NDVIre2n|NDVIre3n|MSRre|MSRren,CCI)(\\s|$))+" diff --git a/subworkflows/local/higher_level.nf b/subworkflows/local/higher_level.nf index 1891ae0..9c9fe43 100644 --- a/subworkflows/local/higher_level.nf +++ b/subworkflows/local/higher_level.nf @@ -39,9 +39,7 @@ workflow HIGHER_LEVEL { // main processing FORCE_HIGHER_LEVEL( HIGHER_LEVEL_CONFIG.out.higher_level_configs_and_data ) ch_versions = ch_versions.mix(FORCE_HIGHER_LEVEL.out.versions.first()) - trend_files = FORCE_HIGHER_LEVEL.out.trend_files.flatten().map{ x -> [ x.simpleName.substring(12), x ] } - trend_files_mosaic = trend_files.groupTuple() // visualizations diff --git a/subworkflows/local/preprocessing.nf b/subworkflows/local/preprocessing.nf index 0ea00d2..8690157 100644 --- a/subworkflows/local/preprocessing.nf +++ b/subworkflows/local/preprocessing.nf @@ -4,9 +4,6 @@ include { PREPROCESS_CONFIG } from '../../modules/local/pre include { FORCE_PREPROCESS } from '../../modules/local/force-preprocess/main' include { MERGE as MERGE_BOA; MERGE as MERGE_QAI } from '../../modules/local/merge/main' -// Closure to extract the parent directory of a file -def extractDirectory = { it.parent.toString().substring(it.parent.toString().lastIndexOf('/') + 1 ) } - workflow PREPROCESSING { take: @@ -20,6 +17,9 @@ workflow PREPROCESSING { main: + // Closure to extract the parent directory of a file + def extractDirectory = { it.parent.toString().substring(it.parent.toString().lastIndexOf('/') + 1 ) } + ch_versions = Channel.empty() FORCE_GENERATE_TILE_ALLOW_LIST( aoi_file, cube_file ) diff --git a/workflows/rangeland.nf b/workflows/rangeland.nf index 905b9f4..46a4f6c 100644 --- a/workflows/rangeland.nf +++ b/workflows/rangeland.nf @@ -33,34 +33,25 @@ include { HIGHER_LEVEL } from '../subworkflows/local/higher_level' // include { UNTAR as UNTAR_INPUT; UNTAR as UNTAR_DEM; UNTAR as UNTAR_WVDB; UNTAR as UNTAR_REF } from '../modules/nf-core/untar/main' -/* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - HELPER FUNCTIONS -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -*/ - - -// check whether provided input is within provided time range -def inRegion = input -> { - Integer date = input.simpleName.split("_")[3] as Integer - Integer start = params.start_date.replace('-','') as Integer - Integer end = params.end_date.replace('-','') as Integer - - return date >= start && date <= end -} - /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RUN MAIN WORKFLOW ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ - - workflow RANGELAND { main: + // checks whether provided input is within provided time range + def inRegion = { + Integer date = it.simpleName.split("_")[3] as Integer + Integer start = params.start_date.replace('-','') as Integer + Integer end = params.end_date.replace('-','') as Integer + + return date >= start && date <= end + } + ch_versions = Channel.empty() ch_multiqc_files = Channel.empty() // @@ -89,12 +80,12 @@ workflow RANGELAND { .set{ ch_input_types } 
UNTAR_INPUT(ch_input_types.archives) - ch_untared_inputs = UNTAR_INPUT.out.untar.map(it -> it[1]) + ch_untared_inputs = UNTAR_INPUT.out.untar.map{ it[1] } tar_versions = tar_versions.mix(UNTAR_INPUT.out.versions) data = data .mix(ch_untared_inputs, ch_input_types.dirs) - .map(it -> file("$it/*/*", type: 'dir')).flatten() + .map{ file("$it/*/*", type: 'dir') }.flatten() .filter{ inRegion(it) } // Determine type of params.dem and extract when neccessary @@ -108,7 +99,7 @@ workflow RANGELAND { .set{ ch_dem_types } UNTAR_DEM(ch_dem_types.archives) - ch_untared_dem = UNTAR_DEM.out.untar.map(it -> it[1]) + ch_untared_dem = UNTAR_DEM.out.untar.map{ it[1] } tar_versions = tar_versions.mix(UNTAR_DEM.out.versions) dem = dem.mix(ch_untared_dem, ch_dem_types.dirs).first() @@ -124,7 +115,7 @@ workflow RANGELAND { .set{ ch_wvdb_types } UNTAR_WVDB(ch_wvdb_types.archives) - ch_untared_wvdb = UNTAR_WVDB.out.untar.map(it -> it[1]) + ch_untared_wvdb = UNTAR_WVDB.out.untar.map{ it[1] } tar_versions = tar_versions.mix(UNTAR_WVDB.out.versions) wvdb = wvdb.mix(ch_untared_wvdb, ch_wvdb_types.dirs).first()
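
The schema and workflow changes above both touch user-facing parameters: `indexes` in nextflow_schema.json, and the `start_date`/`end_date` values consumed by the new `inRegion` closure. As a minimal sketch of how these fit together, assuming a hypothetical config file name and that dates are given as `YYYY-MM-DD` (which is what the `replace('-','')` comparison implies), a run could be constrained like this:

```groovy
// my_run.config -- hypothetical user configuration, a sketch only
params {
    // Bands and indexes forwarded to the FORCE time series analysis.
    // Abbreviations must match the pattern defined in nextflow_schema.json above.
    indexes    = 'NDVI EVI RED NIR'

    // Date window consumed by the inRegion closure in workflows/rangeland.nf:
    // dashes are stripped and the values are compared numerically against the
    // YYYYMMDD token parsed from each scene directory name.
    start_date = '1987-01-01'
    end_date   = '1989-12-31'
}
```

Such a file would be passed with Nextflow's `-c` option alongside the pipeline's required input parameters; the values shown are illustrative only.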