[med-svn] [r-cran-luminescence] 04/07: New upstream version 0.7.5

Andreas Tille <tille@debian.org>
Tue Oct 10 17:08:13 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-luminescence.

commit 57c7309b38f08b454d1c2200eeffc54285bd34a0
Author: Andreas Tille <tille@debian.org>
Date:   Tue Oct 10 19:01:06 2017 +0200

    New upstream version 0.7.5
---
 DESCRIPTION                                  | 101 ++--
 MD5                                          | 437 ++++++++------
 NAMESPACE                                    |  36 +-
 NEWS                                         | 489 +++++++++++++++-
 R/CW2pHMi.R                                  | 126 ++--
 R/CW2pLM.R                                   |   7 +-
 R/CW2pLMi.R                                  | 113 ++--
 R/CW2pPMi.R                                  | 106 ++--
 R/Luminescence-package.R                     | 167 +++++-
 R/PSL2Risoe.BINfileData.R                    | 186 ++++++
 R/RLum.Analysis-class.R                      | 167 +++++-
 R/RLum.Data.Curve-class.R                    | 104 +++-
 R/RcppExports.R                              |   8 +-
 R/Risoe.BINfileData2RLum.Analysis.R          |  26 +-
 R/Risoe.BINfileData2RLum.Data.Curve.R        |  84 +--
 R/RisoeBINfileData-class.R                   | 235 +++++---
 R/analyse_FadingMeasurement.R                | 821 +++++++++++++++++++++++++++
 R/analyse_IRSAR.RF.R                         | 510 +++++++++++++----
 R/analyse_SAR.CWOSL.R                        |  63 +-
 R/analyse_SAR.TL.R                           | 342 ++++++-----
 R/analyse_baSAR.R                            |  49 +-
 R/analyse_portableOSL.R                      | 261 +++++++++
 R/app_RLum.R                                 |   6 +-
 R/calc_AliquotSize.R                         | 137 +++--
 R/calc_AverageDose.R                         | 517 +++++++++++++++++
 R/calc_CentralDose.R                         |  16 +-
 R/calc_CommonDose.R                          |  20 +-
 R/calc_CosmicDoseRate.R                      |  83 +--
 R/calc_FadingCorr.R                          |  17 +-
 R/calc_FastRatio.R                           |  37 +-
 R/calc_FiniteMixture.R                       |   2 +-
 R/calc_Kars2008.R                            | 725 +++++++++++++++++++++++
 R/calc_MaxDose.R                             |  96 +++-
 R/calc_MinDose.R                             | 165 ++++--
 R/calc_OSLLxTxRatio.R                        |  64 ++-
 R/calc_Statistics.R                          |  26 +-
 R/calc_TLLxTxRatio.R                         |  60 +-
 R/calc_gSGC.R                                |   2 +-
 R/convert_BIN2CSV.R                          | 115 ++++
 R/convert_Daybreak2CSV.R                     |  91 +++
 R/convert_PSL2CSV.R                          |  94 +++
 R/convert_XSYG2CSV.R                         |  97 ++++
 R/extract_IrradiationTimes.R                 |  57 +-
 R/fit_CWCurve.R                              |  30 +-
 R/fit_LMCurve.R                              |  17 +-
 R/get_Layout.R                               |  14 +-
 R/get_Quote.R                                |   4 +-
 R/get_RLum.R                                 |  75 ++-
 R/github.R                                   | 217 +++++++
 R/install_DevelopmentVersion.R               | 114 ++++
 R/internals_RLum.R                           | 173 ++++++
 R/merge_RLum.Data.Curve.R                    |  23 +-
 R/merge_Risoe.BINfileData.R                  |  13 +-
 R/methods_RLum.R                             |  34 +-
 R/model_LuminescenceSignals.R                |  18 +-
 R/plot_AbanicoPlot.R                         |   9 +-
 R/plot_DRTResults.R                          |  12 +-
 R/plot_DetPlot.R                             |   6 +-
 R/plot_FilterCombinations.R                  | 197 +++++--
 R/plot_GrowthCurve.R                         | 688 ++++++++++++++++------
 R/plot_KDE.R                                 |  84 +--
 R/plot_RLum.Analysis.R                       | 148 +++--
 R/plot_RLum.Data.Curve.R                     |  72 +--
 R/plot_RLum.Data.Spectrum.R                  |  87 ++-
 R/plot_RLum.R                                |  14 +-
 R/plot_RLum.Results.R                        |   6 +-
 R/plot_RadialPlot.R                          | 176 +++---
 R/plot_ViolinPlot.R                          |   7 +-
 R/read_BIN2R.R                               | 150 +++--
 R/read_Daybreak2R.R                          | 469 +++++++++++----
 R/read_PSL2R.R                               | 313 ++++++++++
 R/read_SPE2R.R                               |   3 +-
 R/read_XSYG2R.R                              |  19 +-
 R/set_RLum.R                                 |   9 +-
 R/set_Risoe.BINfileData.R                    |  11 +-
 R/smooth_RLum.R                              |  73 +++
 R/template_DRAC.R                            |  22 +-
 R/use_DRAC.R                                 | 151 +++--
 R/verify_SingleGrainData.R                   |  16 +-
 R/write_R2BIN.R                              |  28 +-
 R/write_RLum2CSV.R                           | 240 ++++++++
 R/zzz.R                                      |   2 +-
 data/ExampleData.BINfileData.RData           | Bin 344081 -> 688543 bytes
 data/ExampleData.Fading.RData                | Bin 0 -> 2778 bytes
 data/ExampleData.portableOSL.RData           | Bin 0 -> 32632 bytes
 data/datalist                                |  22 +-
 inst/CITATION                                |   9 +-
 inst/NEWS.Rd                                 | 639 ++++++++++++++++++++-
 man/Analyse_SAR.OSLdata.Rd                   |  17 +-
 man/BaseDataSet.CosmicDoseRate.Rd            |   2 +-
 man/CW2pHMi.Rd                               |  19 +-
 man/CW2pLM.Rd                                |  17 +-
 man/CW2pLMi.Rd                               |  19 +-
 man/CW2pPMi.Rd                               |  19 +-
 man/ExampleData.BINfileData.Rd               |   2 +-
 man/ExampleData.CW_OSL_Curve.Rd              |   1 -
 man/ExampleData.DeValues.Rd                  |   1 -
 man/ExampleData.Fading.Rd                    |  93 +++
 man/ExampleData.FittingLM.Rd                 |   1 -
 man/ExampleData.LxTxData.Rd                  |   1 -
 man/ExampleData.LxTxOSLData.Rd               |   1 -
 man/ExampleData.RLum.Analysis.Rd             |   2 +-
 man/ExampleData.RLum.Data.Image.Rd           |   2 +-
 man/ExampleData.XSYG.Rd                      |   2 +-
 man/ExampleData.portableOSL.Rd               |  25 +
 man/GitHub-API.Rd                            | 100 ++++
 man/Luminescence-package.Rd                  |  76 +--
 man/PSL2Risoe.BINfileData.Rd                 |  65 +++
 man/RLum-class.Rd                            |  15 +-
 man/RLum.Analysis-class.Rd                   |  44 +-
 man/RLum.Data-class.Rd                       |   8 +-
 man/RLum.Data.Curve-class.Rd                 |  46 +-
 man/RLum.Data.Image-class.Rd                 |  19 +-
 man/RLum.Data.Spectrum-class.Rd              |  19 +-
 man/RLum.Results-class.Rd                    |  19 +-
 man/Risoe.BINfileData-class.Rd               |  65 ++-
 man/Risoe.BINfileData2RLum.Analysis.Rd       |  21 +-
 man/Second2Gray.Rd                           |  21 +-
 man/analyse_FadingMeasurement.Rd             | 149 +++++
 man/analyse_IRSAR.RF.Rd                      | 159 ++++--
 man/analyse_SAR.CWOSL.Rd                     |  22 +-
 man/analyse_SAR.TL.Rd                        |  27 +-
 man/analyse_baSAR.Rd                         |  28 +-
 man/analyse_pIRIRSequence.Rd                 |  17 +-
 man/analyse_portableOSL.Rd                   |  78 +++
 man/app_RLum.Rd                              |  11 +-
 man/apply_CosmicRayRemoval.Rd                |  17 +-
 man/apply_EfficiencyCorrection.Rd            |  17 +-
 man/as.Rd                                    |   9 +-
 man/bin_RLum.Data.Rd                         |  15 +-
 man/calc_AliquotSize.Rd                      |  34 +-
 man/calc_AverageDose.Rd                      | 143 +++++
 man/calc_CentralDose.Rd                      |  29 +-
 man/calc_CommonDose.Rd                       |  25 +-
 man/calc_CosmicDoseRate.Rd                   |  19 +-
 man/calc_FadingCorr.Rd                       |  23 +-
 man/calc_FastRatio.Rd                        |  30 +-
 man/calc_FiniteMixture.Rd                    |  17 +-
 man/calc_FuchsLang2001.Rd                    |  17 +-
 man/calc_HomogeneityTest.Rd                  |  15 +-
 man/calc_IEU.Rd                              |  19 +-
 man/calc_Kars2008.Rd                         | 180 ++++++
 man/calc_MaxDose.Rd                          |  98 ++--
 man/calc_MinDose.Rd                          | 148 +++--
 man/calc_OSLLxTxRatio.Rd                     |  31 +-
 man/calc_SourceDoseRate.Rd                   |  17 +-
 man/calc_Statistics.Rd                       |  21 +-
 man/calc_TLLxTxRatio.Rd                      |  34 +-
 man/calc_ThermalLifetime.Rd                  |  15 +-
 man/calc_gSGC.Rd                             |  15 +-
 man/convert_BIN2CSV.Rd                       |  56 ++
 man/convert_Daybreak2CSV.Rd                  |  52 ++
 man/convert_PSL2CSV.Rd                       |  53 ++
 man/convert_XSYG2CSV.Rd                      |  56 ++
 man/extract_IrradiationTimes.Rd              |  47 +-
 man/fit_CWCurve.Rd                           |  21 +-
 man/fit_LMCurve.Rd                           |  23 +-
 man/get_Layout.Rd                            |   9 +-
 man/get_Quote.Rd                             |   9 +-
 man/get_RLum.Rd                              |  22 +-
 man/get_Risoe.BINfileData.Rd                 |  15 +-
 man/get_rightAnswer.Rd                       |   9 +-
 man/install_DevelopmentVersion.Rd            |  44 ++
 man/length_RLum.Rd                           |  17 +-
 man/merge_RLum.Analysis.Rd                   |  17 +-
 man/merge_RLum.Data.Curve.Rd                 |  23 +-
 man/merge_RLum.Rd                            |  17 +-
 man/merge_RLum.Results.Rd                    |   9 +-
 man/merge_Risoe.BINfileData.Rd               |  23 +-
 man/methods_RLum.Rd                          | 114 ++--
 man/model_LuminescenceSignals.Rd             |  50 +-
 man/names_RLum.Rd                            |  17 +-
 man/plot_AbanicoPlot.Rd                      |  23 +-
 man/plot_DRTResults.Rd                       |  17 +-
 man/plot_DetPlot.Rd                          |  15 +-
 man/plot_FilterCombinations.Rd               |  49 +-
 man/plot_GrowthCurve.Rd                      |  42 +-
 man/plot_Histogram.Rd                        |  13 +-
 man/plot_KDE.Rd                              |  19 +-
 man/plot_NRt.Rd                              |  12 +-
 man/plot_RLum.Analysis.Rd                    |  32 +-
 man/plot_RLum.Data.Curve.Rd                  |  17 +-
 man/plot_RLum.Data.Image.Rd                  |  17 +-
 man/plot_RLum.Data.Spectrum.Rd               |  40 +-
 man/plot_RLum.Rd                             |  17 +-
 man/plot_RLum.Results.Rd                     |  17 +-
 man/plot_RadialPlot.Rd                       |  31 +-
 man/plot_Risoe.BINfileData.Rd                |  17 +-
 man/plot_ViolinPlot.Rd                       |  15 +-
 man/read_BIN2R.Rd                            |  31 +-
 man/read_Daybreak2R.Rd                       |  42 +-
 man/read_PSL2R.Rd                            |  80 +++
 man/read_SPE2R.Rd                            |  20 +-
 man/read_XSYG2R.Rd                           |  17 +-
 man/replicate_RLum.Rd                        |  15 +-
 man/report_RLum.Rd                           |  13 +-
 man/sTeve.Rd                                 |  12 +-
 man/set_RLum.Rd                              |  17 +-
 man/set_Risoe.BINfileData.Rd                 |  18 +-
 man/smooth_RLum.Rd                           |  74 +++
 man/structure_RLum.Rd                        |  17 +-
 man/template_DRAC.Rd                         |  14 +-
 man/tune_Data.Rd                             |  15 +-
 man/use_DRAC.Rd                              |  21 +-
 man/verify_SingleGrainData.Rd                |  31 +-
 man/write_R2BIN.Rd                           |  19 +-
 man/write_RLum2CSV.Rd                        |  81 +++
 src/Luminescence_init.c                      |  26 +
 src/RcppExports.cpp                          |  28 +-
 src/analyse_IRSARRF_SRS.cpp                  | 143 ++++-
 src/create_RLumDataCurve_matrix.cpp          | 115 ++++
 tests/testthat.R                             |   4 +
 tests/testthat/test_Analyse_SAROSLdata.R     |  17 +
 tests/testthat/test_CW2pX.R                  |  71 +++
 tests/testthat/test_PSL2RisoeBINfiledata.R   |  14 +
 tests/testthat/test_RisoeBINfileData-class.R |  16 +
 tests/testthat/test_Second2Gray.R            |  21 +
 tests/testthat/test_analyse_IRSARRF.R        |  54 ++
 tests/testthat/test_analyse_SARCWOSL.R       |  58 ++
 tests/testthat/test_analyse_SARTL.R          |  24 +
 tests/testthat/test_analyse_baSAR.R          |  51 ++
 tests/testthat/test_analyse_pIRIRSequence.R  |  52 ++
 tests/testthat/test_analyse_portableOSL.R    |  27 +
 tests/testthat/test_bin_RLumData.R           |  27 +
 tests/testthat/test_calc_AliquotSize.R       |  42 ++
 tests/testthat/test_calc_AverageDose.R       |  24 +
 tests/testthat/test_calc_CentralDose.R       |  27 +
 tests/testthat/test_calc_CommonDose.R        |  26 +
 tests/testthat/test_calc_CosmicDoseRate.R    |  57 ++
 tests/testthat/test_calc_FadingCorr.R        |  47 ++
 tests/testthat/test_calc_FastRatio.R         |  42 ++
 tests/testthat/test_calc_FiniteMixture.R     |  31 +
 tests/testthat/test_calc_FuchsLang2001.R     |  28 +
 tests/testthat/test_calc_HomogeneityTest.R   |  25 +
 tests/testthat/test_calc_IEU.R               |  25 +
 tests/testthat/test_calc_Kars2008.R          |  57 ++
 tests/testthat/test_calc_MaxDose.R           |  34 ++
 tests/testthat/test_calc_MinDose.R           |  33 ++
 tests/testthat/test_calc_OSLLxTxRatio.R      |  34 ++
 tests/testthat/test_calc_SourceDoseRate.R    |  23 +
 tests/testthat/test_calc_Statistics.R        |  87 +++
 tests/testthat/test_calc_TLLxTxRatio.R       |  47 ++
 tests/testthat/test_calc_ThermalLifetime.R   |  81 +++
 tests/testthat/test_calc_gSGC.R              |  28 +
 tests/testthat/test_convert_X2CSV.R          |  25 +
 tests/testthat/test_fit_CWCurve.R            |  24 +
 tests/testthat/test_fit_LMCurve.R            |  55 ++
 tests/testthat/test_get_RLum.R               |  26 +
 tests/testthat/test_merge_RLumDataCurve.R    |  22 +
 tests/testthat/test_merge_RisoeBINfileData.R |  19 +
 tests/testthat/test_names_RLum.R             |  10 +
 tests/testthat/test_plot_AbanicoPlot.R       | 166 ++++++
 tests/testthat/test_plot_Functions.R         | 136 +++++
 tests/testthat/test_plot_GrowthCurve.R       |  84 +++
 tests/testthat/test_read_BIN2R.R             |  47 ++
 tests/testthat/test_replicate_RLum.R         |  12 +
 tests/testthat/test_smooth_RLum.R            |  25 +
 tests/testthat/test_template_DRAC.R          |  13 +
 tests/testthat/test_verify_SingleGrainData.R |  14 +
 tests/testthat/test_write_R2BIN.R            |  57 ++
 tests/testthat/test_write_RLum2CSV.R         |  15 +
 tests/testthat/test_zzz.R                    |  16 +
 262 files changed, 14577 insertions(+), 2868 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index cc0f1b1..52eb959 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,76 +1,90 @@
 Package: Luminescence
 Type: Package
 Title: Comprehensive Luminescence Dating Data Analysis
-Version: 0.6.4
-Date: 2016-09-09
-Author: Sebastian Kreutzer [aut, trl, cre],
+Version: 0.7.5
+Date: 2017-06-30
+Author: Sebastian Kreutzer [aut, trl, cre, dtc],
     Michael Dietze [aut],
     Christoph Burow [aut, trl, dtc],
     Margret C. Fuchs [aut],
     Christoph Schmidt [aut],
     Manfred Fischer [aut, trl],
     Johannes Friedrich [aut],
-    Norbert Mercier [aut],
-    Rachel K. Smedley [aut],
-    Julie Durcan [aut],
-    Georgina King [aut],
+    Norbert Mercier [ctb],
+    Rachel K. Smedley [ctb],
+    Claire Christophe [ctb],
+    Antoine Zink [ctb],
+    Julie Durcan [ctb],
+    Georgina King [ctb, dtc],
+    Anne Philippe [ctb],
+    Guillaume Guerin [ctb],
     Markus Fuchs [ths]
 Authors@R: c(
-    person("Sebastian", "Kreutzer", role = c("aut", "trl", "cre"), email = "sebastian.kreutzer@u-bordeaux-montaigne.fr"),
+    person("Sebastian", "Kreutzer", role = c("aut", "trl", "cre", "dtc"), email = "sebastian.kreutzer@u-bordeaux-montaigne.fr"),
     person("Michael", "Dietze", role = c("aut")),
     person("Christoph", "Burow", role = c("aut", "trl", "dtc")),
     person("Margret C.", "Fuchs", role = c("aut")),
     person("Christoph", "Schmidt", role = c("aut")),
     person("Manfred", "Fischer", role = c("aut", "trl")),
     person("Johannes", "Friedrich", role = c("aut")),
-    person("Norbert", "Mercier", role = c("aut")),
-    person("Rachel K.", "Smedley", role = c("aut")),
-    person("Julie", "Durcan", role = c("aut")),
-    person("Georgina", "King", role = c("aut")),
+    person("Norbert", "Mercier", role = c("ctb")),
+    person("Rachel K.", "Smedley", role = c("ctb")),
+    person("Claire", "Christophe", role = c("ctb")),
+    person("Antoine", "Zink", role = c("ctb")),
+    person("Julie", "Durcan", role = c("ctb")),
+    person("Georgina", "King", role = c("ctb", "dtc")),
+    person("Anne", "Philippe", role = c("ctb")),
+    person("Guillaume", "Guerin", role = c("ctb")),
     person("Markus", "Fuchs", role = c("ths")))
 Maintainer: Sebastian Kreutzer <sebastian.kreutzer@u-bordeaux-montaigne.fr>
 Description: A collection of various R functions for the purpose of Luminescence
     dating data analysis. This includes, amongst others, data import, export,
     application of age models, curve deconvolution, sequence analysis and
     plotting of equivalent dose distributions.
-Contact: Package Developer Team <team@r-luminescence.de>
+Contact: Package Developers <developers@r-luminescence.org>
 License: GPL-3
-Depends: R (>= 3.3.0), utils
-LinkingTo: Rcpp (>= 0.12.5), RcppArmadillo (>= 0.6.700.6.0)
-Imports: bbmle (>= 1.0.18), data.table (>= 1.9.6), httr (>= 1.1.0),
-        matrixStats (>= 0.50.2), methods, Rcpp (>= 0.12.5), minpack.lm
-        (>= 1.2-0), raster (>= 2.5-2), readxl (>= 0.1.1), shape (>=
-        1.4.2), parallel, XML (>= 3.98-1.4), zoo (>= 1.7-13)
-Suggests: RLumShiny (>= 0.1.0), RLumModel (>= 0.1.1), plotly (>=
-        3.4.13), rmarkdown (>= 0.9.6), rjags (>= 4-6), coda (>=
-        0.18-1), pander (>= 0.6.0), rstudioapi (>= 0.5)
+BugReports: https://github.com/R-Lum/Luminescence/issues
+Depends: R (>= 3.3.2), utils, magrittr (>= 1.5)
+LinkingTo: Rcpp (>= 0.12.9), RcppArmadillo (>= 0.7.600.1.0)
+Imports: bbmle (>= 1.0.18), data.table (>= 1.10.0), httr (>= 1.2.1),
+        matrixStats (>= 0.51.0), methods, minpack.lm (>= 1.2-1), raster
+        (>= 2.5-8), readxl (>= 0.1.1), shape (>= 1.4.2), parallel, XML
+        (>= 3.98-1.5), zoo (>= 1.7-14)
+Suggests: RLumShiny (>= 0.1.1), RLumModel (>= 0.1.2), plotly (>=
+        4.5.6), rmarkdown (>= 1.3), rjags (>= 4-6), coda (>= 0.19-1),
+        pander (>= 0.6.0), rstudioapi (>= 0.6), testthat (>= 1.0.2),
+        devtools (>= 1.12.0)
 URL: https://CRAN.R-project.org/package=Luminescence
 Collate: 'Analyse_SAR.OSLdata.R' 'CW2pHMi.R' 'CW2pLM.R' 'CW2pLMi.R'
-        'CW2pPMi.R' 'Luminescence-package.R' 'RcppExports.R'
-        'replicate_RLum.R' 'RLum-class.R' 'names_RLum.R'
-        'structure_RLum.R' 'length_RLum.R' 'set_RLum.R' 'get_RLum.R'
+        'CW2pPMi.R' 'Luminescence-package.R' 'PSL2Risoe.BINfileData.R'
+        'RcppExports.R' 'replicate_RLum.R' 'RLum-class.R'
+        'smooth_RLum.R' 'names_RLum.R' 'structure_RLum.R'
+        'length_RLum.R' 'set_RLum.R' 'get_RLum.R'
         'RLum.Analysis-class.R' 'RLum.Data-class.R' 'bin_RLum.Data.R'
         'RLum.Data.Curve-class.R' 'RLum.Data.Image-class.R'
         'RLum.Data.Spectrum-class.R' 'RLum.Results-class.R'
         'Risoe.BINfileData2RLum.Analysis.R'
         'Risoe.BINfileData2RLum.Data.Curve.R' 'set_Risoe.BINfileData.R'
         'get_Risoe.BINfileData.R' 'RisoeBINfileData-class.R'
-        'Second2Gray.R' 'analyse_IRSAR.RF.R' 'analyse_SAR.CWOSL.R'
-        'analyse_SAR.TL.R' 'analyse_baSAR.R' 'analyse_pIRIRSequence.R'
-        'app_RLum.R' 'apply_CosmicRayRemoval.R'
+        'Second2Gray.R' 'analyse_FadingMeasurement.R'
+        'analyse_IRSAR.RF.R' 'analyse_SAR.CWOSL.R' 'analyse_SAR.TL.R'
+        'analyse_baSAR.R' 'analyse_pIRIRSequence.R'
+        'analyse_portableOSL.R' 'app_RLum.R' 'apply_CosmicRayRemoval.R'
         'apply_EfficiencyCorrection.R' 'calc_AliquotSize.R'
-        'calc_CentralDose.R' 'calc_CommonDose.R'
+        'calc_AverageDose.R' 'calc_CentralDose.R' 'calc_CommonDose.R'
         'calc_CosmicDoseRate.R' 'calc_FadingCorr.R' 'calc_FastRatio.R'
         'calc_FiniteMixture.R' 'calc_FuchsLang2001.R'
-        'calc_HomogeneityTest.R' 'calc_IEU.R' 'calc_MaxDose.R'
-        'calc_MinDose.R' 'calc_OSLLxTxRatio.R' 'calc_SourceDoseRate.R'
-        'calc_Statistics.R' 'calc_TLLxTxRatio.R'
-        'calc_ThermalLifetime.R' 'calc_gSGC.R'
+        'calc_HomogeneityTest.R' 'calc_IEU.R' 'calc_Kars2008.R'
+        'calc_MaxDose.R' 'calc_MinDose.R' 'calc_OSLLxTxRatio.R'
+        'calc_SourceDoseRate.R' 'calc_Statistics.R'
+        'calc_TLLxTxRatio.R' 'calc_ThermalLifetime.R' 'calc_gSGC.R'
+        'convert_BIN2CSV.R' 'convert_Daybreak2CSV.R'
+        'convert_PSL2CSV.R' 'convert_XSYG2CSV.R'
         'extract_IrradiationTimes.R' 'fit_CWCurve.R' 'fit_LMCurve.R'
-        'get_Layout.R' 'get_Quote.R' 'get_rightAnswer.R'
-        'internal_as.latex.table.R' 'internals_RLum.R'
-        'merge_RLum.Analysis.R' 'merge_RLum.Data.Curve.R'
-        'merge_RLum.R' 'merge_RLum.Results.R'
+        'get_Layout.R' 'get_Quote.R' 'get_rightAnswer.R' 'github.R'
+        'install_DevelopmentVersion.R' 'internal_as.latex.table.R'
+        'internals_RLum.R' 'merge_RLum.Analysis.R'
+        'merge_RLum.Data.Curve.R' 'merge_RLum.R' 'merge_RLum.Results.R'
         'merge_Risoe.BINfileData.R' 'methods_DRAC.R' 'methods_RLum.R'
         'model_LuminescenceSignals.R' 'plot_AbanicoPlot.R'
         'plot_DRTResults.R' 'plot_DetPlot.R'
@@ -80,11 +94,12 @@ Collate: 'Analyse_SAR.OSLdata.R' 'CW2pHMi.R' 'CW2pLM.R' 'CW2pLMi.R'
         'plot_RLum.Data.Image.R' 'plot_RLum.Data.Spectrum.R'
         'plot_RLum.R' 'plot_RLum.Results.R' 'plot_RadialPlot.R'
         'plot_Risoe.BINfileData.R' 'plot_ViolinPlot.R' 'read_BIN2R.R'
-        'read_Daybreak2R.R' 'read_SPE2R.R' 'read_XSYG2R.R'
-        'report_RLum.R' 'template_DRAC.R' 'tune_Data.R' 'use_DRAC.R'
-        'verify_SingleGrainData.R' 'write_R2BIN.R' 'zzz.R'
-RoxygenNote: 5.0.1
+        'read_Daybreak2R.R' 'read_PSL2R.R' 'read_SPE2R.R'
+        'read_XSYG2R.R' 'report_RLum.R' 'template_DRAC.R' 'tune_Data.R'
+        'use_DRAC.R' 'verify_SingleGrainData.R' 'write_R2BIN.R'
+        'write_RLum2CSV.R' 'zzz.R'
+RoxygenNote: 6.0.1
 NeedsCompilation: yes
-Packaged: 2016-09-09 11:58:10 UTC; kreutzer
+Packaged: 2017-06-29 16:49:05 UTC; kreutzer
 Repository: CRAN
-Date/Publication: 2016-09-09 14:52:58
+Date/Publication: 2017-06-29 22:46:57 UTC
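
For context, the updated DESCRIPTION raises the tool-chain floors (R >= 3.3.2, Rcpp >= 0.12.9, RcppArmadillo >= 0.7.600.1.0) and bumps the package itself from 0.6.4 to 0.7.5. A minimal, illustrative check against an installed library could look like the following R sketch (only the package and version strings from the DESCRIPTION above are used; nothing else is assumed):

    ## illustrative sketch: confirm an installed tree meets the new version floors
    packageVersion("Luminescence") >= "0.7.5"  # package version from DESCRIPTION
    getRversion() >= "3.3.2"                   # Depends: R (>= 3.3.2)
    packageVersion("Rcpp") >= "0.12.9"         # LinkingTo: Rcpp (>= 0.12.9)
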
diff --git a/MD5 b/MD5
index 812ccc1..f78ce9f 100644
--- a/MD5
+++ b/MD5
@@ -1,221 +1,306 @@
-ad769e06062b5a3edbed7b3b63058ad3 *DESCRIPTION
-29d4399a175e491061b95554c8ba4324 *NAMESPACE
-167712857b8f4d609f964747ece2621b *NEWS
+fee691583fa62e94aedb818ff5f55a82 *DESCRIPTION
+9c882be96543c401e87ed863ae9b56e9 *NAMESPACE
+ace516015bafd2243e6ec0c99c88e6ee *NEWS
 c1a575eb8fba9c7a3eb7b7b54dfd3581 *R/Analyse_SAR.OSLdata.R
-e3abc05b7f74052b2eafd3855cb7df28 *R/CW2pHMi.R
-980bc4ac52eef3b4f1e840f761d63e2d *R/CW2pLM.R
-36283b60db7cd988f02cfc4beccf9f56 *R/CW2pLMi.R
-aade4defaaafa0a62f0adb5f18022990 *R/CW2pPMi.R
-bb43916f4f08f0e82ddc313de0fd1a2f *R/Luminescence-package.R
+fe51ad130bf2bbb04cae492ead8a4f13 *R/CW2pHMi.R
+b3014d95277600a216af72bddf760c5e *R/CW2pLM.R
+832ceb3c08be8001603e68609eee4d34 *R/CW2pLMi.R
+3158542c438e20efc95d0d11febf8e61 *R/CW2pPMi.R
+c95b98d6d530b9a10d47b1061d918e74 *R/Luminescence-package.R
+52ac915357d57c719408c1c700d96ca6 *R/PSL2Risoe.BINfileData.R
 f96b164b48d5343ecaaf4f9b84c74354 *R/RLum-class.R
-54e44e008c02c50942b3afa406494d8f *R/RLum.Analysis-class.R
+3243d42067da0defbaafd81af9c85ef1 *R/RLum.Analysis-class.R
 738d70f4534e1b5823b0b453e4f2035b *R/RLum.Data-class.R
-14df8e5f6bec944d186b9d6114d366bf *R/RLum.Data.Curve-class.R
+832f33ab18db374d81166734dc825d62 *R/RLum.Data.Curve-class.R
 4d18f10245b689a9bb4c88dbf243fe99 *R/RLum.Data.Image-class.R
 79955b868a4822f69b32eb2cb6c30189 *R/RLum.Data.Spectrum-class.R
 cbe9744df2ae9107ede141a02d84a6d8 *R/RLum.Results-class.R
-fe869556fc94aa48cd5a2bfab284dd20 *R/RcppExports.R
-91de57856b65a6649bdce203e7a5eb3e *R/Risoe.BINfileData2RLum.Analysis.R
-90207ff2e2fb8785f4ffc4ee6062600e *R/Risoe.BINfileData2RLum.Data.Curve.R
-c7a2b8661391bc793d466179a74e690a *R/RisoeBINfileData-class.R
+ae8d071b1f3fa935cd2816fb7afedd0d *R/RcppExports.R
+9987ef6d6281737152f6d10b1e8cb7a4 *R/Risoe.BINfileData2RLum.Analysis.R
+01fc00379960da16b1fbea1e4a8a1327 *R/Risoe.BINfileData2RLum.Data.Curve.R
+b5f8646dd82d8e1d0af40c3c42b91954 *R/RisoeBINfileData-class.R
 02853953b3a9798309fdd0a9901f096a *R/Second2Gray.R
-aee94ab4556298034b0d567a78adf26c *R/analyse_IRSAR.RF.R
-a50cc85e044b59abf8e082a939035f6c *R/analyse_SAR.CWOSL.R
-ac7c1b7fa5a5cc170f6f355159e910d0 *R/analyse_SAR.TL.R
-aa980706c80a418b5ebf190bbd5d926b *R/analyse_baSAR.R
+db99f3ba3a2bce843b206bd1f0c75334 *R/analyse_FadingMeasurement.R
+56213d954ebdb2a56c2e2f63a24a2087 *R/analyse_IRSAR.RF.R
+a5a443e0b55c20300b29e96d37d60a65 *R/analyse_SAR.CWOSL.R
+e532e62dcd642980943335e76aaf8590 *R/analyse_SAR.TL.R
+e256fa858f73e0cb82d2928df08e4f6a *R/analyse_baSAR.R
 ef51962e710f824771edfb8c1c304761 *R/analyse_pIRIRSequence.R
-4d59db74f2b3fc7e552a2acb280bfa98 *R/app_RLum.R
+841395d858eb4aaa9a38ba9dd3de2239 *R/analyse_portableOSL.R
+ad3202e295e47a6dedd02134058fe392 *R/app_RLum.R
 e8bf70cc879f026a61634bd74d0fc0c7 *R/apply_CosmicRayRemoval.R
 bf6ff653d8a02380264316f3d5758031 *R/apply_EfficiencyCorrection.R
 d27071ceeee72ccb2306634d52367f5f *R/bin_RLum.Data.R
-f9a945af29b554ddc3fa51bb651b47d6 *R/calc_AliquotSize.R
-bbf5f5197d32499f73adf831a3e178c5 *R/calc_CentralDose.R
-627bb5b760cec008b5d725edab15995d *R/calc_CommonDose.R
-9ea4691d8050dd190d1d86c2682fd993 *R/calc_CosmicDoseRate.R
-0aa6f92b7dc89a9dabf468159e0e404e *R/calc_FadingCorr.R
-9764f24c652abdbfa450a989306bedd2 *R/calc_FastRatio.R
-88aeee30c02f49b9b2d50a636bceff1d *R/calc_FiniteMixture.R
+b2e169fe1ae7665e5ef8f64a25dbe2e9 *R/calc_AliquotSize.R
+682e7fdf2c27dfb54cef0932b5a93c4f *R/calc_AverageDose.R
+7b7e4ba411523b50a88914b04a98b4e8 *R/calc_CentralDose.R
+71f7a26963d9085cfe1f937555fda654 *R/calc_CommonDose.R
+6b6aca5d64243c3d15a5732730fdbcff *R/calc_CosmicDoseRate.R
+3d4457ef7606c1d850eb6342fde8544f *R/calc_FadingCorr.R
+4026e5b04f6548cf04f330422ed01ce9 *R/calc_FastRatio.R
+a7f0ca0270d8a2ae14da4220b5c5b363 *R/calc_FiniteMixture.R
 46d7564604b36f3b4dacd7b6e041fe95 *R/calc_FuchsLang2001.R
 3a73c5b14ee3f97469707d72c43dbd05 *R/calc_HomogeneityTest.R
 e7887dbe151b36cbd3e11e436e1e3f60 *R/calc_IEU.R
-6694ae47a51fdc6ba2e07173f7cbb24c *R/calc_MaxDose.R
-fc9e69c883a32a1abadddb47833ca1c8 *R/calc_MinDose.R
-1c66a4302b86ee4dbeef4943c8eccad8 *R/calc_OSLLxTxRatio.R
+03162d3fcea368634e7cb1ccfe631806 *R/calc_Kars2008.R
+6b689e6afa9018dd84d557161e82aa86 *R/calc_MaxDose.R
+77c50d59e41c9575533efefc42e9a481 *R/calc_MinDose.R
+90eadb8337b7c1ac7131702fc776d520 *R/calc_OSLLxTxRatio.R
 2b0adde7f32ed5bfc4c18b1a3f0a9a51 *R/calc_SourceDoseRate.R
-910f5308f61c4dd2d6cdae887510976d *R/calc_Statistics.R
-e72d868f0bb0c5300149e652a4255e29 *R/calc_TLLxTxRatio.R
+0fd66f4ebe46029ea3e347af7c4d49a0 *R/calc_Statistics.R
+bcf66320202be5d07ea2d5b979b0cd31 *R/calc_TLLxTxRatio.R
 16add648f8e85c742ec636dd332898e9 *R/calc_ThermalLifetime.R
-2f2f72d9a72654d953b7789b437b4164 *R/calc_gSGC.R
-d8257bb5d050544905df5489d6fd1376 *R/extract_IrradiationTimes.R
-8b6cc8ec465126940f29fe1fbf353eec *R/fit_CWCurve.R
-9a0a68c1b241e7388bcd07ca958bd663 *R/fit_LMCurve.R
-d73bb484d61479632cd36fc76714e0df *R/get_Layout.R
-7c21ec1847ba7eefc70fa07022503a9b *R/get_Quote.R
-86c7bd983f6902f78e69624ce8ec0645 *R/get_RLum.R
+992fb95052ecb9461d77e17da325b2bd *R/calc_gSGC.R
+c6b1c9d5b39c821a221980b6f7e114a9 *R/convert_BIN2CSV.R
+b6a40759005ad56093a19bf4412efe27 *R/convert_Daybreak2CSV.R
+d614d4f1425e18bf5a166d03b5cffd0d *R/convert_PSL2CSV.R
+99e0aa7f7777734af34cdfbf71e781e3 *R/convert_XSYG2CSV.R
+51fea279e9066ae93b56b57baa9a8d56 *R/extract_IrradiationTimes.R
+de08281401431f33d08dcd1307734fba *R/fit_CWCurve.R
+63e1f199ee740d0a8e9c040dc79fcc9f *R/fit_LMCurve.R
+7afb7b0fd383ac3655acc2897e785701 *R/get_Layout.R
+d391bdc903d4d4d15198d8ae6fda8ef5 *R/get_Quote.R
+d36508b28592e5dc3c16431174c16402 *R/get_RLum.R
 90259b563bb83ef834d7ffed77dbd37f *R/get_Risoe.BINfileData.R
 b45bae48aecccf0556288eff0f8b60ca *R/get_rightAnswer.R
+d87af547dabdec25ad8b6280175e72c6 *R/github.R
+c55833b2ed9168bbb8afc4efb8d75ed9 *R/install_DevelopmentVersion.R
 173e3a6060036c32353ad079bd8964ba *R/internal_as.latex.table.R
-60511c58e3602b048c6131b88824a8f0 *R/internals_RLum.R
+2149235001750f594ff57ded4c542878 *R/internals_RLum.R
 e2b2ea53ad479b52b03771df74c003c3 *R/length_RLum.R
 221e79181b7da8308c0f178c96b1e3d2 *R/merge_RLum.Analysis.R
-9320718a23373e2c8940c8ed33e8ff02 *R/merge_RLum.Data.Curve.R
+8c62bc78bcfe58429e966a7fdff7a7be *R/merge_RLum.Data.Curve.R
 72b5981712a785746e35a60c6665ce8d *R/merge_RLum.R
 13175fd8d36cd775aab857c982a94998 *R/merge_RLum.Results.R
-a4c513bb9917ee68b16f5e463d1c6d6a *R/merge_Risoe.BINfileData.R
+7672d185716790c7e4e823643ccd36d2 *R/merge_Risoe.BINfileData.R
 290a2239f4642011957d6e7c80928a1d *R/methods_DRAC.R
-ea509faaf10077d2d5d6c8f64337b146 *R/methods_RLum.R
-d91a70974803ee1a11cf3ba5f637b926 *R/model_LuminescenceSignals.R
+4e6315cc2c686be89a6fb559bcb269f8 *R/methods_RLum.R
+0ac9615a33fe8b74719e412c6e319acf *R/model_LuminescenceSignals.R
 8145486bbfaea0afacf5fa95350b21b4 *R/names_RLum.R
-7d2124d991664742501b08f2803024b7 *R/plot_AbanicoPlot.R
-4322ddddbc85722e7379ce1d92b6da52 *R/plot_DRTResults.R
-43b3c9d49c0a558ab3df5546152dd550 *R/plot_DetPlot.R
-36f6cad21154487d237fb16e5c48217e *R/plot_FilterCombinations.R
-0f1fe20ef527d63c4cf0042c12f8c61e *R/plot_GrowthCurve.R
+4d6555e9e0166fb2fc93a93d3d0143aa *R/plot_AbanicoPlot.R
+129e185ed5534faaa27cae1a936a3465 *R/plot_DRTResults.R
+b602e69466d97cf3e0df380418a7df3f *R/plot_DetPlot.R
+55bec20d68e09461118a1cfebcf099e5 *R/plot_FilterCombinations.R
+75aa0aaca06ca659edc789fcce7abfad *R/plot_GrowthCurve.R
 019d96fea55d9b18f27c99d92cea10ad *R/plot_Histogram.R
-04101363fc122ac90230252c5190d57e *R/plot_KDE.R
+d93108d81d4ac01d211362274e987b64 *R/plot_KDE.R
 b270a4b63b7da7a20afd13a8144e5d1f *R/plot_NRt.R
-2d90ce5919b2b3944fa99490f09e473e *R/plot_RLum.Analysis.R
-a9048e93d962b33e55ae9ecd1e905060 *R/plot_RLum.Data.Curve.R
+e85af72f04ca4dfef50c5a27668d973c *R/plot_RLum.Analysis.R
+df6627a2e2b0ed5404600c89fdc24277 *R/plot_RLum.Data.Curve.R
 d6feaf083c303f395a3fd60a4551a6b9 *R/plot_RLum.Data.Image.R
-69d886955acd7a7db3ee3912a0b586e2 *R/plot_RLum.Data.Spectrum.R
-03cec951ade16cfb79a793266b6d5832 *R/plot_RLum.R
-e9f501c16d94132f6ea3fe1ad6cb9fa3 *R/plot_RLum.Results.R
-900e03933972b275fc312e0dbf22cdb6 *R/plot_RadialPlot.R
+ea2c175663e70925cfbc98c8216254f8 *R/plot_RLum.Data.Spectrum.R
+6d183edde109d45577109f78018a3f82 *R/plot_RLum.R
+17a082745c028d0ff9d1aa34abb3e7b7 *R/plot_RLum.Results.R
+f10f11e8a729a4134229a2131f576eea *R/plot_RadialPlot.R
 203eff338826b122c5fb22f723351c66 *R/plot_Risoe.BINfileData.R
-f6770bdbcf0064b3cb1c40c36ea741a8 *R/plot_ViolinPlot.R
-a50296fa37cf7bee171a22b31f29e1e1 *R/read_BIN2R.R
-07c94e3465db89666f5b3a5dbdf8cd31 *R/read_Daybreak2R.R
-a4f7e396d8b3e12d7f3d52f40254956d *R/read_SPE2R.R
-1e055bd1fbc5e07d54c995902841bfd1 *R/read_XSYG2R.R
+1b70b73f71a9821a7b012bf78cc1b4e9 *R/plot_ViolinPlot.R
+68206777a7cf11c5ad939d55f69bda0d *R/read_BIN2R.R
+45d5d4e6f1b1e5fd7ff124775d9dc06b *R/read_Daybreak2R.R
+11c089eeab4e0578431d0cb973771751 *R/read_PSL2R.R
+89afa696f01d621fa71a9b5f47416ce5 *R/read_SPE2R.R
+c907f9f6bded2bbeda24c6e2e6f9138b *R/read_XSYG2R.R
 123ed927a9bf2662bb6440022eab158c *R/replicate_RLum.R
 c1dba55660ac08dddc9c2bb26a7ff189 *R/report_RLum.R
-a34034da44c2e10436a9a033d1fc5e9a *R/set_RLum.R
-ad168661d4ea741ccf71544e0b5fc4d7 *R/set_Risoe.BINfileData.R
+2627ec62e84b9e1a7400b12e6d9d6b05 *R/set_RLum.R
+c6a85601b2522c8947d2d055cc57cf07 *R/set_Risoe.BINfileData.R
+6780702bcc2de54e289cc91bb1fccbc0 *R/smooth_RLum.R
 a8b9523cf0d7070ef8131ff6128fc0f6 *R/structure_RLum.R
-1a69763394a8fe7ed145ac7a7a886194 *R/template_DRAC.R
+dbfd45411254c63d115e02a79ff222b9 *R/template_DRAC.R
 a530148476bff7efc7a117e4c5f02eb0 *R/tune_Data.R
-6e75ef3d269317c01e90f1b83b616ba5 *R/use_DRAC.R
-0faf38a0a9fe9591c6f2a66311efdd26 *R/verify_SingleGrainData.R
-00992ba149e893f7cc98f8291ddddd62 *R/write_R2BIN.R
-891f53b99754d66521c894303a0d5cfb *R/zzz.R
+5759ea9cd03f04b70087a4e6c9f169f3 *R/use_DRAC.R
+89805a65d83562f1bea44db930e48165 *R/verify_SingleGrainData.R
+cefefa68bd56c729d49887dd68e1673a *R/write_R2BIN.R
+9e18b396152860bd267a665c5de31b95 *R/write_RLum2CSV.R
+03e2866d86ec98a42f5f2f675012101f *R/zzz.R
 8eb217fc4380f23781dac785d7690941 *data/BaseDataSet.CosmicDoseRate.RData
-aa6811a6273a8735ce38d2fa0356ef9e *data/ExampleData.BINfileData.RData
+4f98149ef7a155bd26679d398e5b619b *data/ExampleData.BINfileData.RData
 3e72ccbe5fef2feee752206fc52bd358 *data/ExampleData.CW_OSL_Curve.RData
 d6477245d9abca8d86d0eb6d1d1f319b *data/ExampleData.DeValues.RData
+e2f24d5d1ad528d9b8852c8c619fb611 *data/ExampleData.Fading.RData
 2688778759b5d9ddcd458c98421f5d36 *data/ExampleData.FittingLM.RData
 76abec3d75bbea44fac9f599c0de9f0f *data/ExampleData.LxTxData.RData
 efa094f829c940630aefef99d8eea775 *data/ExampleData.LxTxOSLData.RData
 dd79ddebf77e9d0f482470546512db58 *data/ExampleData.RLum.Analysis.RData
 ee4c8be21bfb1f15b4056edb4b160513 *data/ExampleData.RLum.Data.Image.RData
 c723aab7895f3f8394a10da6d0a6b16d *data/ExampleData.XSYG.RData
-7b4198deaeab8582031b5932341e1477 *data/datalist
-1dbbc084b671a0760f1fd0697d97c2b5 *inst/CITATION
-f09d77f958c2e55e605fe52d20bfc7ed *inst/NEWS.Rd
+b2e8b7542753c5968647130e26a83517 *data/ExampleData.portableOSL.RData
+b072f185fb5d9de2e34883f1f98c027c *data/datalist
+6fffef420c1f222939226e9e44ca0583 *inst/CITATION
+5e10ccf6f395fe896fd96c527fbc67a3 *inst/NEWS.Rd
 12ab72be52e77951d8b5e1ee97f4702e *inst/doc/S4classObjects.pdf
 a7018449ba9936ff3384170611f7c8e4 *inst/doc/index.html
-f6f96ddcffb234907a5048b82d41bdcb *man/Analyse_SAR.OSLdata.Rd
-39d2ec86949546906eba52f6ede31d9d *man/BaseDataSet.CosmicDoseRate.Rd
-9c3b94da5f2d1017c301e99d6fd3d85d *man/CW2pHMi.Rd
-fb86f24fff9409e66cf2243fab61f6e0 *man/CW2pLM.Rd
-875c29b363e184fef1f3647a4197a331 *man/CW2pLMi.Rd
-9c0c5c12473a243c85ae9932056388e2 *man/CW2pPMi.Rd
-7da15a0b7b859211f23e7de4ed267297 *man/ExampleData.BINfileData.Rd
-af667acdedf76cd45c6e5097007c5f47 *man/ExampleData.CW_OSL_Curve.Rd
-934d5e6fa6cf6ef550a947046d49a50f *man/ExampleData.DeValues.Rd
-2702625dd7e4625d5013ed890834ebf3 *man/ExampleData.FittingLM.Rd
-f374ef65f2950cfd41336f4c940123de *man/ExampleData.LxTxData.Rd
-09ab0799e419cf82dd43cb17876372a9 *man/ExampleData.LxTxOSLData.Rd
-99c1a16b5168800a2803d126e659e65b *man/ExampleData.RLum.Analysis.Rd
-12325ed0411eefbaa8e7e8cd6f1f4569 *man/ExampleData.RLum.Data.Image.Rd
-341baf0d297a76f22d55c0b3e0546701 *man/ExampleData.XSYG.Rd
-8a80f89e24da9c05d239466f9153e368 *man/Luminescence-package.Rd
-6fd39a2be77918f7d647ea3017be25fc *man/RLum-class.Rd
-1509c02b0431e792f34c1e11e0bef19a *man/RLum.Analysis-class.Rd
-d2d88a12194c0e2640385b9bd6073604 *man/RLum.Data-class.Rd
-4ed4f390c5240a11adbf3960287c0199 *man/RLum.Data.Curve-class.Rd
-af950947c45be19bc66f42838ab35663 *man/RLum.Data.Image-class.Rd
-66c6d3ed693969ac4ff595d709320e89 *man/RLum.Data.Spectrum-class.Rd
-771fd62edf01a1a1c8d18c8f8a88959d *man/RLum.Results-class.Rd
-124822d87d8653811e913ad6a95d6d9a *man/Risoe.BINfileData-class.Rd
-3e54a534f02460c0de9f8cff2834c29e *man/Risoe.BINfileData2RLum.Analysis.Rd
-f8f4446787186c76492df6472ceab4e6 *man/Second2Gray.Rd
-e3f3abaa6da3f478161ec4e350dfd5d6 *man/analyse_IRSAR.RF.Rd
-de1999d5f6a68ca98011300f51a22794 *man/analyse_SAR.CWOSL.Rd
-658a9de4dd4fcb271dfa0f8d4b0d9464 *man/analyse_SAR.TL.Rd
-42ef16376b7e252fbd03c4b1e2ea002d *man/analyse_baSAR.Rd
-c19c0d6247e8cc383d8aeae6f0717e31 *man/analyse_pIRIRSequence.Rd
-4b5ea17fff3d7a703760271ea081bdda *man/app_RLum.Rd
-efd324636ab5b62d8a7c79717d70bd53 *man/apply_CosmicRayRemoval.Rd
-def80e2dcc3ef4a85a099f529ac07a60 *man/apply_EfficiencyCorrection.Rd
-e367a6f98fcf482b0c3f56ce835ce672 *man/as.Rd
-1c5968fb3f86cfa213e2dcb90e619831 *man/bin_RLum.Data.Rd
-ada7f222b491152172466be0ad52064f *man/calc_AliquotSize.Rd
-5a7feee11b8306cec091458020c098fb *man/calc_CentralDose.Rd
-bb6b17e344490a7199b27895bd6abf3a *man/calc_CommonDose.Rd
-4d38b8beac23beae22b43e5dd5ec326c *man/calc_CosmicDoseRate.Rd
-e8bfc08f7fef6d70e17c83844d26441c *man/calc_FadingCorr.Rd
-5d02b979050b2f3a1640e5fbf7fc1105 *man/calc_FastRatio.Rd
-e007846a7d7be9464f0970b58506a6f9 *man/calc_FiniteMixture.Rd
-f1a9ff7b70f56a4b5c50193ac9d85d83 *man/calc_FuchsLang2001.Rd
-c0b16d0e2d98b7a2aa977740b849951f *man/calc_HomogeneityTest.Rd
-de7283b67e9cb3281d17b9771cc01a1c *man/calc_IEU.Rd
-2898cbc0cbba5cb6099a15ec93f90a42 *man/calc_MaxDose.Rd
-5952fd1adfcb1cc0f080a8beb5e9bc52 *man/calc_MinDose.Rd
-ce5efa7cbffacfea46f58ed5b827d8de *man/calc_OSLLxTxRatio.Rd
-5e295e0195875fa39673281c9a1ca00e *man/calc_SourceDoseRate.Rd
-109c9705a679c1d8b69cd1c3575e1c0c *man/calc_Statistics.Rd
-e3a3471405e8f5e3a007682a2afa0046 *man/calc_TLLxTxRatio.Rd
-cf2735f1e82d92b1b9b2c24de42bc03b *man/calc_ThermalLifetime.Rd
-ac0c563d4a0ccae3072c557319c0348a *man/calc_gSGC.Rd
-504d1e8cbb1b0fa2c63b52a856932934 *man/extract_IrradiationTimes.Rd
-08557bda7bba49c70c214bfdc3ed461b *man/fit_CWCurve.Rd
-f03d933d09467bd3337784902c0cdb4a *man/fit_LMCurve.Rd
-89cc198c98577c495eff803df48719dc *man/get_Layout.Rd
-31cce908e034e48c7bf919b314c2a593 *man/get_Quote.Rd
-ada62940c1f10fe005b1d457f46f012c *man/get_RLum.Rd
-8871034e7484e61454360ac349d25926 *man/get_Risoe.BINfileData.Rd
-29fb6b3419cac59f816b2096fe4d61fe *man/get_rightAnswer.Rd
-8dafe354753f27d8a2ea3a76f043be84 *man/length_RLum.Rd
-6e8ef969c68afb79324b021f0a6c5d32 *man/merge_RLum.Analysis.Rd
-37c54307c88869500dfd789d8b4f2c76 *man/merge_RLum.Data.Curve.Rd
-59bbea14359f1998c93ea0b7740e26a6 *man/merge_RLum.Rd
-d49663484d0887b6bac5e63ec68ca76a *man/merge_RLum.Results.Rd
-b1ac5a5a8cf88e909150d103fb20e156 *man/merge_Risoe.BINfileData.Rd
-02620a556fe2d098c09cac9e6f37abc1 *man/methods_RLum.Rd
-91ece2023bd21e008e68b4c34e943421 *man/model_LuminescenceSignals.Rd
-63dc8aae12ccda26f04a510daf6a5d58 *man/names_RLum.Rd
-ab0911053b849d28df131bb93cfeeeec *man/plot_AbanicoPlot.Rd
-4d389771a38651bda6c3b21cb76e8c2e *man/plot_DRTResults.Rd
-b6df2c9c5cb551bb607c56067e070c01 *man/plot_DetPlot.Rd
-4b51c3f5beec6b3f53a89afda462af75 *man/plot_FilterCombinations.Rd
-a238a35140698db5d70dc0161015397c *man/plot_GrowthCurve.Rd
-449cbfcf19feae6ba9555629616b28a5 *man/plot_Histogram.Rd
-d9a50f2d5115340e610f365318edc85d *man/plot_KDE.Rd
-76483428bb5fb7044dbe4092f9390296 *man/plot_NRt.Rd
-ef6b9a8c1a8a571d5ba0978619ca38e3 *man/plot_RLum.Analysis.Rd
-f5d46b22e8954b6f4558ee3a4a82cdd2 *man/plot_RLum.Data.Curve.Rd
-4669985f2700cfaf4464caff7bff880b *man/plot_RLum.Data.Image.Rd
-098f12925f9f1d0b4b385b312765918c *man/plot_RLum.Data.Spectrum.Rd
-ed6a34b90da65ddcb34e30891c4397a9 *man/plot_RLum.Rd
-8e93b0f6cc2acdaa54f3de39ba9f9e8f *man/plot_RLum.Results.Rd
-4189844c293b23aaa1fe22cf597f46ba *man/plot_RadialPlot.Rd
-5dd93419c7cbaa0c83a2350792b74ded *man/plot_Risoe.BINfileData.Rd
-15878fc8c6efc4a5ffe1c086ddde687d *man/plot_ViolinPlot.Rd
-2db23c413280b1ceacb92786d223221e *man/read_BIN2R.Rd
-a5b18d92de11221f18230fae56edc2bb *man/read_Daybreak2R.Rd
-41bad2f3d619a56ab3b96eaf9fa7a7d2 *man/read_SPE2R.Rd
-64b4051fba2e7e72a223e1d8f71d25d8 *man/read_XSYG2R.Rd
-34f97f9bf23ed57cb0ce2e57f21ee1fd *man/replicate_RLum.Rd
-e88598b8db0e446b1f4b6ed4d4726b71 *man/report_RLum.Rd
-6d94dde798f52744ed7b2c6441cc08c6 *man/sTeve.Rd
-9780fe682dba53efe53fe28773ad32c4 *man/set_RLum.Rd
-b442db81eda6ccf8b860893bfeeaa737 *man/set_Risoe.BINfileData.Rd
-0db898f0db30c9dd8a5ee02e59510dac *man/structure_RLum.Rd
-ced903c1ce612d683004923c8cfd5b39 *man/template_DRAC.Rd
-0bc5b9d9078e954efdad5f1d183c6aff *man/tune_Data.Rd
-263622116992566da3f0b6cc788db56e *man/use_DRAC.Rd
-51dacf72117ea39dc45bb22330439d21 *man/verify_SingleGrainData.Rd
-e9a96cd7970d3c2f447d63dbcdb4d9ff *man/write_R2BIN.Rd
-0500e163992f25bdb1746049f772fe63 *src/RcppExports.cpp
-6bacfacce37d289d8ca7fa19384c2fad *src/analyse_IRSARRF_SRS.cpp
+122b026c56fab28a782c4642e9d8db15 *man/Analyse_SAR.OSLdata.Rd
+61a8c6a0f8d2f978c42713a62d17b184 *man/BaseDataSet.CosmicDoseRate.Rd
+c4ccea82cbbfd7f29ced37061b932f2c *man/CW2pHMi.Rd
+40776bc2b68733736bdae5aad1b88d38 *man/CW2pLM.Rd
+9694109ce906a82559aecfdbe1ac1df5 *man/CW2pLMi.Rd
+c2ea84800157c0fd304717bb7e5c4a2a *man/CW2pPMi.Rd
+a35fea9269153ca92eea85276f59015a *man/ExampleData.BINfileData.Rd
+5fb9286fb7d17d5ef45cb5fc07640bd2 *man/ExampleData.CW_OSL_Curve.Rd
+7834a3d2fcd83b3ae434bb70d51ba8c0 *man/ExampleData.DeValues.Rd
+a14dce9efe81a4d4944d280d22435939 *man/ExampleData.Fading.Rd
+8e74b8580e300a45b73527c49b673e87 *man/ExampleData.FittingLM.Rd
+675f02bcffd91fa4722fb1d2ec5ae75b *man/ExampleData.LxTxData.Rd
+863a4552b223c20e693c5df673b9022e *man/ExampleData.LxTxOSLData.Rd
+6e5a77bbe10d76fa2454c3ad113ad98e *man/ExampleData.RLum.Analysis.Rd
+1780f102250b7b2a656287b1698f008e *man/ExampleData.RLum.Data.Image.Rd
+987d0a23a542d69133675d79a0e2dd03 *man/ExampleData.XSYG.Rd
+9825014322a727e73bc9f26815cfca1d *man/ExampleData.portableOSL.Rd
+2196f1ef636ae248bf199ad1981661d5 *man/GitHub-API.Rd
+9e130bb188ee18f9e6ee92309be0081a *man/Luminescence-package.Rd
+c4779e8abde73aba966b39c057e0063e *man/PSL2Risoe.BINfileData.Rd
+1952112acd3f0c69bc20a397e7004ab7 *man/RLum-class.Rd
+14a10195c0819478baac4d7848e01778 *man/RLum.Analysis-class.Rd
+62278eae044ef3d373fd585bad5a8a45 *man/RLum.Data-class.Rd
+7896166ed75ce3c962118936a5bb3021 *man/RLum.Data.Curve-class.Rd
+e6d36ccde30580931dc24a0ee5bd4208 *man/RLum.Data.Image-class.Rd
+881ab2fef13643e3eef347caecea5e12 *man/RLum.Data.Spectrum-class.Rd
+1c0a443ad5cda894350d940343686d27 *man/RLum.Results-class.Rd
+e9398c640d0aca47125479e68af16a3a *man/Risoe.BINfileData-class.Rd
+39786244dcd2a6b9fa406015c57ffc57 *man/Risoe.BINfileData2RLum.Analysis.Rd
+1b06c60cbe42012c3dea71a954881a0e *man/Second2Gray.Rd
+c0baf642a5abc68b995d11912e4b377f *man/analyse_FadingMeasurement.Rd
+8dfb62b840581b940b42ae85499f0163 *man/analyse_IRSAR.RF.Rd
+683f8130e8cafae9096b2a6b284be420 *man/analyse_SAR.CWOSL.Rd
+d167655f9ddd07772092a9aa569f84c2 *man/analyse_SAR.TL.Rd
+a26c4b998ff28232ee3e0e2a35394470 *man/analyse_baSAR.Rd
+14e106121723223a5829a38b819a26ec *man/analyse_pIRIRSequence.Rd
+331ba5f917e28800c0a17b3b76423f85 *man/analyse_portableOSL.Rd
+107e00777bc058084c86638b0c73a898 *man/app_RLum.Rd
+6e7c349ff045d69ba1f3faadd188f567 *man/apply_CosmicRayRemoval.Rd
+0952c006be3a85fa3c9f121ffd257181 *man/apply_EfficiencyCorrection.Rd
+9a68bfd566bc2e0a3eb91d31f3400121 *man/as.Rd
+278963ab4f30209b16d3f8d6197d705a *man/bin_RLum.Data.Rd
+df1023c4eec37e530268ee56784a1bd9 *man/calc_AliquotSize.Rd
+30227879b57117d465c05da84d7d3423 *man/calc_AverageDose.Rd
+24743f69920b71b5fae7daed199417f9 *man/calc_CentralDose.Rd
+812e54356c7b4bb35e32ce2289ca72b8 *man/calc_CommonDose.Rd
+5339725334b9890e291685d30a591c93 *man/calc_CosmicDoseRate.Rd
+b1ef02d20c55e4f34e0bae7a54cab230 *man/calc_FadingCorr.Rd
+84741e0162f08bf13f06f98c973aa766 *man/calc_FastRatio.Rd
+250b5dd03eba9b02519017b57b585cdb *man/calc_FiniteMixture.Rd
+bc13dff1403e968cb2ed6052e030b2b8 *man/calc_FuchsLang2001.Rd
+f8faeb25180b2d6a4fc2521d0ba67484 *man/calc_HomogeneityTest.Rd
+b4b8d926dd7d651dec5c55d2dc97194b *man/calc_IEU.Rd
+6637ce006c0353d003315677e65001cb *man/calc_Kars2008.Rd
+0db5fa548e44299c6e35956053d71e95 *man/calc_MaxDose.Rd
+6f7e70d7fca9568c893cd6152ae1bd0b *man/calc_MinDose.Rd
+9752ac7ca01877d3f8f874744a37a9ef *man/calc_OSLLxTxRatio.Rd
+4a465d477bcab39f0fe2ca49ec0153b3 *man/calc_SourceDoseRate.Rd
+3b04dd5336c6c746aa02dd8301a348e9 *man/calc_Statistics.Rd
+786f167dcd04d991442803e0374bd44d *man/calc_TLLxTxRatio.Rd
+f06b0cdd7e380e1a35afe638a42e9f1a *man/calc_ThermalLifetime.Rd
+19b96d0345613a002ca43ad72dab7d29 *man/calc_gSGC.Rd
+9fc39985ee79656cbc4fafdbe6cf1a7d *man/convert_BIN2CSV.Rd
+b2ce6aed145722ec8f932605056a67f1 *man/convert_Daybreak2CSV.Rd
+6f6405c0b073a4a79e1bbc8dc976bc4b *man/convert_PSL2CSV.Rd
+b8e135f884cf3d116e90d790d0a276e0 *man/convert_XSYG2CSV.Rd
+2ce9536270596a396c21ce4a07256495 *man/extract_IrradiationTimes.Rd
+deb6016627acb30e3963c2daf22511a8 *man/fit_CWCurve.Rd
+424b1ba34228e6d7c0198343377450d3 *man/fit_LMCurve.Rd
+93807aaebeee680f41193be65d4d5604 *man/get_Layout.Rd
+48819bdaaaac40e6ab71ee7787f6d7ad *man/get_Quote.Rd
+db688fe490d74a008647dd8e21acfce9 *man/get_RLum.Rd
+d508ba4995ab5675836f01bf6eaca613 *man/get_Risoe.BINfileData.Rd
+9911f08925189be2133324767056da90 *man/get_rightAnswer.Rd
+f40f7190eb2e2bfde408bc4b440c7b6c *man/install_DevelopmentVersion.Rd
+2c0fe665b4289eb3e1ec5724cb0c687c *man/length_RLum.Rd
+2e727bdb8dea1aed273e769e983a95ba *man/merge_RLum.Analysis.Rd
+f33100f8f4825b03c644aceca0bc2dab *man/merge_RLum.Data.Curve.Rd
+8a7e06db1d2dc2612e9f53c86eb8bef6 *man/merge_RLum.Rd
+153dedca1a5b115da1ffd7fb79f44a12 *man/merge_RLum.Results.Rd
+64a71644bd1bbf84ace9689835d40fbd *man/merge_Risoe.BINfileData.Rd
+4b444f591a0dd72797d06f47df9a6e72 *man/methods_RLum.Rd
+d4aa82f5ddba4de72737a32366ed1aa3 *man/model_LuminescenceSignals.Rd
+2468be6ad7752c3f631f0d990a505952 *man/names_RLum.Rd
+c6335724c69df978371c7bfc7c55aed7 *man/plot_AbanicoPlot.Rd
+35c76c017bf79c795f76749cf7735d37 *man/plot_DRTResults.Rd
+be5e65fcdb3b81121b69ad07b7376740 *man/plot_DetPlot.Rd
+6ab6a9247ea22ff60a4bd389577da9d7 *man/plot_FilterCombinations.Rd
+72aff33ac4b952e25068050ae644855d *man/plot_GrowthCurve.Rd
+06bfa18bd45d39140b2ecc7ec742021e *man/plot_Histogram.Rd
+d93b4ebed64c3b427a3bd552668cf543 *man/plot_KDE.Rd
+740646199ec67b74dbb3d76ceb3e3581 *man/plot_NRt.Rd
+2000561a2cc32abe8a02f95f013626e9 *man/plot_RLum.Analysis.Rd
+ff5af8e16b1dd6d0a55a79b55d118374 *man/plot_RLum.Data.Curve.Rd
+4cfd6c93f17026fdb4804f7eef468fb6 *man/plot_RLum.Data.Image.Rd
+25f12af2a2506587ccd99c8aa02a5910 *man/plot_RLum.Data.Spectrum.Rd
+101dacd7cc66d1daa3ec65654e105bbe *man/plot_RLum.Rd
+0fecba4945d4b451a24ef84a3df389f7 *man/plot_RLum.Results.Rd
+67d6f3cbe4c7cc553b7befa3ef72f4c4 *man/plot_RadialPlot.Rd
+440b26a73eff77ddf0e25d3a318cc2e7 *man/plot_Risoe.BINfileData.Rd
+925d8b4701e49b962c54adcedc25f24c *man/plot_ViolinPlot.Rd
+38e7576fe5ebb677cc9b14c9521574b5 *man/read_BIN2R.Rd
+e7ea06f1d32856fae9453aa0f4998709 *man/read_Daybreak2R.Rd
+7fb5620e56a4f537b7abe5a25e11ed86 *man/read_PSL2R.Rd
+6fc64a9f058fc93ed99216cd7f2fe316 *man/read_SPE2R.Rd
+c04393a50fde11cfbdba0ea1152fd3ae *man/read_XSYG2R.Rd
+42eb67cc28f768e883b8ec635befe4df *man/replicate_RLum.Rd
+819e9a4b574bce5c8c079be2105e416f *man/report_RLum.Rd
+898b7ef0821a2ba5770cb25641a64bcb *man/sTeve.Rd
+37aac8b57f0db14f8084c3e707064711 *man/set_RLum.Rd
+fafbee0fa7e9bb600141a37c05d907cb *man/set_Risoe.BINfileData.Rd
+a81fe2d1cb76e47c282516ad60ec6232 *man/smooth_RLum.Rd
+88a42283522368818b88086794c390b4 *man/structure_RLum.Rd
+3467b50fe7acf1e088f1c5ad186b7e8a *man/template_DRAC.Rd
+55f46b155f33e16949df94951eb71c9c *man/tune_Data.Rd
+c6ca0781fb2c87a1370d5f66e2255d13 *man/use_DRAC.Rd
+6bb76a1b0e99a076a90235b12e7413db *man/verify_SingleGrainData.Rd
+d75fc5e41c8a49061e68cf15ffe17e91 *man/write_R2BIN.Rd
+d66e25306f6e84738c37b5fd93b2a079 *man/write_RLum2CSV.Rd
+87a1a15fb9460dcaf2cd5c6b8934f1d5 *src/Luminescence_init.c
+d521dbd09a0e8b7ec6b20e53f2994276 *src/RcppExports.cpp
+a8ca8d72e672faad7595dd329589c810 *src/analyse_IRSARRF_SRS.cpp
+4fe2e831c907819c563103feedd70aac *src/create_RLumDataCurve_matrix.cpp
 30434cc523b9b2c9704d7331aefd8a5f *src/create_UID.cpp
+5c33e2021366df205be163e82ca1a759 *tests/testthat.R
+b5212accfaed0ef2373b164513f13416 *tests/testthat/test_Analyse_SAROSLdata.R
+da722a979208f3b55ae9a4c5dc1c02cf *tests/testthat/test_CW2pX.R
+ae1bd3189f87dcd90077ba3f99e55798 *tests/testthat/test_PSL2RisoeBINfiledata.R
+c6a8b578b52e7c432c235dc573473f92 *tests/testthat/test_RisoeBINfileData-class.R
+db9a35c16345701c372e7404378c2c18 *tests/testthat/test_Second2Gray.R
+a9dcdc8274493d7c92c4310483b73dfa *tests/testthat/test_analyse_IRSARRF.R
+50a3ba100df453e3af59550015d9f73f *tests/testthat/test_analyse_SARCWOSL.R
+aff6bdd623021de1a64a1c620817140b *tests/testthat/test_analyse_SARTL.R
+b5504451520a8d78b2768ac13c097871 *tests/testthat/test_analyse_baSAR.R
+12b034ef782492c2324a170a4e0a6a1f *tests/testthat/test_analyse_pIRIRSequence.R
+76adf42e17285a575784c7bfd8e3a18a *tests/testthat/test_analyse_portableOSL.R
+1bb9365493a2e71633428776fc34fdd4 *tests/testthat/test_bin_RLumData.R
+1bcf9b4357d96c47023cdf26d5f96eb3 *tests/testthat/test_calc_AliquotSize.R
+7ab999724414f0364bd4af8809f46cf0 *tests/testthat/test_calc_AverageDose.R
+e65025d6807077ca0a0cd9f79c9c3f7d *tests/testthat/test_calc_CentralDose.R
+f3e684f9cfefc721a9bfddfbc9c01950 *tests/testthat/test_calc_CommonDose.R
+a79160ac6df7646fc3c440b1f346ad29 *tests/testthat/test_calc_CosmicDoseRate.R
+724351aaa09692863ea987e71174b3ee *tests/testthat/test_calc_FadingCorr.R
+ef8a337f704ba7404eb9d52b6ef25f98 *tests/testthat/test_calc_FastRatio.R
+6c4411e2879e2ac4f1879d57682e3ff0 *tests/testthat/test_calc_FiniteMixture.R
+c7bdbb30555290c3c9a14797a8ad7357 *tests/testthat/test_calc_FuchsLang2001.R
+9a18e9f0d2673d311789207899e24ad1 *tests/testthat/test_calc_HomogeneityTest.R
+246eb64860eed66822ce628b6331490a *tests/testthat/test_calc_IEU.R
+fc4aac1eef8e42787a605135cb0dee98 *tests/testthat/test_calc_Kars2008.R
+44d2344f673e3a7b09549dc3229efb01 *tests/testthat/test_calc_MaxDose.R
+94ec5bd92907b97a0523a36a44054b5d *tests/testthat/test_calc_MinDose.R
+9cecf6480e009a7a9fc7edfed22aa1b0 *tests/testthat/test_calc_OSLLxTxRatio.R
+64597a16c69ff6c50a0be13eda834375 *tests/testthat/test_calc_SourceDoseRate.R
+c130017fb9e0f31831d95fca9815fe0c *tests/testthat/test_calc_Statistics.R
+66dd969ef474afd721b2eb204b39e186 *tests/testthat/test_calc_TLLxTxRatio.R
+24caaa2f311cc247869d729abd9c953f *tests/testthat/test_calc_ThermalLifetime.R
+3f3573f93891be74b3bb6e428dfb3456 *tests/testthat/test_calc_gSGC.R
+18f3912635a3a51be3dda5e79856c88f *tests/testthat/test_convert_X2CSV.R
+61d8150cdb8afccb664472edea877368 *tests/testthat/test_fit_CWCurve.R
+2bff04b5cd333df3443c86e930769554 *tests/testthat/test_fit_LMCurve.R
+7ecde56887533e797e10de0d970220e9 *tests/testthat/test_get_RLum.R
+d2c03b0a20fdfa55aabf976c061e26cc *tests/testthat/test_merge_RLumDataCurve.R
+7784fdf16b40b1d753986fa5915dcc32 *tests/testthat/test_merge_RisoeBINfileData.R
+65974af970b9bed8bf057c111f25b0ad *tests/testthat/test_names_RLum.R
+3c8f15125781b4142f1256237338658c *tests/testthat/test_plot_AbanicoPlot.R
+09bc8c7bd4016222751f9318b45db67a *tests/testthat/test_plot_Functions.R
+9949cb9a94a8104c1448e18f059d2a09 *tests/testthat/test_plot_GrowthCurve.R
+410d1cf97d0c21772b56ed7dce8b7ceb *tests/testthat/test_read_BIN2R.R
+bdde07283d4a37484fc09b9f25ea169b *tests/testthat/test_replicate_RLum.R
+24266fe2c91ac06752752475030a7913 *tests/testthat/test_smooth_RLum.R
+7f25bcca1a2adf5ff8f520fbc2140e72 *tests/testthat/test_template_DRAC.R
+71a577eca286a0855544b6646ed03287 *tests/testthat/test_verify_SingleGrainData.R
+d34f1d0f0cbf9406e85804fcb034bd3f *tests/testthat/test_write_R2BIN.R
+241a3594ae2ad3b585b7166dabce51d4 *tests/testthat/test_write_RLum2CSV.R
+3795ccc2aa09e748f9aeab17d198f633 *tests/testthat/test_zzz.R
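
The MD5 file rewritten above holds one checksum per shipped file; for an installed copy of the package these sums can be re-verified with base R's tools package. A short, illustrative call (it assumes the package is already installed):

    ## illustrative sketch: re-check the per-file MD5 sums of an installed copy
    tools::checkMD5sums("Luminescence")  # TRUE if all checksums match
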
diff --git a/NAMESPACE b/NAMESPACE
index 0cf7df3..3bf5521 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -14,6 +14,7 @@ S3method("[",RLum.Data.Image)
 S3method("[",RLum.Data.Spectrum)
 S3method("[",RLum.Results)
 S3method("[<-",DRAC.list)
+S3method("[<-",RLum.Data.Curve)
 S3method("[[",RLum.Analysis)
 S3method("[[",RLum.Results)
 S3method("[[<-",DRAC.list)
@@ -53,6 +54,7 @@ S3method(print,DRAC.highlights)
 S3method(print,DRAC.list)
 S3method(rep,RLum)
 S3method(row.names,RLum.Data.Spectrum)
+S3method(subset,RLum.Analysis)
 S3method(subset,Risoe.BINfileData)
 S3method(summary,RLum.Analysis)
 S3method(summary,RLum.Data.Curve)
@@ -64,19 +66,23 @@ export(CW2pHMi)
 export(CW2pLM)
 export(CW2pLMi)
 export(CW2pPMi)
+export(PSL2Risoe.BINfileData)
 export(Risoe.BINfileData2RLum.Analysis)
 export(Second2Gray)
+export(analyse_FadingMeasurement)
 export(analyse_IRSAR.RF)
 export(analyse_SAR.CWOSL)
 export(analyse_SAR.TL)
 export(analyse_baSAR)
 export(analyse_pIRIRSequence)
+export(analyse_portableOSL)
 export(app_RLum)
 export(apply_CosmicRayRemoval)
 export(apply_EfficiencyCorrection)
 export(bin.RLum.Data.Curve)
 export(bin_RLum.Data)
 export(calc_AliquotSize)
+export(calc_AverageDose)
 export(calc_CentralDose)
 export(calc_CommonDose)
 export(calc_CosmicDoseRate)
@@ -86,6 +92,7 @@ export(calc_FiniteMixture)
 export(calc_FuchsLang2001)
 export(calc_HomogeneityTest)
 export(calc_IEU)
+export(calc_Kars2008)
 export(calc_MaxDose)
 export(calc_MinDose)
 export(calc_OSLLxTxRatio)
@@ -94,6 +101,10 @@ export(calc_Statistics)
 export(calc_TLLxTxRatio)
 export(calc_ThermalLifetime)
 export(calc_gSGC)
+export(convert_BIN2CSV)
+export(convert_Daybreak2CSV)
+export(convert_PSL2CSV)
+export(convert_XSYG2CSV)
 export(extract_IrradiationTimes)
 export(fit_CWCurve)
 export(fit_LMCurve)
@@ -102,6 +113,10 @@ export(get_Quote)
 export(get_RLum)
 export(get_Risoe.BINfileData)
 export(get_rightAnswer)
+export(github_branches)
+export(github_commits)
+export(github_issues)
+export(install_DevelopmentVersion)
 export(is.RLum)
 export(is.RLum.Analysis)
 export(is.RLum.Data)
@@ -136,6 +151,7 @@ export(plot_Risoe.BINfileData)
 export(plot_ViolinPlot)
 export(read_BIN2R)
 export(read_Daybreak2R)
+export(read_PSL2R)
 export(read_SPE2R)
 export(read_XSYG2R)
 export(replicate_RLum)
@@ -143,12 +159,14 @@ export(report_RLum)
 export(sTeve)
 export(set_RLum)
 export(set_Risoe.BINfileData)
+export(smooth_RLum)
 export(structure_RLum)
 export(template_DRAC)
 export(tune_Data)
 export(use_DRAC)
 export(verify_SingleGrainData)
 export(write_R2BIN)
+export(write_RLum2CSV)
 exportClasses(RLum)
 exportClasses(RLum.Analysis)
 exportClasses(RLum.Data)
@@ -166,13 +184,13 @@ exportMethods(replicate_RLum)
 exportMethods(set_RLum)
 exportMethods(set_Risoe.BINfileData)
 exportMethods(show)
+exportMethods(smooth_RLum)
 exportMethods(structure_RLum)
-import(bbmle)
 import(data.table)
+import(magrittr)
 import(methods)
 import(utils)
 importClassesFrom(raster,RasterBrick)
-importFrom(Rcpp,evalCpp)
 importFrom(grDevices,adjustcolor)
 importFrom(grDevices,axisTicks)
 importFrom(grDevices,colorRampPalette)
@@ -180,6 +198,7 @@ importFrom(grDevices,dev.off)
 importFrom(grDevices,gray.colors)
 importFrom(grDevices,rgb)
 importFrom(grDevices,topo.colors)
+importFrom(grDevices,xy.coords)
 importFrom(graphics,abline)
 importFrom(graphics,arrows)
 importFrom(graphics,axTicks)
@@ -200,6 +219,7 @@ importFrom(graphics,lines)
 importFrom(graphics,mtext)
 importFrom(graphics,par)
 importFrom(graphics,persp)
+importFrom(graphics,plot)
 importFrom(graphics,plot.default)
 importFrom(graphics,points)
 importFrom(graphics,polygon)
@@ -207,17 +227,24 @@ importFrom(graphics,rug)
 importFrom(graphics,segments)
 importFrom(graphics,text)
 importFrom(graphics,title)
+importFrom(httr,GET)
+importFrom(httr,accept_json)
+importFrom(httr,content)
+importFrom(httr,status_code)
 importFrom(parallel,makeCluster)
 importFrom(parallel,parLapply)
 importFrom(parallel,stopCluster)
 importFrom(raster,brick)
 importFrom(raster,contour)
 importFrom(raster,nlayers)
+importFrom(raster,plot)
 importFrom(raster,plotRGB)
 importFrom(raster,raster)
 importFrom(stats,approx)
 importFrom(stats,as.formula)
+importFrom(stats,coef)
 importFrom(stats,complete.cases)
+importFrom(stats,confint)
 importFrom(stats,density)
 importFrom(stats,dnorm)
 importFrom(stats,glm)
@@ -229,7 +256,9 @@ importFrom(stats,nls)
 importFrom(stats,nls.control)
 importFrom(stats,pchisq)
 importFrom(stats,pnorm)
+importFrom(stats,predict)
 importFrom(stats,quantile)
+importFrom(stats,residuals)
 importFrom(stats,rnorm)
 importFrom(stats,runif)
 importFrom(stats,sd)
@@ -239,6 +268,7 @@ importFrom(stats,smooth.spline)
 importFrom(stats,spline)
 importFrom(stats,t.test)
 importFrom(stats,uniroot)
+importFrom(stats,update)
 importFrom(stats,var)
 importFrom(stats,weighted.mean)
-useDynLib(Luminescence)
+useDynLib(Luminescence, .registration = TRUE)
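
The final NAMESPACE hunk switches to useDynLib(Luminescence, .registration = TRUE), matching the new src/Luminescence_init.c in the diffstat: the compiled routines are now registered with R by name. Once the package is loaded, the registered entry points can be listed with base R (illustrative only; it assumes the package is installed):

    ## illustrative sketch: list the natively registered routines
    library(Luminescence)
    getDLLRegisteredRoutines("Luminescence")  # .Call/.C entry points by name
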
diff --git a/NEWS b/NEWS
index 5ac1963..9467000 100644
--- a/NEWS
+++ b/NEWS
@@ -1,22 +1,491 @@
 NEWS for the R Package Luminescence
 
-Changes in version 0.6.4 (9th September 2016):
+Changes in version 0.7.5 (30th June, 2017):
+
+  Bugfixes and changes:
+
+         • ‘analyse_SAR.CWOSL()’
+
+             • If the signal integral was wrong, the default value was
+               not set correctly (#46).
+
+         • ‘calc_AverageDose()’
+
+             • Update documentation and add produced output,
+
+             • unify data.frame return output arguments (all capital
+               letters).
+
+         • ‘calc_FastRatio()’
+
+             • Update slot names, which led to an output error.
+
+         • ‘extract_IrradiationTimes()’
+
+             • The exported BINX-file now works with the Analyst and
+               the g-value can be calculated therein (thanks to Geoff
+               Duller).
+
+         • ‘plot_FilterCombinations()’
+
+             • Calculate optical density and return it,
+
+             • fix calculation of the transmission window,
+
+             • improve plot output.
+
+         • ‘plot_RadialPlot()’
+
+             • Fix an error which occasionally occurred if a list of
+               ‘data.frame’s is provided (thanks to Christina Neudorf
+               for spotting the bug).
+
+         • ‘read_BIN2R()’
+
+             • Improve error messages for corrupted BIN/BINX-files,
+
+             • ensure that the file connection is always closed
+               properly.
+
+         • ‘Risoe.BINfileData2RLum.Analysis()’
+
+             • The grain selection was not accepted and caused a
+               constant error (#45).
+
+         • ‘use_DRAC()’
+
+             • The DRAC URL had changed; fixed.
+
+  Miscellaneous:
+
+         • Fix package welcome message.
+
+Changes in version 0.7.4 (31st March, 2017):
+
+  Changes in S4-classes and methods:
+
+         • ‘get_RLum’ for ‘RLum.Analysis’-objects now returns an error
+           and ‘NULL’ if the ‘record.id’ is not valid.
 
   Bugfixes and changes:
 
          • ‘analyse_baSAR()’
 
-             • Fix problem that causes a function crash if an XLS-file
-               was provided as input for the grain selection.
+             • The option to force the dose response curve through the
+               origin was not correctly implemented; fixed.
+
+         • ‘analyse_FadingMeasurement()’
+
+             • The function returned unreliable results because the
+               time since irradiation had been doubled. This bug only
+               affected Lx/Tx data imported from an XSYG-file.
+
+         • ‘analyse_SAR.TL()’
+
+             • A test code snippet made it into the released package;
+               as a result, the Lx/Tx error was taken as a fixed
+               fraction (10/100) of the Lx/Tx value itself and the
+               calculated error was not considered; corrected,
+
+             • function returns ‘NA’ for the error if the background
+               signals are similar and the error would become 0,
+
+             • new argument ‘integral_input’ added to allow for an
+               integral definition based on temperatures and not
+               channels.
+
+         • ‘calc_TLLxTxRatio()’
+
+             • Arguments ‘Lx.data.background’ and ‘Tx.data.background’
+               are now pre-set to ‘NULL’, i.e. the function no longer
+               checks for missing entries.
+
+         • ‘plot_KDE()’
+
+             • Further support for layout options as requested by
+               Christopher Luethgens.
+
+         • ‘plot_GrowthCurve()’
+
+             • Rename the options of the argument ‘mode’ from
+               ‘'regenerative'’ and ‘'additive'’ to ‘'interpolation'’
+               and ‘'extrapolation'’,
+
+             • fix a rather rare bug using the combination
+               ‘fit.force_through_origin = FALSE’ and ‘mode =
+               "extrapolation"’,
+
+             • the graphical representation for ‘mode =
+               "extrapolation"’ was not correct (#38).
+
+         • ‘plot_RLum.Data.Spectrum()’
+
+             • Fix wrong axis tick labels for the interactive plot
+               option (#39),
+
+             • correct manual.
+
+         • ‘plot_RLum.Analysis()’
+
+             • Add support for the argument 'type' if the argument
+               'combine = TRUE' is used.
+
+         • ‘read_BIN2R()’
+
+             • Correct minor bug while importing corrupt BIN-files,
+
+             • add support for internet connections,
+
+             • if a directory was provided, the function was trapped in
+               an endless loop (#36).
+
+         • ‘write_R2BIN()’
+
+             • Argument 'BL_UNIT' was not correctly exported; fixed,
+
+             • export behaviour for BIN-file version 08 improved.
+
+  Miscellaneous:
+
+         • BIN-file example data sets can now be exported without error
+           to BIN-files using ‘write_R2BIN()’.
+
+Changes in version 0.7.3 (8th February, 2017):
+
+  Bugfixes and changes:
+
+         • ‘Risoe.BINfileData()’
+
+             • Correct for mistakes in the manual.
+
+         • ‘write_R2BIN()’
+
+             • Correct for broken function (introduced with v0.7.0).
+
+  Miscellaneous:
+
+         • Correct wrong package date format.
+
+         • Add NEWS again to the package.
+
+Changes in version 0.7.2 (7th February (evening), 2017):
+
+        • The CRAN check on the Solaris machines gave an error while
+          performing the (on all other platforms successful) unit
+          tests. Consequently, and to reduce the load on the CRAN
+          resources, all tests are skipped on CRAN.
+
+        • This version never made it on CRAN!
+
+Changes in version 0.7.1 (6th February (evening), 2017):
+
+        • This release accounts for the CRAN check errors on the
+          Solaris machines by preventing the unfortunate overload of
+          the C++ function pow() with integer values.
+
+Changes in version 0.7.0 (6th February (morning), 2017):
+
+  New functions:
+
+         • ‘analyse_FadingMeasurement()’: Analyse fading measurements
+           to calculate g-values and to estimate the density of
+           recombination centres.
+
+         • ‘analyse_portableOSL()’: The function analyses CW-OSL curve
+           data produced by a SUERC portable OSL reader and produces a
+           combined plot of OSL/IRSL signal intensities, OSL/IRSL
+           depletion ratios and the IRSL/OSL ratio.
+
+         • ‘calc_Kars2008()’: A function to calculate the expected
+           sample specific fraction of saturation following Kars et al.
+           (2008) and Huntley (2006).
+
+         • ‘calc_AverageDose()’: Function to calculate the average dose
+           and its extrinsic dispersion.
+
+         • ‘convert_BIN2CSV()’: wrapper function around the functions
+           ‘read_BIN2R()’ and ‘write_RLum2CSV()’ to convert a BIN-file
+           to CSV-files, as far as possible.
+
+         • ‘convert_Daybreak2CSV()’: wrapper function around the
+           functions ‘read_Daybreak2R()’ and ‘write_RLum2CSV()’ to
+           convert Daybreak measurement data (TXT-file, DAT-file) to
+           CSV-files, as far as possible.
+
+         • ‘convert_PSL2CSV()’: wrapper function around the functions
+           ‘read_PSL2R()’ and ‘write_RLum2CSV()’ to convert a PSL-file
+           (SUERC portable OSL reader file format) to CSV-files, as far
+           as possible.
+
+         • ‘convert_XSYG2CSV()’: wrapper function around the functions
+           ‘read_XSYG2R()’ and ‘write_RLum2CSV()’ to convert an
+           XSYG-file to CSV-files, as far as possible.
+
+         • ‘github_branches(), github_commits(), github_issues()’: R
+           Interface to the GitHub API v3. These functions can be used
+           to query a specific repository hosted on GitHub.
+
+         • ‘install_DevelopmentVersion()’: This function is a
+           convenient method for installing the development version of
+           the R package 'Luminescence' directly from GitHub.
+
+         • ‘PSL2Risoe.BINfileData()’: Converts an ‘RLum.Analysis’
+           object produced by the function ‘read_PSL2R()’ to an
+           ‘Risoe.BINfileData’ object.
+
+         • ‘read_PSL2R()’: Imports PSL files produced by a SUERC
+           portable OSL reader into R.
+
+         • ‘smooth_RLum()’: wrapper function to call the corresponding
+           methods to smooth data based on the function ‘zoo::rollmean’.
+
+         • ‘write_RLum2CSV()’: Exports ‘RLum’-objects to CSV-files to
+           improve compatibility with other software. Only numerical
+           values are supported, i.e., ‘data.frame’, ‘matrix’ and
+           ‘numeric’. (A combined usage sketch of several of the new
+           functions is given below, after the NEWS entries.)
+
+  New example data:
+
+         • ‘ExampleData.Fading’: Example data set for fading
+           measurements of the IR50, IR100, IR150 and IR225 feldspar
+           signals of sample UNIL/NB123. It further contains regular
+           equivalent dose measurement data of the same sample, which
+           can be used to apply a fading correction to. These data were
+           kindly provided by Georgina King.
+
+  Changes in S4-classes and methods:
+
+         • Method ‘get_RLum’ for ‘RLum.Analysis’-objects did not
+           respect ‘.pid’, fixed.
+
+         • Method ‘get_RLum’ for ‘list’-objects now accepts lists with
+           all kinds of ‘RLum’-objects. Previously, only lists of
+           ‘RLum.Analysis’-objects were allowed.
+
+         • ‘plot_RLum’ was not passing the argument ‘sub’, as it was
+           fetched by the partial argument matching; fixed.
+
+         • ‘set_RLum’ produced ‘NA’ as originator, if the function
+           calling the function ‘set_RLum()’ was called from outside of
+           the package using the double colons (e.g.,
+           ‘Luminescence::function()’); fixed.
+
+         • ‘smooth_RLum’: method support for ‘RLum.Data.Curve’,
+           ‘RLum.Analysis’ and ‘list’s of these objects implemented.
+
+  Bugfixes and changes:
+
+         • ‘analyse_baSAR()’
+
+             • Due to a typo in the manual the ‘method_control’
+               parameter ‘variable.names’ did not work when typed as
+               documented (the manual said 'variables.names', but
+               correct is 'variable.names'); typo corrected,
+
+             • minor improvements and error corrections.
+
+             • ‘analyse_IRSAR.RF()’
+
+                 • Add option for a vertical sliding of the RF_nat
+                   curve (‘method_control = list(vslide_range =
+                   'auto')’). This feature has beta status and usage
+                   for publication work is not recommended yet. By
+                   default no vertical sliding is applied,
+
+                 • allow a parallel processing of MC runs by using the
+                   argument ‘method_control = list(cores = 'auto')’.
+
+             • ‘analyse_SAR.CWOSL()’
+
+                 • Fix wrongly set threshold value for recuperation
+                   rate (#26),
+
+                 • fix a rare bug for the combination 'recycling.ratio =
+                   NA' and more than one provided recycling point,
+
+                 • a check has been implemented to refrain from using
+                   wrong rejection criteria keywords.
+
+             • ‘calc_AliquotSize()’
+
+                 • Console output can now be suppressed via 'verbose =
+                   FALSE' (#24).
+
+             • ‘calc_CosmicDoseRate()’
+
+                 • Console output can now be suppressed via 'verbose =
+                   FALSE' (#24).
+
+             • ‘calc_FastRatio()’
+
+                 • New arguments 'Ch_L2' and 'Ch_L3' to allow the user
+                   to specify custom values for channels L2 and L3.
+                   Feature requested by A. Versendaal (#29).
+
+             • ‘calc_FadingCorr()’
+
+                 • Fixed a bug where the function would crash when
+                   providing an ‘RLum.Results’ object for ‘g_value’,
+
+                 • new argument ‘interval’ to control the age interval
+                   for solving the equation via ‘uniroot’.
+
+             • ‘calc_FiniteMixture()’
+
+                 • Fixed a bug where certain arguments were not passed
+                   to `plot_RLum.Results` so that the plot was not as
+                   customisable as intended.  Thanks to Daniele
+                   Questiaux for reporting the bug.
+
+             • ‘calc_MaxDose()’
+
+                 • Fixed a bug in the console output, which provided
+                   wrong values for the asymmetric error on gamma (Note
+                   that the values in the output object were correct!).
+                   Thankfully reported by Xue Rui.
+
+             • ‘calc_Statistics()’
+
+                 • The argument ‘n.MC’ accepts the new value ‘NULL’,
+                   which is now used as default. With this, the basic
+                   statistical measures are in accordance with the
+                   expectations (e.g., the standard deviation is
+                   returned by default in the conventional way and not
+                   calculated using an MC simulation).
+
+             • ‘calc_OSLLxTxRatio()’
+
+                 • Add argument ‘use_previousBG’ to use the background
+                   of the Lx-curve to get the net signal of the
+                   Tx-curve (request #15).
+
+             • ‘fit_CWCurve()’
+
+                 • Change order of ‘RLum.Results’ output list elements,
+
+                 • rename first element to ‘data’,
+
+                 • add element slot 'info'.
+
+             • ‘fit_LMCurve()’
+
+                 • Change order of ‘RLum.Results’ output list elements,
+
+                 • rename first element to ‘data’,
+
+                 • add element slot 'info'.
+
+             • ‘model_LuminescenceSignals()’
+
+                 • Update function arguments to account for changes in
+                   RLumModel version 0.2.0.
+
+             • ‘plot_DetPlot()’
+
+                 • Correct the negative y-axis; the minimum is now the
+                   real minimum.
+
+             • ‘plot_GrowthCurve()’
+
+                 • Reduce the number of confusing warnings,
+
+                 • add new argument ‘mode’ to select the calculation
+                   mode of the function. In particular, this allows
+                   fitting data without calculating a De, or
+                   calculating a De assuming an additive dose response
+                   curve,
+
+                 • account for the very specific case that all dose
+                   points are similar. The function stops with an error
+                   and returns NULL,
+
+                 • under weird circumstances points on the growth curve
+                   were not plotted correctly; fixed.
+
+             • ‘plot_RadialPlot()’
+
+                 • Sometimes the function crashed with an out-of-bounds
+                   error if more than one data set was provided; fixed,
+
+                 • argument ‘negatives’ caused an error if not set to
+                   ‘'remove'’; fixed, along with some further errors
+                   around this option,
+
+                 • De-adjustment for negative values optimised for
+                   large scatter.
+
+             • ‘plot_RLum.Analysis()’
+
+                 • The usage of the argument ‘smooth’ led to a crash;
+                   fixed.
+
+             • ‘plot_RLum.Data.Curve()’
+
+                 • The function no longer stops if the curve contains
+                   ‘NA’ values, but only if the curve consists entirely
+                   of ‘NA’ values.
+
+             • ‘plot_RLum.Data.Spectrum()’
+
+                 • The interactive plot option was broken with the last
+                   update of the package 'plotly'; fixed.
+
+             • ‘plot_ViolinPlot()’
+
+                 • The function erroneously produced a NA value
+                   warning; fixed.
+
+             • ‘read_BIN2R()’
+
+                 • If BIN-files are imported automatically in
+                   combination with the argument ‘pattern’, the
+                   function now skips non-BIN-files without crashing,
+
+                 • add new argument ‘ignore.RECTYPE’ to provide a
+                   solution for broken BIN-files or BIN-files with
+                   non-documented entries. Furthermore, the general
+                   behaviour for such cases has been optimised.
+
+             • ‘read_Daybreak2R()’
+
+                 • Add support for DAT-files produced by a 1100 reader
+                   using the software (TLAPLLIC v.3.2). Thanks to
+                   Antoine Zink,
+
+                 • minor error corrections and adding example code.
+
+             • ‘template_DRAC()’
+
+                 • Fixed a typo in the column names (#28).
+
+             • ‘use_DRAC()’
+
+                 • Now supports DRAC v1.2 and the newly introduced CSV
+                   input template.  Older v1.1 excel sheet input
+                   templates are still supported, but users are highly
+                   encouraged to use the new CSV file.
+
+                 • Columns in the output tables are now assigned proper
+                   classes (#27).
+
+  Internals:
+
+         • The internal function converting BIN-file curves to
+           RLum.Data.Curve() objects has been optimised and, among
+           other things, now uses a function written with Rcpp to
+           create the curve matrix. The conversion now works ca. two
+           times faster,
+
+         • add ‘`[<-]`’ method for ‘RLum.Data.Curve’ objects,
 
-         • ‘analyse_pIRIRSequence()’
+         • a hint on how to cite a function is now added automatically
+           to every major function manual page,
 
-             • Account for a minor layout problem while plotting the
-               combined growth curve (y-axis scaling was not
-               sufficient)
+         • add 'magrittr' to the package dependencies (imports) to
+           further support the usage of this amazing pipe operator,
 
-         • ‘plot_AbanicoPlot()’
+         • thanks to Johannes Friedrich this release introduces regular
+           unit tests using the package 'testthat' to improve the code
+           quality and stability,
 
-             • The relative and absolute standard deviation were mixed
-               up in in the summary; fixed.
+         • add internal helper function ‘.smoothing’; no Rd entry.
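
A minimal R sketch of how a few of the functions introduced in version
0.7.0 are meant to interact is given below. It is an orientation aid
only: the input file name is a placeholder and the argument names
should be checked against the manual of the installed package version.

    library(Luminescence)

    ## import a measurement file written by a SUERC portable OSL reader
    ## (the file name is a placeholder)
    psl <- read_PSL2R(file = "sample.psl")

    ## smooth the imported curves (wrapper around zoo::rollmean)
    psl_smooth <- smooth_RLum(psl)

    ## export the numerical curve data to CSV files
    write_RLum2CSV(psl, path = tempdir())

    ## query the GitHub repository of the package
    github_commits(user = "R-Lum", repo = "Luminescence", branch = "master", n = 3)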
 
diff --git a/R/CW2pHMi.R b/R/CW2pHMi.R
index 08d7242..516772c 100644
--- a/R/CW2pHMi.R
+++ b/R/CW2pHMi.R
@@ -155,166 +155,166 @@ CW2pHMi<- function(
   values,
   delta
 ){
-  
-  
+
+
   ##(1) data.frame or RLum.Data.Curve object?
   if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
-    
-    stop("[CW2pHMi()] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
-    
+
+    stop("[CW2pHMi()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE)
+
   }
-  
+
   ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
   if(is(values, "RLum.Data.Curve") == TRUE){
-    
+
     if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){
-      
-      stop(paste("[CW2pHMi()] Error: curve type ",values@recordType, "  is not allowed for the transformation!",
-                 sep=""))
-      
+
+      stop(paste("[CW2pHMi()] recordType ",values@recordType, " is not allowed for the transformation!",
+                 sep=""), call. = FALSE)
+
     }else{
-      
+
       temp.values <- as(values, "data.frame")
-      
+
     }
-    
+
   }else{
-    
+
     temp.values <- values
-    
+
   }
-  
-  
+
+
   # (1) Transform values ------------------------------------------------------
-  
+
   ##log transformation of the CW-OSL count values
   CW_OSL.log<-log(temp.values[,2])
-  
+
   ##time transformation t >> t'
   t<-temp.values[,1]
-  
+
   ##set delta
   ##if no values for delta is set selected a delta value for a maximum of
   ##two extrapolation points
   if(missing(delta)==TRUE){
-    
+
     i<-10
     delta<-i
     t.transformed<-t-(1/delta)*log(1+delta*t)
-    
+
     while(length(t.transformed[t.transformed<min(t)])>2){
-      
+
       delta<-i
       t.transformed<-t-(1/delta)*log(1+delta*t)
       i<-i+10
-      
+
     }
   }else{
-    
+
     t.transformed<-t-(1/delta)*log(1+delta*t)
-    
+
   }
-  
+
   # (2) Interpolation ---------------------------------------------------------
-  
+
   ##interpolate values, values beyond the range return NA values
   CW_OSL.interpolated <- approx(t,CW_OSL.log, xout=t.transformed, rule=1)
-  
-  
+
+
   ##combine t.transformed and CW_OSL.interpolated in a data.frame
   temp <- data.frame(x=t.transformed, y=unlist(CW_OSL.interpolated$y))
-  
+
   ##Problem: In some cases the interpolation algorithm is not working properly
   ##and Inf or NaN values are returned
-  
+
   ##fetch row number of the invalid values
   invalid_values.id <- c(which(is.infinite(temp[,2]) | is.nan(temp[,2])))
-  
+
   if(length(invalid_values.id) > 0){
-    
+
     warning(paste(length(invalid_values.id)," values have been found and replaced by the mean of the nearest values." ))
-    
+
   }
-  
+
   ##interpolate between the lower and the upper value
   invalid_values.interpolated<-sapply(1:length(invalid_values.id),
                                       function(x) {
-                                        
+
                                         mean(c(temp[invalid_values.id[x]-1,2],
                                                temp[invalid_values.id[x]+1,2]))
-                                        
+
                                       }
   )
-  
+
   ##replace invalid values in data.frame with newly interpolated values
   if(length(invalid_values.id)>0){
     temp[invalid_values.id,2]<-invalid_values.interpolated
   }
-  
+
   # (3) Extrapolate first values of the curve ---------------------------------
-  
+
   ##(a) - find index of first rows which contain NA values (needed for extrapolation)
   temp.sel.id<-min(which(is.na(temp[,2])==FALSE))
-  
+
   ##(b) - fit linear function
   fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))
-  
+
   ##select values to extrapolate and predict (extrapolate) values based on the fitted function
   x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1])
   y.i<-predict(fit.lm,x.i)
-  
+
   ##replace NA values by extrapolated values
   temp[1:length(y.i),2]<-y.i
-  
+
   ##set method values
   temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i))))
-  
+
   ##print a warning message for more than two extrapolation points
   if(length(y.i)>2){warning("t' is beyond the time resolution and more than two data points have been extrapolated!")}
-  
+
   # (4) Convert, transform and combine values ---------------------------------
-  
+
   ##unlog CW-OSL count values, i.e. log(CW) >> CW
   CW_OSL<-exp(temp$y)
-  
+
   ##set values for c and P
-  
+
   ##P is the stimulation period
   P<-max(temp.values[,1])
-  
+
   ##c is a dimensionless constant
   c<-(1+(delta*P))/(delta*P)
-  
+
   ##transform CW-OSL values to pLM-OSL values
   pHM<-((delta*t)/(1+(delta*t)))*c*CW_OSL
-  
+
   ##combine all values and exclude NA values
   temp.values <- data.frame(x=t,y.t=pHM,x.t=t.transformed,method=temp.method)
   temp.values <- na.exclude(temp.values)
-  
+
   # (5) Return values ---------------------------------------------------------
-  
+
   ##returns the same data type as the input
   if(is(values, "data.frame") == TRUE){
-    
+
     values <- temp.values
     return(values)
-    
+
   }else{
-    
-    
+
+
     ##add old info elements to new info elements
     temp.info <- c(values@info,
                    CW2pHMi.x.t = list(temp.values$x.t),
                    CW2pHMi.method = list(temp.values$method))
-    
+
     newRLumDataCurves.CW2pHMi <- set_RLum(
       class = "RLum.Data.Curve",
       recordType = values@recordType,
       data = as.matrix(temp.values[,1:2]),
       info = temp.info)
     return(newRLumDataCurves.CW2pHMi)
-    
+
   }
-  
+
 }
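
The hunk above only touches whitespace and error messages; the
transformation itself is unchanged. A minimal usage sketch, assuming
the ExampleData.CW_OSL_Curve data set shipped with the package:

    ## transform a CW-OSL curve into a pseudo hyperbolically modulated (pHM) curve
    data(ExampleData.CW_OSL_Curve, envir = environment())
    values_pHM <- CW2pHMi(ExampleData.CW_OSL_Curve, delta = 40)

    ## the result keeps the original time axis (x), the transformed signal (y.t),
    ## the transformed time axis (x.t) and the method used per data point
    head(values_pHM)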
diff --git a/R/CW2pLM.R b/R/CW2pLM.R
index 117af49..82cb40e 100644
--- a/R/CW2pLM.R
+++ b/R/CW2pLM.R
@@ -73,8 +73,7 @@ CW2pLM <- function(
 
   ##(1) data.frame or RLum.Data.Curve object?
   if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
-
-    stop("[CW2pLM] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
+    stop("[CW2pLM()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE)
 
   }
 
@@ -83,8 +82,8 @@ CW2pLM <- function(
 
     if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){
 
-      stop(paste("[CW2pLM] Error: curve type ",values@recordType, "  is not allowed for the transformation!",
-                 sep=""))
+      stop(paste("[CW2pLM()] recordType ",values@recordType, " is not allowed for the transformation!",
+                 sep=""), call. = FALSE)
 
     }else{
 
diff --git a/R/CW2pLMi.R b/R/CW2pLMi.R
index c3489eb..b0a144c 100644
--- a/R/CW2pLMi.R
+++ b/R/CW2pLMi.R
@@ -112,152 +112,151 @@ CW2pLMi<- function(
   values,
   P
 ){
-  
+
   # (0) Integrity checks -------------------------------------------------------
-  
+
   ##(1) data.frame or RLum.Data.Curve object?
   if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
-    
-    stop("[CW2pLMi()] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
-    
+    stop("[CW2pLMi()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE)
+
   }
-  
+
   ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
   if(is(values, "RLum.Data.Curve") == TRUE){
-    
+
     if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){
-      
-      stop(paste("[CW2pLMi()] Error: curve type ",values@recordType, "  is not allowed for the transformation!",
-                 sep=""))
-      
+
+      stop(paste("[CW2pLMi()] recordType ",values@recordType, " is not allowed for the transformation!",
+                 sep=""), call. = FALSE)
+
     }else{
-      
+
       temp.values <- as(values, "data.frame")
-      
+
     }
-    
+
   }else{
-    
+
     temp.values <- values
-    
+
   }
-  
-  
+
+
   # (1) Transform values ------------------------------------------------------------------------
-  
-  
+
+
   ##(a) log transformation of the CW-OSL count values
   CW_OSL.log<-log(temp.values[,2])
-  
+
   ##(b) time transformation t >> t'
   t<-temp.values[,1]
-  
+
   ##set P
   ##if no values for P is set selected a P value for a maximum of
   ##two extrapolation points
   if(missing(P)==TRUE){
-    
+
     i<-10
     P<-1/i
     t.transformed<-0.5*1/P*t^2
-    
+
     while(length(t.transformed[t.transformed<min(t)])>2){
-      
+
       P<-1/i
       t.transformed<-0.5*1/P*t^2
       i<-i+10
-      
+
     }#end::while
   }else{
-    
-    if(P==0){stop("[CW2pLMi] Error: P has to be > 0!")}
+
+    if(P==0){stop("[CW2pLMi] P has to be > 0!", call. = FALSE)}
     t.transformed<-0.5*1/P*t^2
-    
+
   }
   #endif
-  
+
   # (2) Interpolation ---------------------------------------------------------------------------
-  
+
   ##interpolate values, values beyond the range return NA values
   CW_OSL.interpolated<-approx(t,CW_OSL.log, xout=t.transformed, rule=1 )
-  
+
   ##combine t.transformed and CW_OSL.interpolated in a data.frame
   temp<-data.frame(x=t.transformed, y=unlist(CW_OSL.interpolated$y))
-  
+
   ##Problem: In rare cases the interpolation is not working properly and Inf or NaN values are returned
-  
+
   ##Fetch row number of the invalid values
   invalid_values.id<-c(which(is.infinite(temp[,2]) | is.nan(temp[,2])))
-  
+
   ##interpolate between the lower and the upper value
   invalid_values.interpolated<-sapply(1:length(invalid_values.id),
                                       function(x) {
                                         mean(c(temp[invalid_values.id[x]-1,2],temp[invalid_values.id[x]+1,2]))
                                       }
   )
-  
+
   ##replace invalid values in data.frame with newly interpolated values
   if(length(invalid_values.id)>0){
     temp[invalid_values.id,2]<-invalid_values.interpolated
   }
-  
+
   # (3) Extrapolate first values of the curve ---------------------------------------------------
-  
-  
+
+
   ##(a) - find index of first rows which contain NA values (needed for extrapolation)
   temp.sel.id<-min(which(is.na(temp[,2])==FALSE))
-  
+
   ##(b) - fit linear function
   fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))
-  
+
   ##select values to extrapolate and predict (extrapolate) values based on the fitted function
   x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1])
   y.i<-predict(fit.lm,x.i)
-  
+
   ##replace NA values by extrapolated values
   temp[1:length(y.i),2]<-y.i
-  
+
   ##set method values
   temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i))))
-  
+
   ##print a warning message for more than two extrapolation points
   if(length(y.i)>2){warning("t' is beyond the time resolution and more than two data points have been extrapolated!")}
-  
+
   # (4) Convert, transform and combine values ---------------------------------------------------
-  
+
   ##unlog CW-OSL count values, i.e. log(CW) >> CW
   CW_OSL<-exp(temp$y)
-  
+
   ##transform CW-OSL values to pLM-OSL values
   pLM<-1/P*t*CW_OSL
-  
+
   ##combine all values and exclude NA values
   temp.values <- data.frame(x=t,y.t=pLM,x.t=t.transformed, method=temp.method)
   temp.values <- na.exclude(temp.values)
-  
+
   # (5) Return values ---------------------------------------------------------------------------
-  
+
   ##returns the same data type as the input
   if(is(values, "data.frame") == TRUE){
-    
+
     values <- temp.values
     return(values)
-    
+
   }else{
-    
-    
+
+
     ##add old info elements to new info elements
     temp.info <- c(values@info,
                    CW2pLMi.x.t = list(temp.values$x.t),
                    CW2pLMi.method = list(temp.values$method))
-    
+
     newRLumDataCurves.CW2pLMi <- set_RLum(
       class = "RLum.Data.Curve",
       recordType = values@recordType,
       data = as.matrix(temp.values[,1:2]),
       info = temp.info)
     return(newRLumDataCurves.CW2pLMi)
-    
+
   }
-  
+
 }
diff --git a/R/CW2pPMi.R b/R/CW2pPMi.R
index 244b854..b688c7d 100644
--- a/R/CW2pPMi.R
+++ b/R/CW2pPMi.R
@@ -119,135 +119,133 @@ CW2pPMi<- function(
   values,
   P
 ){
-  
+
   # (0) Integrity checks ------------------------------------------------------
-  
+
   ##(1) data.frame or RLum.Data.Curve object?
   if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){
-    
-    stop("[CW2pPMi()] Error: 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!")
-    
+    stop("[CW2pPMi()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE)
+
   }
-  
+
   ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
   if(is(values, "RLum.Data.Curve") == TRUE){
-    
+
     if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){
-      
-      stop(paste("[CW2pPMi()] Error: curve type ",values@recordType, "  is not allowed for the transformation!",
-                 sep=""))
-      
+      stop(paste("[CW2pPMi()] recordType ",values@recordType, " is not allowed for the transformation!",
+                 sep=""), call. = FALSE)
+
     }else{
-      
+
       temp.values <- as(values, "data.frame")
-      
+
     }
-    
+
   }else{
-    
+
     temp.values <- values
-    
+
   }
-  
-  
+
+
   # (3) Transform values ------------------------------------------------------
-  
+
   ##log transformation of the CW-OSL count values
   CW_OSL.log<-log(temp.values[,2])
-  
+
   ##time transformation t >> t'
   t<-temp.values[,1]
-  
+
   ##set P
   ##if no values for P is set selected a P value for a maximum of
   ##two extrapolation points
   if(missing(P)==TRUE){
-    
+
     i<-1
     P<-1/i
     t.transformed<-(1/3)*(1/P^2)*t^3
-    
+
     while(length(t.transformed[t.transformed<min(t)])>2){
-      
+
       P<-1/i
       t.transformed<-(1/3)*(1/P^2)*t^3
       i<-i+1
-      
+
     }
   }else{
-    
+
     t.transformed<-(1/3)*(1/P^2)*t^3
-    
+
   }
-  
+
   # (4) Interpolation ---------------------------------------------------------
-  
-  
+
+
   ##interpolate values, values beyond the range return NA values
   CW_OSL.interpolated <- approx(t, CW_OSL.log, xout=t.transformed, rule=1 )
-  
+
   ##combine t.transformed and CW_OSL.interpolated in a data.frame
   temp<-data.frame(x=t.transformed, y = unlist(CW_OSL.interpolated$y))
-  
-  
+
+
   # (5) Extrapolate first values of the curve ---------------------------------
-  
+
   ##(a) - find index of first rows which contain NA values (needed for extrapolation)
   temp.sel.id<-min(which(is.na(temp[,2])==FALSE))
-  
+
   ##(b) - fit linear function
   fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))
-  
+
   ##select values to extrapolate and predict (extrapolate) values based on the fitted function
   x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1])
   y.i<-predict(fit.lm,x.i)
-  
+
   ##replace NA values by extrapolated values
   temp[1:length(y.i),2]<-y.i
-  
+
   ##set method values
   temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i))))
-  
-  
+
+
   ##print a warning message for more than two extrapolation points
   if(temp.sel.id>2){warning("t' is beyond the time resolution. Only two data points have been extrapolated, the first ",temp.sel.id-3, " points have been set to 0!")}
-  
+
   # (6) Convert, transform and combine values ---------------------------------
-  
+
   ##unlog CW-OSL count values, i.e. log(CW) >> CW
   CW_OSL<-exp(temp$y)
-  
+
   ##transform CW-OSL values to pPM-OSL values
-  
+
   pPM<-(t^2/P^2)*CW_OSL
-  
+
   ##combine all values and exclude NA values
   temp.values <- data.frame(x=t, y.t=pPM, x.t=t.transformed, method=temp.method)
   temp.values <- na.exclude(temp.values)
-  
+
   # (7) Return values ---------------------------------------------------------
-  
+
   ##returns the same data type as the input
   if(is(values, "data.frame") == TRUE){
-    
+
     values <- temp.values
     return(values)
-    
+
   }else{
-    
-    
+
+
     ##add old info elements to new info elements
     temp.info <- c(values@info,
                    CW2pPMi.x.t = list(temp.values$x.t),
                    CW2pPMi.method = list(temp.values$method))
-    
+
     newRLumDataCurves.CW2pPMi <- set_RLum(
       class = "RLum.Data.Curve",
       recordType = values@recordType,
       data = as.matrix(temp.values[,1:2]),
       info = temp.info)
     return(newRLumDataCurves.CW2pPMi)
-    
+
   }
-  
+
 }
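
Apart from the cosmetic changes above, the three CW-curve
transformation functions differ only in the time transformation
t -> t' applied before interpolation. Read directly from the code, the
transformations are (sketched here as stand-alone helpers; 'delta' and
'P' correspond to the arguments of the respective functions):

    ## pseudo hyperbolic modulation (CW2pHMi)
    t_pHM <- function(t, delta) t - (1 / delta) * log(1 + delta * t)

    ## pseudo linear modulation (CW2pLMi)
    t_pLM <- function(t, P) 0.5 * (1 / P) * t^2

    ## pseudo parabolic modulation (CW2pPMi)
    t_pPM <- function(t, P) (1 / 3) * (1 / P^2) * t^3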
diff --git a/R/Luminescence-package.R b/R/Luminescence-package.R
index 2bed4ca..795b44f 100644
--- a/R/Luminescence-package.R
+++ b/R/Luminescence-package.R
@@ -6,60 +6,60 @@
 #' plotting of equivalent dose distributions.
 #'
 #' \tabular{ll}{ Package: \tab Luminescence\cr Type: \tab Package\cr Version:
-#' \tab 0.6.4 \cr Date: \tab 2016-09-09 \cr License: \tab GPL-3\cr }
+#' \tab 0.7.5 \cr Date: \tab 2017-06-30 \cr License: \tab GPL-3\cr }
 #'
 #' @name Luminescence-package
 #' @aliases Luminescence-package Luminescence
 #' @docType package
-#' @author \bold{Authors} (alphabetic order)
+#' @author \bold{Full list of authors and contributors} (alphabetic order)
 #'
 #' \tabular{ll}{
 #' Christoph Burow \tab University of Cologne, Germany \cr
+#' Claire Christophe \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
 #' Michael Dietze \tab GFZ Helmholtz Centre Potsdam, Germany \cr
 #' Julie Durcan \tab University of Oxford, United Kingdom \cr
 #' Manfred Fischer\tab University of Bayreuth, Germany \cr
 #' Margret C. Fuchs \tab Helmholtz-Zentrum Dresden-Rossendorf, Helmholtz-Institute Freiberg for Resource Technology,
 #' Freiberg, Germany \cr
 #' Johannes Friedrich \tab University of Bayreuth, Germany \cr
-#' Georgina King \tab University of Cologne, Germany \cr
+#' Guillaume Guerin \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+#' Georgina King \tab Institute of Geological Sciences, University of Bern, Switzerland \cr
 #' Sebastian Kreutzer \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
 #' Norbert Mercier \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+#' Anne Philippe \tab  Universite de Nantes and ANJA INRIA, Rennes, France \cr
 #' Christoph Schmidt \tab University of Bayreuth, Germany \cr
-#' Rachel K. Smedley \tab Aberystwyth University, United Kingdom
-#
+#' Rachel K. Smedley \tab Aberystwyth University, United Kingdom \cr
+#' Antoine Zink \tab C2RMF, Palais du Louvre, Paris, France
 #' }
 #'
-#' \bold{Beta-Tester}
-#'
-#' Thomas Kolb, University of Bayreuth, Germany\cr
-#'
-#' \bold{Supervisor}
+#' \bold{Supervisor of the initial version in 2012}
 #'
 #' Markus Fuchs, Justus-Liebig-University Giessen, Germany\cr
 #'
 #' \bold{Support contact}
 #'
-#' \email{developers@@r-luminescence.de}\cr
+#' \email{developers@@r-luminescence.org}\cr
 #'
 #' We may further encourage the usage of our support forum. For this please
 #' visit our project website (link below).
 #'
 #' \bold{Bug reporting}
 #'
-#' \email{bugtracker@@r-luminescence.de} \cr
+#' \email{developers@@r-luminescence.org} or \cr
+#' \url{https://github.com/R-Lum/Luminescence/issues} \cr
 #'
 #' \bold{Project website}
 #'
-#' \url{http://www.r-luminescence.de}\cr
+#' \url{http://www.r-luminescence.org}\cr
 #'
 #' \bold{Project source code repository}\cr
 #' \url{https://github.com/R-Lum/Luminescence}\cr
 #'
 #' \bold{Related package projects}\cr
 #' \url{https://cran.r-project.org/package=RLumShiny}\cr
-#' \url{http://shiny.r-luminescence.de}\cr
+#' \url{http://shiny.r-luminescence.org}\cr
 #' \url{https://cran.r-project.org/package=RLumModel}\cr
-#' \url{http://model.r-luminescence.de}\cr
+#' \url{http://model.r-luminescence.org}\cr
 #'
 #' \bold{Package maintainer}
 #'
@@ -70,7 +70,7 @@
 #'
 #' Cooperation and personal exchange between the developers is gratefully
 #' funded by the DFG (SCHM 3051/3-1) in the framework of the program
-#' "Scientific Networks". Project title: "Lum.Network: Ein
+#' "Scientific Networks". Project title: "RLum.Network: Ein
 #' Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R" (2014-2017)
 #'
 #' @references Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M.,
@@ -94,14 +94,14 @@
 #' Ancient TL 33, 16-21.
 #'
 #' @keywords package
-#' @import utils methods data.table bbmle
-#' @importFrom raster nlayers raster contour plotRGB brick
-#' @importFrom graphics plot.default frame abline mtext text lines par layout lines arrows axTicks axis barplot box boxplot contour curve grconvertX grconvertY hist legend persp points polygon rug segments title grid
-#' @importFrom grDevices adjustcolor axisTicks colorRampPalette gray.colors rgb topo.colors dev.off
-#' @importFrom stats approx as.formula complete.cases density dnorm glm lm median na.exclude na.omit nls nls.control pchisq pnorm quantile rnorm runif sd smooth smooth.spline spline t.test uniroot var weighted.mean setNames
+#' @import utils methods data.table magrittr
+#' @importFrom raster nlayers raster contour plot plotRGB brick
+#' @importFrom graphics plot plot.default frame abline mtext text lines par layout lines arrows axTicks axis barplot box boxplot contour curve grconvertX grconvertY hist legend persp points polygon rug segments title grid
+#' @importFrom grDevices adjustcolor axisTicks colorRampPalette gray.colors rgb topo.colors xy.coords dev.off
+#' @importFrom stats approx as.formula complete.cases density dnorm glm lm median na.exclude na.omit nls nls.control pchisq pnorm quantile rnorm runif sd smooth smooth.spline spline t.test uniroot var weighted.mean setNames coef confint predict update residuals
 #' @importFrom parallel parLapply makeCluster stopCluster
-#' @importFrom Rcpp evalCpp
-#' @useDynLib Luminescence
+#' @importFrom httr GET accept_json status_code content
+#' @useDynLib Luminescence, .registration = TRUE
 NULL
 
 
@@ -311,6 +311,29 @@ NULL
 
 
 
+#' Example portable OSL curve data for the package Luminescence
+#'
+#' A \code{list} of \code{\linkS4class{RLum.Analysis}} objects, each containing
+#' the same number of \code{\linkS4class{RLum.Data.Curve}} objects representing
+#' individual OSL, IRSL and dark count measurements of a sample.
+#'
+#' @name ExampleData.portableOSL
+#' @docType data
+#'
+#' @source \bold{ExampleData.portableOSL}
+#'
+#' \tabular{ll}{ Lab: \tab Cologne Luminescence Laboratory\cr Lab-Code: \tab
+#' - \cr Location: \tab Nievenheim/Germany\cr Material: \tab Fine grain quartz
+#' \cr Reference: \tab unpublished data }
+#'
+#' @keywords datasets
+#' @examples
+#'
+#' data(ExampleData.portableOSL, envir = environment())
+#' plot_RLum(ExampleData.portableOSL)
+#'
+NULL
+
 
 
 #' Example data for fit_LMCurve() in the package Luminescence
@@ -605,3 +628,101 @@ NULL
 #'
 #' @name ExampleData.DeValues
 NULL
+
+
+#' Example data for feldspar fading measurements
+#'
+#' Example data set for fading measurements of the IR50, IR100, IR150 and
+#' IR225 feldspar signals of sample UNIL/NB123. It further contains regular equivalent dose
+#' measurement data of the same sample, which can be used to apply a
+#' fading correction to.
+#'
+#'
+#' @format A \code{\link{list}} with two elements, each containing a further
+#' \code{\link{list}} of \code{\link{data.frame}}s containing the data
+#' on the fading and equivalent dose measurements:
+#'
+#' \describe{
+#'
+#' \code{$fading.data}: A named \code{\link{list}} of \code{\link{data.frame}}s,
+#' each having three named columns (\code{LxTx, LxTx.error, timeSinceIrradiation}).\cr
+#' \code{..$IR50}: Fading data of the IR50 signal.\cr
+#' \code{..$IR100}: Fading data of the IR100 signal.\cr
+#' \code{..$IR150}: Fading data of the IR150 signal.\cr
+#' \code{..$IR225}: Fading data of the IR225 signal.\cr
+#' \cr\cr
+#'
+#' \code{$equivalentDose.data}: A named \code{\link{list}} of \code{\link{data.frame}}s,
+#' each having three named columns (\code{dose, LxTx, LxTx.error}).\cr
+#' \code{..$IR50}: Equivalent dose measurement data of the IR50 signal.\cr
+#' \code{..$IR100}: Equivalent dose measurement data of the IR100 signal.\cr
+#' \code{..$IR150}: Equivalent dose measurement data of the IR150 signal.\cr
+#' \code{..$IR225}: Equivalent dose measurement data of the IR225 signal.\cr
+#' \cr\cr
+#'
+#' }
+#'
+#' @source
+#'
+#' These data were kindly provided by Georgina King. Detailed information
+#' on the sample UNIL/NB123 can be found in the reference given below. The raw
+#' data can be found in the accompanying supplementary information.
+#'
+#' @references
+#'
+#' King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016.
+#' Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004
+#'
+#' \bold{Details} \cr
+#' \tabular{ll}{
+#' Lab: \tab University of Lausanne \cr
+#' Lab-Code: \tab UNIL/NB123 \cr
+#' Location: \tab Namche Barwa (eastern Himalaya)\cr
+#' Material: \tab Coarse grained (180-212 microns) potassium feldspar \cr
+#' Units: \tab Values are given in seconds \cr
+#' Lab Dose Rate: \tab Dose rate of the beta-source at measurement ca. 0.1335 +/-
+#' 0.004 Gy/s \cr
+#' Environmental Dose Rate: \tab 7.00 +/- 0.92 Gy/ka (includes internal dose rate)
+#' }
+#'
+#'
+#' @keywords datasets
+#'
+#' @examples
+#'
+#' ## Load example data
+#' data("ExampleData.Fading", envir = environment())
+#'
+#' ## Get fading measurement data of the IR50 signal
+#' IR50_fading <- ExampleData.Fading$fading.data$IR50
+#' head(IR50_fading)
+#'
+#' ## Determine g-value and rho' for the IR50 signal
+#' IR50_fading.res <- analyse_FadingMeasurement(IR50_fading)
+#'
+#' ## Show g-value and rho' results
+#' gval <- get_RLum(IR50_fading.res)
+#' rhop <- get_RLum(IR50_fading.res, "rho_prime")
+#'
+#' gval
+#' rhop
+#'
+#' ## Get LxTx values of the IR50 DE measurement
+#' IR50_De.LxTx <- ExampleData.Fading$equivalentDose.data$IR50
+#'
+#' ## Calculate the De of the IR50 signal
+#' IR50_De <- plot_GrowthCurve(IR50_De.LxTx,
+#'                                 mode = "interpolation",
+#'                                 fit.method = "EXP")
+#'
+#' ## Extract the calculated De and its error
+#' IR50_De.res <- get_RLum(IR50_De)
+#' De <- c(IR50_De.res$De, IR50_De.res$De.Error)
+#'
+#' ## Apply fading correction (age conversion greatly simplified)
+#' IR50_Age <- De / 7.00
+#' IR50_Age.corr <- calc_FadingCorr(IR50_Age, g_value = IR50_fading.res)
+#'
+#'
+#' @name ExampleData.Fading
+NULL
diff --git a/R/PSL2Risoe.BINfileData.R b/R/PSL2Risoe.BINfileData.R
new file mode 100644
index 0000000..32a4e44
--- /dev/null
+++ b/R/PSL2Risoe.BINfileData.R
@@ -0,0 +1,186 @@
+#' Convert portable OSL data to an Risoe.BINfileData object
+#'
+#' Converts an \code{RLum.Analysis} object produced by the function \code{read_PSL2R()} to
+#' an \code{Risoe.BINfileData} object \bold{(BETA)}.
+#'
+#' This function converts an \code{\linkS4class{RLum.Analysis}} object that was produced
+#' by the \code{\link{read_PSL2R}} function to an \code{\linkS4class{Risoe.BINfileData}}.
+#' The \code{Risoe.BINfileData} can be used to write a Risoe BIN file via
+#' \code{\link{write_R2BIN}}.
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}):
+#' \code{RLum.Analysis} object produced by \code{\link{read_PSL2R}}
+#'
+#' @param ... currently not used.
+#'
+#' @return Returns an S4 \code{\linkS4class{Risoe.BINfileData}} object that can
+#' be used to write a BIN file using \code{\link{write_R2BIN}}.
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\linkS4class{Risoe.BINfileData}}
+#'
+#' @author Christoph Burow, University of Cologne (Germany)
+#'
+#' @section Function version: 0.0.1
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' # (1) load and plot example data set
+#' data("ExampleData.portableOSL", envir = environment())
+#' plot_RLum(ExampleData.portableOSL)
+#'
+#' # (2) merge all RLum.Analysis objects into one
+#' merged <- merge_RLum(ExampleData.portableOSL)
+#' merged
+#'
+#' # (3) convert to RisoeBINfile object
+#' bin <- PSL2Risoe.BINfileData(merged)
+#' bin
+#'
+#' # (4) write Risoe BIN file
+#' \dontrun{
+#' write_R2BIN(bin, "~/portableOSL.binx")
+#' }
+#'
+#' @export
+PSL2Risoe.BINfileData <- function(object, ...) {
+
+  ## INTEGRITY CHECKS ----
+  if (!inherits(object, "RLum.Analysis"))
+    stop("Only objects of class 'RLum.Analysis' are allowed.", call. = FALSE)
+  if (!all(sapply(object, class) == "RLum.Data.Curve"))
+    stop("The 'RLum.Analysis' object must only contain objects of class 'RLum.Data.Curve'.", call. = FALSE)
+  if (!all(sapply(object, function(x) x@originator) == "read_PSL2R"))
+    stop("Only objects originating from 'read_PSL2R()' are allowed.", call. = FALSE)
+
+  ## EXTRACT CURVE INFORMATION ----
+  curves <- get_RLum(object)
+
+  ## COLLECT META INFORMATION ----
+  META <- do.call(rbind, lapply(curves, function(x) {
+
+    NPOINTS <- as.integer(x@info$settings$stimulation_time)
+    LTYPE <- x@info$settings$stimulation_unit
+    COMMENT <- x@info$settings$measurement
+    HIGH <- x@info$settings$stimulation_time
+    DATE <- format(x@info$settings$Date, format = "%d%m%y")
+    TIME <- x@info$settings$Time
+    if (nchar(TIME) < 8)
+      TIME <- paste0("0", TIME)
+    SAMPLE <- x@info$settings$Sample
+    FNAME <- x@info$settings$Filename
+    SEQUENCE <- strtrim(paste(x@info$settings$Run_Name, x@info$settings$Sample_no), 8)
+
+
+    return(data.frame(NPOINTS = NPOINTS,
+                LTYPE = LTYPE,
+                COMMENT = COMMENT,
+                HIGH = HIGH,
+                DATE = DATE,
+                TIME = TIME,
+                SAMPLE = SAMPLE,
+                FNAME = FNAME,
+                SEQUENCE = SEQUENCE))
+  }))
+
+  ## SAVE DATA ----
+  DATA <- lapply(curves, function(x) {
+    as.integer(x@data[ ,2])
+  })
+
+  # SAVE METADATA ----
+  METADATA <- data.frame(ID = seq(1, length(curves), 1),
+                         SEL = rep(TRUE, length(curves)),
+                         VERSION = rep(7, length(curves)),
+                         LENGTH = 447 + 4 * META$NPOINTS,
+                         PREVIOUS = 447 + 4 * META$NPOINTS,
+                         NPOINTS = META$NPOINTS,
+                         RUN = seq(1, length(curves), 1),
+                         SET = rep(1, length(curves)),
+                         POSITION = rep(1, length(curves)),
+                         GRAIN = rep(0, length(curves)),
+                         GRAINNUMBER = rep(0, length(curves)),
+                         CURVENO = rep(0, length(curves)),
+                         XCOORD = rep(0, length(curves)),
+                         YCOORD = rep(0, length(curves)),
+                         SAMPLE = META$SAMPLE,
+                         COMMENT = META$COMMENT,
+                         SYSTEMID = rep(0, length(curves)),
+                         FNAME = META$FNAME,
+                         USER = rep("RLum", length(curves)),
+                         TIME = META$TIME,
+                         DATE = META$DATE,
+                         DTYPE = rep("Natural", length(curves)),
+                         BL_TIME = rep(0, length(curves)),
+                         BL_UNIT = rep(0, length(curves)),
+                         NORM1 = rep(0, length(curves)),
+                         NORM2 = rep(0, length(curves)),
+                         NORM3 = rep(0, length(curves)),
+                         BG = rep(0, length(curves)),
+                         SHIFT = rep(0, length(curves)),
+                         TAG = rep(1, length(curves)),
+                         LTYPE = META$LTYPE,
+                         LIGHTSOURCE = rep("None", length(curves)),
+                         LPOWER = rep(100, length(curves)),
+                         LIGHTPOWER = rep(100, length(curves)),
+                         LOW = rep(0, length(curves)),
+                         HIGH = META$HIGH,
+                         RATE = rep(0, length(curves)),
+                         TEMPERATURE = rep(0, length(curves)),
+                         MEASTEMP = rep(0, length(curves)),
+                         AN_TEMP = rep(0, length(curves)),
+                         AN_TIME = rep(0, length(curves)),
+                         TOLDELAY = rep(0, length(curves)),
+                         TOLON = rep(0, length(curves)),
+                         TOLOFF = rep(0, length(curves)),
+                         IRR_TIME = rep(0, length(curves)),
+                         IRR_TYPE = rep(0L, length(curves)),
+                         IRR_UNIT = rep(0, length(curves)),
+                         IRR_DOSERATE = rep(0, length(curves)),
+                         IRR_DOSERATEERR = rep(0, length(curves)),
+                         TIMESINCEIRR = rep(-1, length(curves)),
+                         TIMETICK = rep(1e-07, length(curves)),
+                         ONTIME = rep(0, length(curves)),
+                         OFFTIME = rep(NA, length(curves)),
+                         STIMPERIOD = rep(0, length(curves)),
+                         GATE_ENABLED = rep(0, length(curves)),
+                         ENABLE_FLAGS = rep(0, length(curves)),
+                         GATE_START = rep(0, length(curves)),
+                         GATE_STOP = rep(0, length(curves)),
+                         PTENABLED = rep(0, length(curves)),
+                         DTENABLED = rep(0, length(curves)),
+                         DEADTIME = rep(0, length(curves)),
+                         MAXLPOWER = rep(0, length(curves)),
+                         XRF_ACQTIME = rep(0, length(curves)),
+                         XRF_HV = rep(0, length(curves)),
+                         XRF_CURR = rep(0, length(curves)),
+                         XRF_DEADTIMEF = rep(0, length(curves)),
+                         SEQUENCE = META$SEQUENCE,
+                         DETECTOR_ID = rep(NA, length(curves)),
+                         LOWERFILTER_ID = rep(NA, length(curves)),
+                         UPPERFILTER_ID = rep(NA, length(curves)),
+                         ENOISEFACTOR = rep(NA, length(curves)),
+                         MARKPOS_X1 = rep(0, length(curves)),
+                         MARKPOS_Y1 = rep(0, length(curves)),
+                         MARKPOS_X2 = rep(0, length(curves)),
+                         MARKPOS_Y2 = rep(0, length(curves)),
+                         MARKPOS_X3 = rep(0, length(curves)),
+                         MARKPOS_Y3 = rep(0, length(curves)),
+                         EXTR_START = rep(0, length(curves)),
+                         EXTR_END = rep(0, length(curves)),
+                         RECTYPE = rep(0, length(curves)))
+
+  ## CREATE Risoe.BINfileData OBJECT ----
+  bin <- set_Risoe.BINfileData(METADATA = METADATA,
+                               DATA = DATA,
+                               .RESERVED = list())
+
+
+  ## RETURN VALUE ----
+  return(bin)
+}
+
+
+
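
Because the converter above fills most BIN/BINX metadata fields with
fixed defaults, a quick round trip is a useful sanity check that a
written file imports cleanly again. A minimal sketch; the temporary
file and the inspected metadata columns are arbitrary choices:

    ## convert the example data set and write it to a temporary BINX-file
    data("ExampleData.portableOSL", envir = environment())
    bin <- PSL2Risoe.BINfileData(merge_RLum(ExampleData.portableOSL))

    tmp <- tempfile(fileext = ".binx")
    write_R2BIN(bin, file = tmp)

    ## re-import the file and inspect the recovered metadata
    bin_reimport <- read_BIN2R(file = tmp)
    head(bin_reimport@METADATA[, c("NPOINTS", "LTYPE", "SAMPLE")])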
diff --git a/R/RLum.Analysis-class.R b/R/RLum.Analysis-class.R
index d61f86b..1820bb5 100644
--- a/R/RLum.Analysis-class.R
+++ b/R/RLum.Analysis-class.R
@@ -1,4 +1,4 @@
-#' @include get_RLum.R set_RLum.R length_RLum.R structure_RLum.R names_RLum.R
+#' @include get_RLum.R set_RLum.R length_RLum.R structure_RLum.R names_RLum.R smooth_RLum.R
 NULL
 
 #' Class \code{"RLum.Analysis"}
@@ -22,7 +22,7 @@ NULL
 #' @section Objects from the Class: Objects can be created by calls of the form
 #' \code{set_RLum("RLum.Analysis", ...)}.
 #'
-#' @section Class version: 0.4.6
+#' @section Class version: 0.4.8
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -128,31 +128,63 @@ setMethod("show",
             if(length(object@records) > 0){
 
               ##get object class types
-              temp <- sapply(1:length(object@records), function(x){
+              temp <- vapply(object@records, function(x){
+                is(x)[1]
 
-                is(object@records[[x]])[1]
+              }, FUN.VALUE = vector(mode = "character", length = 1))
 
-              })
 
               ##print object class types
-              sapply(1:length(table(temp)), function(x){
+              lapply(1:length(table(temp)), function(x){
 
                 ##show RLum class type
                 cat("\n\t .. :",names(table(temp)[x]),":",table(temp)[x])
 
-
                 ##show structure
                 ##set width option ... just an implementation for the tutorial output
-                ifelse(getOption("width")<=50, temp.width <- 4, temp.width  <- 10)
+                ifelse(getOption("width")<=50, temp.width <- 4, temp.width  <- 7)
+
+                ##set linebreak variable
+                linebreak <- FALSE
+                env <- environment()
+
+                ##create terminal output
+                terminal_output <-
+                  vapply(1:length(object@records),  function(i) {
+                    if (names(table(temp)[x]) == is(object@records[[i]])[1]) {
+                      if (i %% temp.width == 0 & i != length(object@records)) {
+                        assign(x = "linebreak", value = TRUE, envir = env)
+                      }
 
-                cat("\n\t .. .. : ",
-                    unlist(sapply(1:length(object@records),  function(i) {
+                      ##FIRST
+                      first <-  paste0("#", i, " ", object@records[[i]]@recordType)
+                      ##LAST
+                      if (i < length(object@records) &&
+                          !is.null(object@records[[i]]@info[["parentID"]]) &&
+                          (object@records[[i]]@info[["parentID"]] ==
+                           object@records[[i+1]]@info[["parentID"]])) {
+                        last <- " <> "
+
+                      }else {
+                        if(i == length(object@records)){
+                          last <- ""
+
+                        }else if (linebreak){
+                          last <- "\n\t .. .. : "
+                          assign(x = "linebreak", value = FALSE, envir = env)
+
+                        }else{
+                          last <- " | "
+                        }
 
-                      if(names(table(temp)[x]) == is(object@records[[i]])[1]){
-                        paste(object@records[[i]]@recordType,
-                              if(i%%temp.width==0 & i!=length(object@records)){"\n\t .. .. : "})
                       }
-                    })))
+                      return(paste0(first,last))
+                    }
+
+              }, FUN.VALUE = vector(mode = "character", length = 1))
+
+                 ##print combined output
+                 cat("\n\t .. .. : ", terminal_output, sep = "")
 
               })
 
@@ -281,6 +313,10 @@ setMethod(
 #' @param info.object [\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
 #' element
 #'
+#' @param subset \code{\link{expression}} (optional): logical expression indicating elements or rows
+#' to keep: missing values are taken as false. This argument takes precedence over all
+#' other arguments, meaning they are not considered when subsetting the object.
+#'
 #' @return
 #'
 #' \bold{\code{get_RLum}}:\cr
@@ -296,10 +332,56 @@ setMethod("get_RLum",
           signature = ("RLum.Analysis"),
 
           function(object, record.id = NULL, recordType = NULL, curveType = NULL, RLum.type = NULL,
-                   protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE, info.object = NULL){
+                   protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE, info.object = NULL, subset = NULL) {
+
+            if (!is.null(substitute(subset))) {
+
+              # To account for different lengths and elements in the @info slot we first
+              # check all unique elements
+              info_el <- unique(unlist(sapply(object at records, function(el) names(el at info))))
+
+              envir <- as.data.frame(do.call(rbind,
+                                             lapply(object at records, function(el) {
+                                               val <- c(curveType = el at curveType, recordType = el at recordType, unlist(el at info))
+
+                                               # add missing info elements and set NA
+                                               if (any(!info_el %in% names(val))) {
+                                                 val_new <- setNames(rep(NA, length(info_el[!info_el %in% names(val)])), info_el[!info_el %in% names(val)])
+                                                 val <- c(val, val_new)
+                                               }
+
+                                               # order the named char vector by its names so we dont mix up the columns
+                                               val <- val[order(names(val))]
+                                               return(val)
+                                               })
+                                             ))
+
+              ##select relevant rows
+              sel <- tryCatch(eval(
+                expr = substitute(subset),
+                envir = envir,
+                enclos = parent.frame()
+              ),
+              error = function(e) {
+                stop("\n\n [ERROR] Invalid subset options. \nValid terms are: ", paste(names(envir), collapse = ", "))
+              })
+
+              if (all(is.na(sel)))
+                sel <- FALSE
+
+              if (any(sel)) {
+                object at records <- object at records[sel]
+                return(object)
+              } else {
+                tmp <- mapply(function(name, op) { message("  ",name, ": ", paste(unique(op),  collapse = ", ")) }, names(envir), envir)
+                message("\n [ERROR] Invalid value, please refer to unique options given above.")
+                return(NULL)
+              }
+
+            }
 
             ##if info.object is set, only the info objects are returned
-            if(!is.null(info.object)) {
+            else if(!is.null(info.object)) {
 
               if(info.object %in% names(object at info)){
 
@@ -328,7 +410,16 @@ setMethod("get_RLum",
               }
 
 
-            } else{
+            } else {
+
+              ##check for records
+              if (length(object at records) == 0) {
+
+                warning("[get_RLum()] This RLum.Analysis object has no records! NULL returned!")
+                return(NULL)
+
+              }
+
               ##record.id
               if (is.null(record.id)) {
                 record.id <- c(1:length(object at records))
@@ -348,7 +439,8 @@ setMethod("get_RLum",
 
               ##check if record.id exists
               if (FALSE %in% (abs(record.id) %in% (1:length(object at records)))) {
-                stop("[get_RLum()] At least one 'record.id' is invalid!")
+                try(stop("[get_RLum()] At least one 'record.id' is invalid!", call. = FALSE))
+                return(NULL)
 
               }
 
@@ -474,7 +566,8 @@ setMethod("get_RLum",
                       class = "RLum.Analysis",
                       originator = originator,
                       records = temp,
-                      protocol = object at protocol
+                      protocol = object at protocol,
+                      .pid = object at .pid
                     )
                     return(temp)
 
@@ -499,7 +592,8 @@ setMethod("get_RLum",
                       class = "RLum.Analysis",
                       originator = originator,
                       records = list(object at records[[record.id]]),
-                      protocol = object at protocol
+                      protocol = object at protocol,
+                      .pid = object at .pid
                     )
                     return(temp)
 
@@ -679,3 +773,36 @@ setMethod("names_RLum",
 
           })
 
+
+####################################################################################################
+###smooth_RLum()
+####################################################################################################
+#' @describeIn RLum.Analysis
+#'
+#' Smoothing of \code{RLum.Data} objects contained in this \code{RLum.Analysis} object
+#' using the functions \code{\link[zoo]{rollmean}} or \code{\link[zoo]{rollmedian}}.
+#' In particular, the internal function \code{.smoothing} is used.
+#'
+#' @param ... further arguments passed to underlying methods
+#'
+#' @return
+#'
+#' \bold{\code{smooth_RLum}}\cr
+#'
+#' Same object as input, after smoothing
+#'
+#' @export
+setMethod(
+  f = "smooth_RLum",
+  signature = "RLum.Analysis",
+  function(object, ...) {
+
+        object at records <- lapply(object at records, function(x){
+          smooth_RLum(x, ...)
+
+        })
+
+    return(object)
+
+  }
+)
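
A minimal usage sketch of the new subset argument added to get_RLum() above
(not part of the upstream patch; the toy object below is built ad hoc with
set_RLum() for illustration only):

    curve_OSL <- set_RLum(
      class = "RLum.Data.Curve",
      recordType = "OSL",
      curveType = "measured",
      data = matrix(c(1:100, exp(-(1:100)/20) * 1000), ncol = 2)
    )
    curve_TL <- set_RLum(
      class = "RLum.Data.Curve",
      recordType = "TL",
      curveType = "measured",
      data = matrix(c(1:100, dnorm(1:100, mean = 50, sd = 10) * 1e5), ncol = 2)
    )
    object <- set_RLum(class = "RLum.Analysis",
                       records = list(curve_OSL, curve_TL))

    ## keep only the OSL record; valid terms are curveType, recordType and
    ## the names of the info elements of the contained records
    get_RLum(object, subset = (recordType == "OSL"))
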
diff --git a/R/RLum.Data.Curve-class.R b/R/RLum.Data.Curve-class.R
index 1cd3292..bb9a7ff 100644
--- a/R/RLum.Data.Curve-class.R
+++ b/R/RLum.Data.Curve-class.R
@@ -1,4 +1,4 @@
-#' @include get_RLum.R set_RLum.R names_RLum.R length_RLum.R bin_RLum.Data.R
+#' @include get_RLum.R set_RLum.R names_RLum.R length_RLum.R bin_RLum.Data.R smooth_RLum.R
 NULL
 
 #' Class \code{"RLum.Data.Curve"}
@@ -30,7 +30,7 @@ NULL
 #' @section Create objects from this Class: Objects can be created by calls of the form
 #' \code{set_RLum(class = "RLum.Data.Curve", ...)}.
 #'
-#' @section Class version: 0.4.1
+#' @section Class version: 0.5.0
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 #'
@@ -164,13 +164,16 @@ setMethod("show",
 
 
             ##print information
-
             cat("\n [RLum.Data.Curve]")
             cat("\n\t recordType:", object at recordType)
             cat("\n\t curveType:",  object at curveType)
             cat("\n\t measured values:", length(object at data[,1]))
             cat("\n\t .. range of x-values:", suppressWarnings(range(object at data[,1])))
-            cat("\n\t .. range of y-values:",  suppressWarnings(range(object at data[,2])))
+            cat("\n\t .. range of y-values:",
+                suppressWarnings(min(object at data[,2], na.rm = TRUE)),
+                suppressWarnings(max(object at data[,2], na.rm = TRUE)),
+                if(anyNA(object at data[,2])){"(contains NA values)"}else{""}
+               )
             cat("\n\t additional info elements:", length(object at info))
             #cat("\n\t\t >> names:", names(object at info))
           }
@@ -246,42 +249,49 @@ setMethod(
 
       ##check for missing .uid
       if(missing(.uid)){
-        info <- data at .uid
+        .uid <- data at .uid
 
       }
 
       ##check for missing .pid
       if(missing(.pid)){
-        info <- data at .pid
+        .pid <- data at .pid
+
+      }
+
+      ##check for missing originator
+      if(missing(originator)){
+        originator <- data at originator
 
       }
 
-      ##set empty clas form object
+      ##set empty class from object
       newRLumDataCurve <- new("RLum.Data.Curve")
 
       ##fill - this is the faster way, filling in new() costs ...
-      newRLumDataCurve at recordType = recordType
-      newRLumDataCurve at curveType = curveType
-      newRLumDataCurve at data = data at data
-      newRLumDataCurve at info = info
-      newRLumDataCurve at .uid = data at .uid
-      newRLumDataCurve at .pid = data at .pid
+      newRLumDataCurve at recordType <- recordType
+      newRLumDataCurve at curveType <- curveType
+      newRLumDataCurve at data <- data at data
+      newRLumDataCurve at info <- info
+      newRLumDataCurve at originator <- originator
+      newRLumDataCurve at .uid <- .uid
+      newRLumDataCurve at .pid <- .pid
 
       return(newRLumDataCurve)
 
     }else{
 
-      ##set empty clas form object
+      ##set empty class from object
       newRLumDataCurve <- new("RLum.Data.Curve")
 
       ##fill - this is the faster way, filling in new() costs ...
-      newRLumDataCurve at originator = originator
-      newRLumDataCurve at recordType = recordType
-      newRLumDataCurve at curveType = curveType
-      newRLumDataCurve at data = data
-      newRLumDataCurve at info = info
-      newRLumDataCurve at .uid = .uid
-      newRLumDataCurve at .pid = .pid
+      newRLumDataCurve at originator <- originator
+      newRLumDataCurve at recordType <- recordType
+      newRLumDataCurve at curveType <- curveType
+      newRLumDataCurve at data <- data
+      newRLumDataCurve at info <- info
+      newRLumDataCurve at .uid <- .uid
+      newRLumDataCurve at .pid <- .pid
 
       return(newRLumDataCurve)
 
@@ -449,10 +459,56 @@ setMethod(f = "bin_RLum.Data",
             } else{
               warning("Argument 'bin_size' invalid, nothing was done!")
 
-              ##set matrix
-              return(set_RLum(class = "RLum.Data.Curve",
-                              data = object))
+              ##just return the object
+              return(object)
 
             }
 
           })
+
+####################################################################################################
+###smooth_RLum()
+####################################################################################################
+#' @describeIn RLum.Data.Curve
+#' Smoothing of RLum.Data.Curve objects using the functions \code{\link[zoo]{rollmean}} or \code{\link[zoo]{rollmedian}}.
+#' In particular, the internal function \code{.smoothing} is used.
+#'
+#' @param k [\code{smooth_RLum}] \code{\link{integer}} (with default): window for the rolling mean; must be odd for \code{rollmedian}.
+#' If nothing is set, \code{k} is set automatically
+#'
+#' @param fill [\code{smooth_RLum}] \code{\link{numeric}} (with default): a vector of fill values for the left-hand and right-hand ends of the smoothed data
+#'
+#' @param align [\code{smooth_RLum}] \code{\link{character}} (with default): specifies whether the index of the result should be
+#' right-aligned (default), left-aligned or centered compared to the rolling window of observations; allowed are
+#' \code{"right"}, \code{"center"} and \code{"left"}
+#'
+#' @param method [\code{smooth_RLum}] \code{\link{character}} (with default): defines which method should be applied for the
+#' smoothing: \code{"mean"} or \code{"median"}
+#'
+#' @return
+#'
+#' \bold{\code{smooth_RLum}}\cr
+#'
+#' Same object as input, after smoothing
+#'
+#' @export
+setMethod(
+  f = "smooth_RLum",
+  signature = "RLum.Data.Curve",
+    function(object, k = NULL, fill = NA, align = "right", method = "mean") {
+
+        object at data[,2] <- .smoothing(
+          x = object at data[,2],
+          k = k,
+          fill = fill,
+          align = align,
+          method = method)
+
+        ##return via set function to get a new id
+        set_RLum(class = "RLum.Data.Curve",
+                 originator = "smooth_RLum",
+                 data = object)
+
+    }
+ )
+
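A minimal sketch of the new smooth_RLum() method on a single curve (not part
of the upstream patch; the noisy decay curve is constructed ad hoc):

    curve <- set_RLum(
      class = "RLum.Data.Curve",
      recordType = "OSL",
      data = matrix(c(1:100, exp(-(1:100)/20) * 500 + rnorm(100, sd = 5)),
                    ncol = 2)
    )

    ## rolling median with a window of k = 3 (k must be odd for "median");
    ## the returned curve carries the originator "smooth_RLum"
    curve_smoothed <- smooth_RLum(curve, k = 3, method = "median")

The RLum.Analysis method defined above simply applies the same call to every
record of the object.
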
diff --git a/R/RcppExports.R b/R/RcppExports.R
index 3dce775..3028217 100644
--- a/R/RcppExports.R
+++ b/R/RcppExports.R
@@ -1,8 +1,12 @@
 # Generated by using Rcpp::compileAttributes() -> do not edit by hand
 # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
 
-.analyse_IRSARRF_SRS <- function(values_regenerated_limited, values_natural_limited, n_MC) {
-    .Call('Luminescence_analyse_IRSARRF_SRS', PACKAGE = 'Luminescence', values_regenerated_limited, values_natural_limited, n_MC)
+.analyse_IRSARRF_SRS <- function(values_regenerated_limited, values_natural_limited, vslide_range, n_MC, trace = FALSE) {
+    .Call('Luminescence_analyse_IRSARRF_SRS', PACKAGE = 'Luminescence', values_regenerated_limited, values_natural_limited, vslide_range, n_MC, trace)
+}
+
+.create_RLumDataCurve_matrix <- function(DATA, VERSION, NPOINTS, LTYPE, LOW, HIGH, AN_TEMP, TOLDELAY, TOLON, TOLOFF) {
+    .Call('Luminescence_create_RLumDataCurve_matrix', PACKAGE = 'Luminescence', DATA, VERSION, NPOINTS, LTYPE, LOW, HIGH, AN_TEMP, TOLDELAY, TOLON, TOLOFF)
 }
 
 .create_UID <- function() {
diff --git a/R/Risoe.BINfileData2RLum.Analysis.R b/R/Risoe.BINfileData2RLum.Analysis.R
index d95b24b..14cb666 100644
--- a/R/Risoe.BINfileData2RLum.Analysis.R
+++ b/R/Risoe.BINfileData2RLum.Analysis.R
@@ -41,6 +41,10 @@
 #' @param protocol \code{\link{character}} (optional): sets protocol type for
 #' analysis object. Value may be used by subsequent analysis functions.
 #'
+#' @param keep.empty \code{\link{logical}} (with default): If \code{TRUE} (default)
+#' an \code{RLum.Analysis} object is returned even if it does not contain any
+#' records. Set to \code{FALSE} to discard all empty objects.
+#'
 #' @param txtProgressBar \link{logical} (with default): enables or disables
 #' \code{\link{txtProgressBar}}.
 #'
@@ -49,7 +53,7 @@
 #' @note The \code{protocol} argument of the \code{\linkS4class{RLum.Analysis}}
 #' object is set to 'unknown' if not stated otherwise.
 #'
-#' @section Function version: 0.4.1
+#' @section Function version: 0.4.2
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 #'
@@ -77,6 +81,7 @@ Risoe.BINfileData2RLum.Analysis<- function(
   ltype = NULL,
   dtype = NULL,
   protocol = "unknown",
+  keep.empty = TRUE,
   txtProgressBar = FALSE
 ){
 
@@ -93,6 +98,7 @@ Risoe.BINfileData2RLum.Analysis<- function(
 
   if (is.null(pos)) {
     pos <- unique(object at METADATA[["POSITION"]])
+
   } else{
     ##get and check valid positions and remove invalid numbers from the input
     positions.valid <- unique(object at METADATA[, "POSITION"])
@@ -119,10 +125,11 @@ Risoe.BINfileData2RLum.Analysis<- function(
       grain <- unique(object at METADATA[,"GRAIN"])
 
     }else{
+
+      grain.valid <- unique(object at METADATA[,"GRAIN"])
       if(length(setdiff(grain, grain.valid)) > 0){
-        grain.valid <- unique(object at METADATA[,"GRAIN"])
 
-        warning(paste0("[Risoe.BINfileData2RLum.Analysis()] invalid grain number skipped: ",
+        warning(paste0("[Risoe.BINfileData2RLum.Analysis()] Invalid grain number skipped: ",
                        paste(setdiff(grain, grain.valid), collapse = ", ")), call. = FALSE)
 
         grain <- intersect(grain, grain.valid)
@@ -292,9 +299,14 @@ Risoe.BINfileData2RLum.Analysis<- function(
           originator = "Risoe.BINfileData2RLum.Analysis"
         )
 
+        if (!keep.empty && length(object at records) == 0)
+          return(NULL)
+
         ##add unique id of RLum.Analysis object to each curve object as .pid using internal function
         .set_pid(object)
 
+        return(object)
+
       })
 
       return(object)
@@ -304,7 +316,13 @@ Risoe.BINfileData2RLum.Analysis<- function(
     ##this is necessary to not break with previous code, i.e. if only one element is included
     ##the output is RLum.Analysis and not a list of it
     if(length(object) == 1){
-      invisible(object[[1]][[1]])
+
+      # special case: single grain data with only 1 position produces a nested list
+      # the outer one is of length 1, the nested list has length 100 (100 grains)
+      if (is.list(object[[1]]) && length(object[[1]]) > 1)
+        invisible(unlist(object))
+      else
+        invisible(object[[1]][[1]])
 
     }else{
 
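A sketch of the new keep.empty argument (the example data set name
CWOSL.SAR.Data is assumed from the package's example data; illustration only):

    data(ExampleData.BINfileData, envir = environment())

    ## discard positions that would result in RLum.Analysis objects
    ## without any records
    object <- Risoe.BINfileData2RLum.Analysis(
      object = CWOSL.SAR.Data,
      pos = c(1, 2),
      ltype = "OSL",
      keep.empty = FALSE
    )
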
diff --git a/R/Risoe.BINfileData2RLum.Data.Curve.R b/R/Risoe.BINfileData2RLum.Data.Curve.R
index 5ede362..9910aaf 100644
--- a/R/Risoe.BINfileData2RLum.Data.Curve.R
+++ b/R/Risoe.BINfileData2RLum.Data.Curve.R
@@ -36,7 +36,7 @@
 #' @note Due to changes in the BIN-file format (version 3 to version 4) the recalculation of TL-curves might not be
 #' entirely correct for cases where the TL measurement is combined with a preheat.
 #'
-#' @section Function version: 0.4.0
+#' @section Function version: 0.5.0
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France),
 #' Christoph Burow, University of Cologne (Germany)
@@ -67,74 +67,44 @@
   set
 ){
 
+  ##disaggregate object ... this makes it much faster below
+  ##we could also access via index, not number, but this is far to risky, as
+  ##every update in the BIN-file version will break the code here
+  METADATA <- as.list(object at METADATA)
+  DATA <- object at DATA
 
   # grep id of record -------------------------------------------------------
   ##if id is set, no input for pos and run is necessary
   if (missing(id)) {
-    id <- object at METADATA[object at METADATA[, "POSITION"] == pos &
-                            object at METADATA[, "SET"] == set &
-                            object at METADATA[, "RUN"] == run,
-                          "ID"]
+    id <- METADATA[METADATA[["POSITION"]] == pos &
+                     METADATA[["SET"]] == set &
+                     METADATA[["RUN"]] == run,
+                   "ID"]
 
   }
 
 
-  # Select values -----------------------------------------------------------
-
-  ##build matrix
-  if(object at METADATA[id,"NPOINTS"][1] != 0){
-
-    if(object at METADATA[id, "LTYPE"] == "TL" && as.numeric(object at METADATA[id, "VERSION"]) >=4){
-
-      temp.x <- c(
-        seq(
-          from = object at METADATA[["LOW"]][id],
-          to = object at METADATA[["AN_TEMP"]][id],
-          length.out = object at METADATA[["TOLDELAY"]][id]
-        ),
-        seq(
-          from = object at METADATA[["AN_TEMP"]][id],
-          to = object at METADATA[["AN_TEMP"]][id],
-          length.out = object at METADATA[["TOLON"]][id]
-        ),
-        seq(
-          from = object at METADATA[["AN_TEMP"]][id],
-          to = object at METADATA[["HIGH"]][id],
-          length.out = object at METADATA[["TOLOFF"]][id]
-        )
-      )
-
-    }else{
-
-      temp.x <- seq(
-        from = object at METADATA[["LOW"]][id],
-        to = object at METADATA[["HIGH"]][id],
-        length.out = object at METADATA[["NPOINTS"]][id]
-      )
-
-    }
-
-    temp.y <- unlist(object at DATA[id], use.names = FALSE)
-
-
-  }else{
-    temp.x <- NA
-    temp.y <- NA
-
-    warning("[.Risoe.BINfileData2RLum.Data.Curve()] NPOINTS was 0, RLum.Data.Curve-object with NA-values produced.")
-
-  }
-
-  ##convert info elements to list ... this procedure halfs the time needed in comparison to
-  ##to simply as.list(object at METADATA)
-  info <- lapply(1:length(names(object at METADATA)), function(x){.subset2(object at METADATA, x)[id]})
-  names(info) <- names(object at METADATA)
+  ##grep info elements
+  info <- lapply(1:length(names(METADATA)), function(x){METADATA[[x]][id]})
+  names(info) <- names(METADATA)
 
   # Build object ------------------------------------------------------------
   set_RLum(
     class = "RLum.Data.Curve",
-    recordType = as.character(object at METADATA[id, "LTYPE"]),
-    data = matrix(c(temp.x, temp.y), ncol = 2),
+    recordType = METADATA[["LTYPE"]][id],
+    data =  .create_RLumDataCurve_matrix(
+      DATA = DATA[[id]],
+      NPOINTS = METADATA[["NPOINTS"]][id],
+      VERSION = METADATA[["VERSION"]][id],
+      LTYPE = METADATA[["LTYPE"]][id],
+      LOW =  METADATA[["LOW"]][id],
+      HIGH =  METADATA[["HIGH"]][id],
+      AN_TEMP = METADATA[["AN_TEMP"]][id],
+      TOLDELAY =METADATA[["TOLDELAY"]][id],
+      TOLON = METADATA[["TOLON"]][id],
+      TOLOFF = METADATA[["TOLOFF"]][id]
+
+    ),
     info = info
   )
 
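For reference, the temperature axis that the removed R code constructed, and
that the new C++ helper .create_RLumDataCurve_matrix() now reconstructs for TL
curves of BIN-file versions >= 4, corresponds to the following plain R sketch
(the numbers are made up):

    LOW <- 20; HIGH <- 450; AN_TEMP <- 200      ## start, end and preheat temperature
    TOLDELAY <- 50; TOLON <- 20; TOLOFF <- 180  ## channels of ramp-up, hold and final ramp

    temp.x <- c(
      seq(from = LOW,     to = AN_TEMP, length.out = TOLDELAY),
      seq(from = AN_TEMP, to = AN_TEMP, length.out = TOLON),
      seq(from = AN_TEMP, to = HIGH,    length.out = TOLOFF)
    )
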
diff --git a/R/RisoeBINfileData-class.R b/R/RisoeBINfileData-class.R
index 9a63c7e..83a2ead 100644
--- a/R/RisoeBINfileData-class.R
+++ b/R/RisoeBINfileData-class.R
@@ -21,6 +21,10 @@ NULL
 #'
 #' \bold{Internal METADATA - object structure}
 #'
+#' This structure is compatible with BIN-file versions 03-08; however, it does not follow (in its
+#' sequential arrangement) the manual provided by the manufacturer,
+#' but uses its own structure accounting for the different versions.
+#'
 #' \tabular{rllll}{
 #' \bold{#} \tab \bold{Name} \tab \bold{Data Type} \tab \bold{V} \tab \bold{Description} \cr
 #' [,1]  \tab ID  \tab \code{numeric} \tab RLum \tab Unique record ID (same ID as in slot \code{DATA})\cr
@@ -76,20 +80,20 @@ NULL
 #' [,51] \tab TIMESINCEIRR \tab \code{integer} \tab 06-08 \tab Time since irradiation (s)\cr
 #' [,52] \tab TIMETICK \tab \code{numeric} \tab 06-08 \tab Time tick for pulsing (s)\cr
 #' [,53] \tab ONTIME \tab \code{integer} \tab 06-08 \tab On-time for pulsing (in time ticks)\cr
-#' [,54] \tab STIMPERIOD \tab \code{integer} \tab 06-08 \tab Stimulation period (on+off in time ticks)\cr
-#' [,55] \tab GATE_ENABLED \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
-#' [,56] \tab ENABLE_FLAGS \tab \code{raw} \tab 06-08 \tab PMT signal gating  enabled\cr
-#' [,57] \tab GATE_START \tab \code{integer} \tab 06-08 \tab Start gating (in time ticks)\cr
-#' [,58] \tab GATE_STOP \tab \code{ingeter} \tab 06-08 \tab Stop gating (in time ticks), 'Gateend' for version 04, here only GATE_STOP is used\cr
-#' [,59] \tab PTENABLED \tab \code{raw} \tab 06-08 \tab Photon time enabled\cr
-#' [,60] \tab DTENABLED \tab \code{raw} \tab 06-08 \tab PMT dead time correction enabled\cr
-#' [,61] \tab DEADTIME \tab \code{numeric} \tab 06-08 \tab PMT dead time (s)\cr
-#' [,62] \tab MAXLPOWER \tab \code{numeric} \tab 06-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr
-#' [,63] \tab XRF_ACQTIME \tab \code{numeric} \tab 06-08 \tab XRF acquisition time (s)\cr
-#' [,64] \tab XRF_HV \tab \code{numeric} \tab 06-08 \tab XRF X-ray high voltage (V)\cr
-#' [,65] \tab XRF_CURR \tab \code{integer} \tab 06-08 \tab XRF X-ray current (uA)\cr
-#' [,66] \tab XRF_DEADTIMEF \tab \code{numeric} \tab 06-08 \tab XRF dead time fraction\cr
-#' [,67] \tab SEQUENCE \tab \code{character} \tab 03-04 \tab Sequence name\cr
+#' [,54] \tab OFFTIME \tab \code{integer} \tab 03 \tab Off-time for pulsed stimulation (in s) \cr
+#' [,55] \tab STIMPERIOD \tab \code{integer} \tab 06-08 \tab Stimulation period (on+off in time ticks)\cr
+#' [,56] \tab GATE_ENABLED \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
+#' [,57] \tab ENABLE_FLAGS \tab \code{raw} \tab 06-08 \tab PMT signal gating  enabled\cr
+#' [,58] \tab GATE_START \tab \code{integer} \tab 06-08 \tab Start gating (in time ticks)\cr
+#' [,59] \tab GATE_STOP \tab \code{integer} \tab 06-08 \tab Stop gating (in time ticks), 'Gateend' for version 04, here only GATE_STOP is used\cr
+#' [,60] \tab PTENABLED \tab \code{raw} \tab 06-08 \tab Photon time enabled\cr
+#' [,61] \tab DTENABLED \tab \code{raw} \tab 06-08 \tab PMT dead time correction enabled\cr
+#' [,62] \tab DEADTIME \tab \code{numeric} \tab 06-08 \tab PMT dead time (s)\cr
+#' [,63] \tab MAXLPOWER \tab \code{numeric} \tab 06-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr
+#' [,64] \tab XRF_ACQTIME \tab \code{numeric} \tab 06-08 \tab XRF acquisition time (s)\cr
+#' [,65] \tab XRF_HV \tab \code{numeric} \tab 06-08 \tab XRF X-ray high voltage (V)\cr
+#' [,66] \tab XRF_CURR \tab \code{integer} \tab 06-08 \tab XRF X-ray current (uA)\cr
+#' [,67] \tab XRF_DEADTIMEF \tab \code{numeric} \tab 06-08 \tab XRF dead time fraction\cr
 #' [,68] \tab DETECTOR_ID \tab \code{raw} \tab 07-08 \tab Detector ID\cr
 #' [,69] \tab LOWERFILTER_ID \tab \code{integer} \tab 07-08 \tab Lower filter ID in reader\cr
 #' [,70] \tab UPPERFILTER_ID \tab \code{integer} \tab 07-08 \tab Upper filter ID in reader\cr
@@ -100,10 +104,9 @@ NULL
 #' [,75] \tab MARKPOS_Y2 \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr
 #' [,76] \tab MARKPOS_X3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
 #' [,77] \tab MARKPOS_Y3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
-#' [,78] \tab MARKPOS_X4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
-#' [,79] \tab MARKPOS_Y4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
-#' [,80] \tab EXTR_START \tab \code{numeric} \tab 08 \tab usage unknown \cr
-#' [,81] \tab EXTR_END \tab \code{numeric} \tab 08 \tab usage unknown
+#' [,78] \tab EXTR_START \tab \code{numeric} \tab 08 \tab usage unknown \cr
+#' [,79] \tab EXTR_END \tab \code{numeric} \tab 08 \tab usage unknown\cr
+#' [,80] \tab SEQUENCE \tab \code{character} \tab 03-04 \tab Sequence name
 #' } V = BIN-file version (RLum means that it does not depend on a specific BIN
 #' version)\cr
 #'
@@ -143,7 +146,7 @@ NULL
 #' @section Objects from the Class: Objects can be created by calls of the form
 #' \code{new("Risoe.BINfileData", ...)}.
 #'
-#' @section Function version: 0.3.0
+#' @section Function version: 0.3.3
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -170,51 +173,147 @@ setClass("Risoe.BINfileData",
            METADATA = "data.frame",
            DATA = "list",
            .RESERVED = "list"
-           )
+           ),
+         prototype = prototype(
+           METADATA = data.frame(
+             ID = integer(),
+             SEL = logical(),
+             VERSION = integer(),
+             LENGTH = integer(),
+             PREVIOUS = integer(),
+             NPOINTS = integer(),
+             RECTYPE = integer(),
+             RUN = integer(),
+             SET = integer(),
+             POSITION = integer(),
+             GRAIN = integer(),
+             GRAINNUMBER = integer(),
+             CURVENO = integer(),
+             XCOORD = integer(),
+             YCOORD = integer(),
+             SAMPLE = character(),
+             COMMENT = character(),
+             SYSTEMID = integer(),
+             FNAME = character(),
+             USER = character(),
+             TIME = character(),
+             DATE = character(),
+             DTYPE = character(),
+             BL_TIME = numeric(),
+             BL_UNIT = integer(),
+             NORM1 = numeric(),
+             NORM2 = numeric(),
+             NORM3 = numeric(),
+             BG = numeric(),
+             SHIFT = integer(),
+             TAG = integer(),
+             LTYPE = character(),
+             LIGHTSOURCE = character(),
+             LPOWER = numeric(),
+             LIGHTPOWER = numeric(),
+             LOW = numeric(),
+             HIGH = numeric(),
+             RATE = numeric(),
+             TEMPERATURE = numeric(),
+             MEASTEMP = numeric(),
+             AN_TEMP = numeric(),
+             AN_TIME = numeric(),
+             TOLDELAY = integer(),
+             TOLON = integer(),
+             TOLOFF = integer(),
+             IRR_TIME = numeric(),
+             IRR_TYPE = integer(),
+             IRR_UNIT = integer(),
+             IRR_DOSERATE = numeric(),
+             IRR_DOSERATEERR = numeric(),
+             TIMESINCEIRR = numeric(),
+             TIMETICK = numeric(),
+             ONTIME = numeric(),
+             OFFTIME = numeric(),
+             STIMPERIOD = integer(),
+             GATE_ENABLED = numeric(),
+             ENABLE_FLAGS = numeric(),
+             GATE_START = numeric(),
+             GATE_STOP = numeric(),
+             PTENABLED = numeric(),
+             DTENABLED = numeric(),
+             DEADTIME = numeric(),
+             MAXLPOWER = numeric(),
+             XRF_ACQTIME = numeric(),
+             XRF_HV = numeric(),
+             XRF_CURR = numeric(),
+             XRF_DEADTIMEF = numeric(),
+             DETECTOR_ID = integer(),
+             LOWERFILTER_ID = integer(),
+             UPPERFILTER_ID = integer(),
+             ENOISEFACTOR = numeric(),
+             MARKPOS_X1 = numeric(),
+             MARKPOS_Y1 = numeric(),
+             MARKPOS_X2 = numeric(),
+             MARKPOS_Y2 = numeric(),
+             MARKPOS_X3 = numeric(),
+             MARKPOS_Y3 = numeric(),
+             EXTR_START = numeric(),
+             EXTR_END = numeric(),
+             SEQUENCE = character(),
+             stringsAsFactors=FALSE
+           ),
+           DATA = list(),
+           .RESERVED = list()
+          )
          )
 
+
 ##set generic S4 function for object
 #' @describeIn Risoe.BINfileData
 #' Show structure of RLum and Risoe.BINfile class objects
 #' @export
-setMethod("show", signature(object = "Risoe.BINfileData"),
-          function(object){
+setMethod(f = "show",
+          signature = signature(object = "Risoe.BINfileData"),
+          definition = function(object){
 
-            version<-paste(unique(object at METADATA[,"VERSION"]), collapse = ", ")
-            systemID<-paste(unique(object at METADATA[,"SYSTEMID"]), collapse = ", ")
-            filename <- as.character(object at METADATA[1,"FNAME"])
-            records.overall<-length(object at DATA)
-            records.type<-table(object at METADATA[,"LTYPE"])
-            user<-paste(unique(as.character(object at METADATA[,"USER"])), collapse = ", ")
-            date<-paste(unique(as.character(object at METADATA[,"DATE"])), collapse = ", ")
-            run.range<-range(object at METADATA[,"RUN"])
-            set.range<-range(object at METADATA[,"SET"])
-            grain.range <- range(object at METADATA[,"GRAIN"])
-            pos.range<-range(object at METADATA[,"POSITION"])
+            if(nrow(object at METADATA) != 0){
+              version<-paste(unique(object at METADATA[,"VERSION"]), collapse = ", ")
+              systemID<-paste(unique(object at METADATA[,"SYSTEMID"]), collapse = ", ")
+              filename <- as.character(object at METADATA[1,"FNAME"])
+              records.overall<-length(object at DATA)
+              records.type<-table(object at METADATA[,"LTYPE"])
+              user<-paste(unique(as.character(object at METADATA[,"USER"])), collapse = ", ")
+              date<-paste(unique(as.character(object at METADATA[,"DATE"])), collapse = ", ")
+              run.range<-range(object at METADATA[,"RUN"])
+              set.range<-range(object at METADATA[,"SET"])
+              grain.range <- range(object at METADATA[,"GRAIN"])
+              pos.range<-range(object at METADATA[,"POSITION"])
 
-            records.type.count <- sapply(1:length(records.type),
-              function(x){paste(
-              names(records.type)[x],"\t(n = ",records.type[x],")",sep="")
-              })
+              records.type.count <- sapply(1:length(records.type),
+                function(x){paste(
+                names(records.type)[x],"\t(n = ",records.type[x],")",sep="")
+                })
 
-            records.type.count <- paste(records.type.count,
-                                        collapse="\n\t                      ")
+              records.type.count <- paste(records.type.count,
+                                          collapse="\n\t                      ")
 
-            ##print
-            cat("\n[Risoe.BINfileData object]")
-            cat("\n\n\tBIN/BINX version     ", version)
-            if(version>=6){
-              cat("\n\tFile name:           ", filename)
-            }
-            cat("\n\tObject date:         ", date)
-            cat("\n\tUser:                ", user)
-            cat("\n\tSystem ID:           ", ifelse(systemID == 0,"0 (unknown)", systemID))
-            cat("\n\tOverall records:     ", records.overall)
-            cat("\n\tRecords type:        ", records.type.count)
-            cat("\n\tPosition range:      ",pos.range[1],":",pos.range[2])
-            cat("\n\tGrain range:         ",grain.range[1],":",grain.range[2])
-            cat("\n\tRun range:           ",run.range[1],":",run.range[2])
-            cat("\n\tSet range:           ",set.range[1],":",set.range[2])
+              ##print
+              cat("\n[Risoe.BINfileData object]")
+              cat("\n\n\tBIN/BINX version     ", version)
+              if(version>=6){
+                cat("\n\tFile name:           ", filename)
+              }
+              cat("\n\tObject date:         ", date)
+              cat("\n\tUser:                ", user)
+              cat("\n\tSystem ID:           ", ifelse(systemID == 0,"0 (unknown)", systemID))
+              cat("\n\tOverall records:     ", records.overall)
+              cat("\n\tRecords type:        ", records.type.count)
+              cat("\n\tPosition range:      ",pos.range[1],":",pos.range[2])
+              cat("\n\tGrain range:         ",grain.range[1],":",grain.range[2])
+              cat("\n\tRun range:           ",run.range[1],":",run.range[2])
+              cat("\n\tSet range:           ",set.range[1],":",set.range[2])
+
+            }else{
+              cat("\n[Risoe.BINfileData object]")
+              cat("\n\n >> This object is empty!<<")
+
+             }
           }#end function
           )#end setMethod
 
@@ -233,22 +332,22 @@ setMethod("show", signature(object = "Risoe.BINfileData"),
 #' @param .RESERVED Object of class "list" containing list of undocumented raw
 #' values for internal use only.
 #' @export
-setMethod("set_Risoe.BINfileData",
-          signature = c(
-            METADATA = "data.frame", DATA = "list", .RESERVED = "ANY"
-          ),
+setMethod(f = "set_Risoe.BINfileData",
+          signature = signature("ANY"),
+          definition = function(METADATA, DATA, .RESERVED) {
 
-          function(METADATA, DATA, .RESERVED) {
-            if (missing(.RESERVED)) {
-              .RESERVED <- list()
-            }
+            if(length(METADATA) == 0){
+              new("Risoe.BINfileData")
 
-            new(
-              "Risoe.BINfileData",
-              METADATA = METADATA,
-              DATA = DATA,
-              .RESERVED = .RESERVED
-            )
+            }else{
+              new(
+                "Risoe.BINfileData",
+                METADATA = METADATA,
+                DATA = DATA,
+                .RESERVED = .RESERVED
+              )
+
+            }
 
           })
 
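A two-line sketch of what the new prototype and the revised show() method
allow (illustration only):

    empty <- new("Risoe.BINfileData")  ## valid object without any records
    empty                              ## show() now reports the object as empty
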
diff --git a/R/analyse_FadingMeasurement.R b/R/analyse_FadingMeasurement.R
new file mode 100644
index 0000000..33452cf
--- /dev/null
+++ b/R/analyse_FadingMeasurement.R
@@ -0,0 +1,821 @@
+#' Analyse fading measurements and return the fading rate per decade (g-value)
+#'
+#' The function analyses fading measurements and returns a fading rate including an error estimation.
+#' The function is not limited to standard fading measurements as described, e.g., in Huntley and
+#' Lamothe 2001. Additionally, the density of recombination centres (rho') is estimated after
+#' Kars et al. 2008.
+#'
+#' All provided output corresponds to the \eqn{tc} value obtained by this analysis. Additionally,
+#' the g-value normalised to 2 days is provided in the output object. The output of this function
+#' can be passed to the function \code{\link{calc_FadingCorr}}.\cr
+#'
+#' \bold{Fitting and error estimation}\cr
+#'
+#' For the fitting the function \code{\link[stats]{lm}} is used without applying weights. For the
+#' error estimation all input values, except tc (whose precision can be considered sufficiently
+#' high with regard to the underlying problem), are sampled assuming a normal distribution
+#' for each value, with the value as the mean and the provided uncertainty as standard deviation. \cr
+#'
+#' \bold{Density of recombination centres}
+#'
+#' The density of recombination centres, expressed by the dimensionless variable rho', is estimated
+#' by fitting equation 5 in Kars et al. 2008 to the data. For the fitting the function
+#' \code{\link[stats]{nls}} is used without applying weights. For the error estimation the same
+#' procedure as for the g-value is applied (see above).
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}): input object with the
+#' measurement data. Alternatively, a \code{\link{list}} containing \code{\linkS4class{RLum.Analysis}}
+#' objects or a \code{\link{data.frame}} with three columns
+#' (x = LxTx, y = LxTx error, z = time since irradiation) can be provided.
+#' Can also be a wide table, i.e. a \code{\link{data.frame}} with a number of columns divisible by 3
+#' and where each triplet has the aforementioned column structure.
+#'
+#' @param structure \code{\link{character}} (with default): sets the structure of the measurement
+#' data. Allowed are \code{'Lx'} or \code{c('Lx','Tx')}. Other input is ignored
+#'
+#' @param signal.integral \code{\link{vector}} (\bold{required}): vector with the
+#' limits for the signal integral. Not required if a \code{data.frame} with LxTx values is
+#' provided.
+#'
+#' @param background.integral \code{\link{vector}} (\bold{required}): vector with the
+#' bounds for the background integral. Not required if a \code{data.frame} with LxTx values is
+#' provided.
+#'
+#' @param t_star \code{\link{character}} (with default): method for calculating the time elapsed
+#' since irradiation. Options are: \code{'half'}, which is \eqn{t_star := t_1 + (t_2 - t_1)/2} (Auclair et al., 2003)
+#' and \code{'end'}, which takes the time between irradiation and the measurement step. Default is \code{'half'}
+#'
+#' @param n.MC \code{\link{integer}} (with default): number for Monte Carlo runs for the error
+#' estimation
+#'
+#' @param verbose \code{\link{logical}} (with default): enables/disables verbose mode
+#'
+#' @param plot \code{\link{logical}} (with default): enables/disables plot output
+#'
+#' @param plot.single \code{\link{logical}} (with default): enables/disables single plot
+#' mode, i.e. one plot window per plot. Alternatively a vector specifying the plot to be drawn, e.g.,
+#' \code{plot.single = c(3,4)} draws only the last two plots
+#'
+#' @param \dots (optional) further arguments that can be passed to internally used functions (see details)
+#'
+#' @return An \code{\linkS4class{RLum.Results}} object is returned:
+#'
+#' Slot: \bold{@data}\cr
+#'
+#' \tabular{lll}{
+#' \bold{OBJECT} \tab \code{TYPE} \tab \code{COMMENT}\cr
+#' \code{fading_results} \tab \code{data.frame} \tab results of the fading measurement in a table \cr
+#' \code{fit} \tab \code{lm} \tab object returned by the used linear fitting function \code{\link[stats]{lm}}\cr
+#' \code{rho_prime} \tab \code{data.frame} \tab results of rho' estimation after Kars et al. 2008 \cr
+#' \code{LxTx_table} \tab \code{data.frame} \tab Lx/Tx table, if curve data had been provided \cr
+#' \code{irr.times} \tab \code{integer} \tab vector with the irradiation times in seconds \cr
+#' }
+#'
+#' Slot: \bold{@info}\cr
+#'
+#' \tabular{lll}{
+#' \bold{OBJECT} \tab \code{TYPE} \tab \code{COMMENT}\cr
+#' \code{call} \tab \code{call} \tab the original function call\cr
+#'
+#' }
+#'
+#'
+#' @section Function version: 0.1.5
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
+#' Christoph Burow, University of Cologne (Germany)
+#'
+#' @note \bold{This function has BETA status and should not be used for publication work!}
+#'
+#' @keywords datagen
+#'
+#' @references
+#'
+#' Auclair, M., Lamothe, M., Huot, S., 2003. Measurement of anomalous fading for feldspar IRSL using
+#' SAR. Radiation Measurements 37, 487-492. doi:10.1016/S1350-4487(03)00018-0
+#'
+#' Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement
+#' and correction for it in optical dating. Canadian Journal of Earth Sciences 38,
+#' 1093-1106. doi:10.1139/cjes-38-7-1093
+#'
+#' Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar
+#' IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021
+#'
+#' @seealso \code{\link{calc_OSLLxTxRatio}}, \code{\link{read_BIN2R}}, \code{\link{read_XSYG2R}},
+#' \code{\link{extract_IrradiationTimes}}
+#'
+#' @examples
+#'
+#' ## load example data (sample UNIL/NB123, see ?ExampleData.Fading)
+#' data("ExampleData.Fading", envir = environment())
+#'
+#' ##(1) get fading measurement data (here a three column data.frame)
+#' fading_data <- ExampleData.Fading$fading.data$IR50
+#'
+#' ##(2) run analysis
+#' g_value <- analyse_FadingMeasurement(
+#' fading_data,
+#' plot = TRUE,
+#' verbose = TRUE,
+#' n.MC = 10)
+#'
+#' ##(3) this can be further used in the function
+#' ## to correct the age according to Huntley & Lamothe, 2001
+#' results <- calc_FadingCorr(
+#' age.faded = c(100,2),
+#' g_value = g_value,
+#' n.MC = 10)
+#'
+#'
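+#' ##(4) additional sketch (illustrative only): a wide table, i.e. column
+#' ## triplets of LxTx, LxTx error and time since irradiation, is also
+#' ## accepted; here the three-column example data is simply duplicated
+#' \dontrun{
+#' fading_wide <- cbind(fading_data, fading_data)
+#' analyse_FadingMeasurement(fading_wide, n.MC = 10)
+#' }
+#'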
+#' @export
+analyse_FadingMeasurement <- function(
+  object,
+  structure = c("Lx", "Tx"),
+  signal.integral,
+  background.integral,
+  t_star = 'half',
+  n.MC = 100,
+  verbose = TRUE,
+  plot = TRUE,
+  plot.single = FALSE,
+  ...
+){
+
+  # Integrity Tests -----------------------------------------------------------------------------
+  if (is(object, "list")) {
+    if (!unique(sapply(object, class)) == "RLum.Analysis") {
+      stop(
+        "[analyse_FadingMeasurement()] 'object' expects an 'RLum.Analysis' object or a 'list' of such objects!"
+      )
+
+    }
+
+  } else if (class(object) == "RLum.Analysis") {
+    object <- list(object)
+
+  } else if(class(object) == "data.frame"){
+    if (ncol(object) %% 3 != 0) {
+      stop("[analyse_FadingMeasurement()] 'object': if you provide a data.frame as input, the number of columns must be a multiple of 3.")
+    } else {
+      object <- do.call(rbind,
+                        lapply(seq(1, ncol(object), 3), function(col) {
+                          setNames(object[ , col:c(col+2)], c("LxTx", "LxTxError", "timeSinceIrr"))
+                          })
+                        )
+      object <- object[complete.cases(object), ]
+    }
+
+    ##set table and object
+    LxTx_table <- data.frame(LxTx = object[[1]], LxTx.Error = object[[2]])
+    TIMESINCEIRR <- object[[3]]
+    irradiation_times <- TIMESINCEIRR
+    object <- NULL
+
+
+  }else{
+    stop(
+      "[analyse_FadingMeasurement()] 'object' needs to be of type 'RLum.Analysis' or a 'list' of such objects!"
+    )
+
+  }
+
+
+  # Prepare data --------------------------------------------------------------------------------
+  if(!is.null(object)){
+
+    ##support read_XSYG2R()
+    if(length(unique(unlist(lapply(object, slot, name = "originator")))) == 1 &&
+       unique(unlist(lapply(object, slot, name = "originator"))) == "read_XSYG2R"){
+
+      irradiation_times <- extract_IrradiationTimes(object = object)
+
+      ##reduce irradiation times ... extract curve data
+      TIMESINCEIRR <- unlist(lapply(irradiation_times, function(x) {
+
+        ##get time since irradiation
+        temp_TIMESINCEIRR <-
+          x$irr.times[["TIMESINCEIRR"]][!grepl(pattern = "irradiation",
+                                               x = x$irr.times[["STEP"]],
+                                               fixed = TRUE)]
+
+        ##get the irradiation time (needed to calculate t_star below)
+        temp_IRR_TIME <-
+          x$irr.times[["IRR_TIME"]][!grepl(pattern = "irradiation",
+                                           x = x$irr.times[["STEP"]],
+                                           fixed = TRUE)]
+
+        ##in accordance with Auclair et al., 2003, p. 488
+        ##but here we have no t1 ... this needs to be calculated
+        ##set variables
+        t1 <- temp_TIMESINCEIRR
+        t2 <- temp_TIMESINCEIRR + temp_IRR_TIME
+
+        if(t_star == "half"){
+          ##calculate t_star
+          t_star <- t1 + (t2 - t1)/2
+
+        }else if (t_star == "end"){
+          ##set t_star to t_1 (i.e., the end of the irradiation)
+          t_star <- t1
+
+        }else{
+          stop("[analyse_FadingMeasurement()] Invalid value for t_star.")
+
+        }
+
+        return(t_star)
+
+      }))
+
+      ##clean object by removing the irradiation step ... and yes, we drop!
+      object_clean <- unlist(get_RLum(object, curveType = "measured"))
+
+      ##support read_BIN2R()
+    }else if (length(unique(unlist(lapply(object, slot, name = "originator")))) == 1 &&
+              unique(unlist(lapply(object, slot, name = "originator"))) == "read_BIN2R"){
+      try(stop("[analyse_FadingMeasurement()] Analysing data imported from a BIN-file is currently not supported!", call. = FALSE))
+      return(NULL)
+
+      ##not support
+    }else{
+      try(stop("[analyse_FadingMeasurement()] Unknown or unsupported originator!", call. = FALSE))
+      return(NULL)
+
+    }
+
+    # Calculation ---------------------------------------------------------------------------------
+
+    ##calculate Lx/Tx or ... just Lx, it depends on the pattern ... set IRR_TIME
+    if(length(structure) == 2){
+      Lx_data <- object_clean[seq(1,length(object_clean), by = 2)]
+      Tx_data <- object_clean[seq(2,length(object_clean), by = 2)]
+
+      ##we need only every 2nd irradiation time, the one from the Tx should be the same ... all the time
+      TIMESINCEIRR <- TIMESINCEIRR[seq(1,length(TIMESINCEIRR), by =2)]
+
+
+    }else if(length(structure) == 1){
+      Lx_data <- object_clean
+      Tx_data <- NULL
+
+    }else{
+      try(stop("[analyse_FadingMeasurement()] I have no idea what your structure means!", call. = FALSE))
+      return(NULL)
+
+    }
+
+    ##calculate Lx/Tx table
+    LxTx_table <- merge_RLum(lapply(1:length(Lx_data), function(x) {
+      calc_OSLLxTxRatio(
+        Lx.data = Lx_data[[x]],
+        Tx.data = Tx_data[[x]],
+        signal.integral = signal.integral,
+        background.integral = background.integral,
+        signal.integral.Tx = list(...)$signal.integral.Tx,
+        background.integral.Tx = list(...)$background.integral.Tx,
+        sigmab = list(...)$sigmab,
+        sig0 = if(
+          is.null(list(...)$sig0)){
+          formals(calc_OSLLxTxRatio)$sig0
+        }else{
+          list(...)$sig0
+        },
+        background.count.distribution = if(
+          is.null(list(...)$background.count.distribution)){
+          formals(calc_OSLLxTxRatio)$background.count.distribution
+        }else{
+          list(...)$background.count.distribution
+        }
+      )
+
+    }))$LxTx.table
+
+  }
+
+
+  ##create unique identifier
+  uid <- .create_UID()
+
+  ##normalise data to prompt measurement
+  tc <- min(TIMESINCEIRR)[1]
+
+  ##normalise
+  if(length(structure) == 2 | is.null(object)){
+    LxTx_NORM <-
+      LxTx_table[["LxTx"]] / LxTx_table[["LxTx"]][which(TIMESINCEIRR== tc)[1]]
+    LxTx_NORM.ERROR <-
+      LxTx_table[["LxTx.Error"]] / LxTx_table[["LxTx"]][which(TIMESINCEIRR == tc)[1]]
+
+
+  }else{
+    LxTx_NORM <-
+      LxTx_table[["Net_LnLx"]] / LxTx_table[["Net_LnLx"]][which(TIMESINCEIRR== tc)[1]]
+    LxTx_NORM.ERROR <-
+       LxTx_table[["Net_LnLx.Error"]] / LxTx_table[["Net_LnLx"]][which(TIMESINCEIRR == tc)[1]]
+
+  }
+
+
+  ##normalise time since irradiation
+  TIMESINCEIRR_NORM <- TIMESINCEIRR/tc
+
+  ##add dose and time since irradiation
+  LxTx_table <-
+    cbind(
+      LxTx_table,
+      TIMESINCEIRR = TIMESINCEIRR,
+      TIMESINCEIRR_NORM = TIMESINCEIRR_NORM,
+      TIMESINCEIRR_NORM.LOG = log10(TIMESINCEIRR_NORM),
+      LxTx_NORM = LxTx_NORM,
+      LxTx_NORM.ERROR = LxTx_NORM.ERROR,
+      UID = uid
+    )
+
+
+  # Fitting -------------------------------------------------------------------------------------
+  ##we need to fit the data to get the g_value
+
+  ##sample for monte carlo runs
+  MC_matrix <- cbind(LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+                     matrix(rnorm(
+                       n = n.MC * nrow(LxTx_table),
+                       mean = LxTx_table[["LxTx_NORM"]],
+                       sd = LxTx_table[["LxTx_NORM.ERROR"]]
+                     ),
+                     ncol = n.MC))
+
+
+  ##apply the fit
+  fit_matrix <- vapply(X = 2:(n.MC+1), FUN = function(x){
+
+    ##fit
+    stats::lm(y~x, data = data.frame(
+      x = MC_matrix[,1],
+      y = MC_matrix[,x]))$coefficients
+
+
+  }, FUN.VALUE = vector("numeric", length = 2))
+
+
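+  ## fit_matrix: row 1 holds the intercepts, row 2 the slopes of the MC fits;
+  ## since x is log10(t/tc), the g-value in %/decade is 100 * |slope| / intercept
+  ## (the division by the intercept corrects for a normalisation offset from one)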
+  ##calculate g-values from matrix
+  g_value.MC <- abs(fit_matrix[2, ]) * 1 / fit_matrix[1, ] * 100
+
+  ##calculate rho prime (Kars et al. 2008; proposed by Georgina King)
+
+  ##s value after Huntley (2006) J. Phys. D.
+  Hs <- 3e15
+
+  ##sample for monte carlo runs
+  MC_matrix_rhop <-  matrix(rnorm(
+    n = n.MC * nrow(LxTx_table),
+    mean = LxTx_table[["LxTx_NORM"]],
+    sd = LxTx_table[["LxTx_NORM.ERROR"]]
+  ), ncol = n.MC)
+
+  ## calculate rho prime for all MC samples
+  fit_vector_rhop <- apply(MC_matrix_rhop, MARGIN = 2, FUN = function(x) {
+    tryCatch({
+      coef(minpack.lm::nlsLM(x ~ c * exp(-rhop * (log(1.8 * Hs * LxTx_table$TIMESINCEIRR))^3),
+                             start = list(c = x[1], rhop = 10^-5.5)))[["rhop"]]
+    },
+    error = function(e) {
+      return(NA)
+    })
+  })
+
+  ## discard all NA values produced in MC runs
+  fit_vector_rhop <- fit_vector_rhop[!is.na(fit_vector_rhop)]
+
+  ## calculate mean and standard deviation of rho prime (in log10 space)
+  rhoPrime <- data.frame(
+    MEAN = mean(fit_vector_rhop),
+    SD = sd(fit_vector_rhop),
+    Q_0.025 = quantile(x = fit_vector_rhop, probs = 0.025),
+    Q_0.16 = quantile(x = fit_vector_rhop, probs = 0.16),
+    Q_0.84 = quantile(x = fit_vector_rhop, probs = 0.84),
+    Q_0.975 = quantile(x = fit_vector_rhop, probs = 0.975),
+    row.names = NULL
+  )
+
+  ##for plotting
+  fit <-
+    stats::lm(y ~ x,
+              data = data.frame(x = LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+                                y = LxTx_table[["LxTx_NORM"]]))
+
+
+  fit_power <- stats::lm(y ~ I(x^3) + I(x^2) + I(x) ,
+                         data = data.frame(x = LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+                                           y = LxTx_table[["LxTx_NORM"]]))
+
+
+  ##for predicting
+  fit_predict <-
+    stats::lm(y ~ x, data = data.frame(y = LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+                                       x = LxTx_table[["LxTx_NORM"]]))
+
+  ##calculate final g_value
+  ##the 2nd term corrects for the (potential) offset from one
+  g_value_fit <- abs(fit$coefficient[2]) * 1 / fit$coefficient[1] * 100
+
+  ##construct output data.frame
+  g_value <- data.frame(
+    FIT =  g_value_fit,
+    MEAN = mean(g_value.MC),
+    SD = sd(g_value.MC),
+    Q_0.025 = quantile(x = g_value.MC, probs = 0.025),
+    Q_0.16 = quantile(x = g_value.MC, probs = 0.16),
+    Q_0.84 = quantile(x = g_value.MC, probs = 0.84),
+    Q_0.975 = quantile(x = g_value.MC, probs = 0.975)
+  )
+
+  ##normalise the g-value to 2-days using the equation provided by Sebastien Huot via e-mail
+  ##this means the data is extended
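+  ## with g the g-value (in %/decade) obtained at tc: k0 = g / (100 * ln(10)),
+  ## k1 = k0 / (1 - k0 * ln(172800 s / tc)) and g(2 days) = 100 * k1 * ln(10),
+  ## where 172800 s = 2 days; the same transformation is applied to the error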
+  k0 <- g_value[,c("FIT", "SD")] / 100 / log(10)
+  k1 <- k0 / (1 - k0 * log(172800/tc))
+  g_value_2days <-  100 * k1 * log(10)
+  names(g_value_2days) <- c("G_VALUE_2DAYS", "G_VALUE_2DAYS.ERROR")
+
+  # Approximation -------------------------------------------------------------------------------
+  T_0.5.interpolated <- approx(x = LxTx_table[["LxTx_NORM"]],
+                               y = LxTx_table[["TIMESINCEIRR_NORM"]],
+                               xout = 0.5)
+
+  T_0.5.predict <- stats::predict.lm(fit_predict,newdata = data.frame(x = 0.5), interval = "predict")
+
+  T_0.5 <- data.frame(
+    T_0.5_INTERPOLATED = T_0.5.interpolated$y,
+    T_0.5_PREDICTED =  (10^T_0.5.predict[,1])*tc,
+    T_0.5_PREDICTED.LOWER =  (10^T_0.5.predict[,2])*tc,
+    T_0.5_PREDICTED.UPPER =  (10^T_0.5.predict[,3])*tc
+
+  )
+
+  # Plotting ------------------------------------------------------------------------------------
+  if(plot) {
+    if (!plot.single[1]) {
+      par.default <- par()$mfrow
+      on.exit(par(mfrow = par.default))
+      par(mfrow = c(2, 2))
+
+    }
+
+    ##get package
+    col <- get("col", pos = .LuminescenceEnv)
+
+    ##set some plot settings
+    plot_settings <- list(
+      xlab = "Stimulation time [s]",
+      log = "",
+      mtext = ""
+
+    )
+
+    ##modify on request
+    plot_settings <- modifyList(x = plot_settings, val = list(...))
+
+    ##get unique irradiation times ... for plotting
+    irradiation_times.unique <- unique(TIMESINCEIRR)
+
+    ##limit to max 5
+    if(length(irradiation_times.unique) >= 5){
+      irradiation_times.unique <-
+        irradiation_times.unique[seq(1, length(irradiation_times.unique),
+                                     length.out = 5)]
+
+    }
+
+
+    if (!is.null(object)) {
+      if (length(structure) == 2) {
+
+        if (is(plot.single, "logical") ||
+            (is(plot.single, "numeric") & 1 %in% plot.single)) {
+          plot_RLum(
+            set_RLum(class = "RLum.Analysis", records = object_clean[seq(1, length(object_clean), by = 2)]),
+            combine = TRUE,
+            col = c(col[1:5], rep(
+              rgb(0, 0, 0, 0.3), length(TIMESINCEIRR) - 5
+            )),
+            plot.single = TRUE,
+            legend.text = c(paste(irradiation_times.unique, "s"), "others"),
+            legend.col = c(col[1:length(irradiation_times.unique)], rgb(0, 0, 0, 0.3)),
+            xlab = plot_settings$xlab,
+            log = plot_settings$log,
+            legend.pos = "outside",
+            main = expression(paste(L[x], " - curves")),
+            mtext = plot_settings$mtext
+          )
+
+          ##add integration limits
+          abline(
+            v = range(signal.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+            lty = 2,
+            col = "green"
+          )
+          abline(
+            v = range(background.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+            lty = 2,
+            col = "red"
+          )
+        }
+
+        if (is(plot.single, "logical") ||
+            (is(plot.single, "numeric") & 2 %in% plot.single)) {
+          plot_RLum(
+            set_RLum(class = "RLum.Analysis", records = object_clean[seq(2, length(object_clean), by = 2)]),
+            combine = TRUE,
+            col = c(col[1:5], rep(
+              rgb(0, 0, 0, 0.3), length(TIMESINCEIRR) - 5
+            )),
+            plot.single = TRUE,
+            legend.text = c(paste(irradiation_times.unique, "s"), "others"),
+            legend.col = c(col[1:length(irradiation_times.unique)], rgb(0, 0, 0, 0.3)),
+            xlab = plot_settings$xlab,
+            log = plot_settings$log,
+            legend.pos = "outside",
+            main = expression(paste(T[x], " - curves")),
+            mtext = plot_settings$mtext
+          )
+
+          if (is.null(list(...)$signal.integral.Tx)) {
+            ##add integration limits
+            abline(
+              v = range(signal.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+              lty = 2,
+              col = "green"
+            )
+            abline(
+              v = range(background.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+              lty = 2,
+              col = "red"
+            )
+
+          } else{
+            ##add integration limits
+            abline(
+              v = range(list(...)$signal.integral.Tx) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+              lty = 2,
+              col = "green"
+            )
+            abline(
+              v = range(list(...)$background.integral.Tx) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+              lty = 2,
+              col = "red"
+            )
+
+          }
+
+
+        }
+
+      } else{
+        if (is(plot.single, "logical") ||
+            (is(plot.single, "numeric") & 1 %in% plot.single)) {
+          plot_RLum(
+            set_RLum(class = "RLum.Analysis", records = object_clean),
+            combine = TRUE,
+            col = c(col[1:5], rep(
+              rgb(0, 0, 0, 0.3), length(TIMESINCEIRR) - 5
+            )),
+            plot.single = TRUE,
+            legend.text = c(paste(irradiation_times.unique, "s"), "others"),
+            legend.col = c(col[1:length(irradiation_times.unique)], rgb(0, 0, 0, 0.3)),
+            legend.pos = "outside",
+            xlab = plot_settings$xlab,
+            log = plot_settings$log,
+            main = expression(paste(L[x], " - curves")),
+            mtext = plot_settings$mtext
+          )
+
+          ##add integration limits
+          abline(
+            v = range(signal.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+            lty = 2,
+            col = "green"
+          )
+          abline(
+            v = range(background.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])),
+            lty = 2,
+            col = "red"
+          )
+
+        }
+
+        ##empty Tx plot
+        if (is(plot.single, "logical") ||
+            (is(plot.single, "numeric") & 2 %in% plot.single)) {
+          plot(
+            NA,
+            NA,
+            xlim = c(0, 1),
+            ylim = c(0, 1),
+            xlab = "",
+            ylab = "",
+            axes = FALSE
+          )
+          text(x = 0.5,
+               y = 0.5,
+               labels = expression(paste("No ", T[x], " curves detected")))
+
+        }
+
+      }
+
+    }else{
+      if (is(plot.single, "logical") ||
+          (is(plot.single, "numeric") & 1 %in% plot.single)) {
+        ##empty Lx plot
+        plot(
+          NA,
+          NA,
+          xlim = c(0, 1),
+          ylim = c(0, 1),
+          xlab = "",
+          ylab = "",
+          axes = FALSE
+        )
+        text(x = 0.5,
+             y = 0.5,
+             labels = expression(paste("No ", L[x], " curves detected")))
+
+      }
+
+      if (is(plot.single, "logical") ||
+          (is(plot.single, "numeric") & 2 %in% plot.single)) {
+        ##empty Tx plot
+        plot(
+          NA,
+          NA,
+          xlim = c(0, 1),
+          ylim = c(0, 1),
+          xlab = "",
+          ylab = "",
+          axes = FALSE
+        )
+        text(x = 0.5,
+             y = 0.5,
+             labels = expression(paste("No ", T[x], " curves detected")))
+
+
+      }
+    }
+
+    ##(2) Fading plot
+    if (is(plot.single, "logical") ||
+        (is(plot.single, "numeric") & 3 %in% plot.single)) {
+      plot(
+        NA,
+        NA,
+        ylab = "Normalised intensity [a.u.]",
+        xaxt = "n",
+        xlab = "Time since irradition [s]",
+        sub = expression(paste("[", log[10](t / t[c]), "]")),
+        ylim = if (max(LxTx_table[["LxTx_NORM"]]) > 1.1) {
+          c(0.1, max(LxTx_table[["LxTx_NORM"]]) + max(LxTx_table[["LxTx_NORM.ERROR"]]))
+        } else{
+          c(0.1, 1.1)
+        },
+        xlim = range(LxTx_table[["TIMESINCEIRR_NORM.LOG"]]),
+        main = "Signal Fading"
+      )
+
+      ##add axis
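+      ##(tick positions are in log10(t/tc) units; the labels below back-transform
+      ##them to seconds via 10^x * tc)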
+      axis(side = 1,
+           at = axTicks(side = 1),
+           labels = suppressWarnings(format((10 ^ (axTicks(side = 1)) * tc),
+                                            digits = 0,
+                                            decimal.mark = "",
+                                            scientific = TRUE
+           )))
+
+      mtext(
+        side = 3,
+        paste0(
+          "g-value: ",
+          round(g_value$FIT, digits = 2),
+          " \u00b1 ",
+          round(g_value$SD, digits = 2),
+          " (%/decade) | tc = ",
+          format(tc, digits = 4, scientific = TRUE)
+        ),
+        cex = par()$cex * 0.9
+      )
+
+      ##add curves
+      x <- NA
+      for (i in 1:n.MC) {
+        curve(fit_matrix[2, i] * x + fit_matrix[1, i],
+              col = rgb(0, 0.2, 0.4, 0.2),
+              add = TRUE)
+
+      }
+
+      ##add master curve in red
+      curve(
+        fit$coefficient[2] * x + fit$coefficient[1],
+        col = "red",
+        add = TRUE,
+        lwd = 1.5
+      )
+
+      ##add power law curve
+      curve(
+        x ^ 3 * fit_power$coefficient[2] + x ^ 2 * fit_power$coefficient[3] + x * fit_power$coefficient[4] + fit_power$coefficient[1],
+        add = TRUE,
+        col = "blue",
+        lty = 2
+      )
+
+      ##add points
+      points(x = LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+             y = LxTx_table[["LxTx_NORM"]],
+             pch = 21,
+             bg = "grey")
+
+      ##error bars
+      segments(
+        x0 = LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+        x1 = LxTx_table[["TIMESINCEIRR_NORM.LOG"]],
+        y0 = LxTx_table[["LxTx_NORM"]] + LxTx_table[["LxTx_NORM.ERROR"]],
+        y1 = LxTx_table[["LxTx_NORM"]] - LxTx_table[["LxTx_NORM.ERROR"]],
+        col = "grey"
+
+      )
+
+      ##add legend
+      legend(
+        "bottom",
+        legend = c("fit", "fit MC", "trend"),
+        col = c("red", "grey", "blue"),
+        lty = c(1, 1, 2),
+        bty = "n",
+        horiz = TRUE
+      )
+    }
+
+    if (is(plot.single, "logical") ||
+        (is(plot.single, "numeric") & 4 %in% plot.single)) {
+      plot(density(g_value.MC),
+           main = "Density: g-values (%/decade)")
+      rug(x = g_value.MC)
+      abline(v = c(g_value[["Q_0.16"]], g_value[["Q_0.84"]]),
+             lty = 2,
+             col = "darkgreen")
+      abline(v = c(g_value[["Q_0.025"]], g_value[["Q_0.975"]]),
+             lty = 2,
+             col = "red")
+      legend(
+        "topleft",
+        legend = c("HPD - 68 %", "HPD - 95 %"),
+        lty = 2,
+        col = c("darkgreen", "red"),
+        bty = "n"
+      )
+
+
+    }
+
+  }
+
+  # Terminal ------------------------------------------------------------------------------------
+  if (verbose){
+
+    cat("\n[analyse_FadingMeasurement()]\n")
+    cat(paste0("\n n.MC:\t",n.MC))
+    cat(paste0("\n tc:\t",format(tc, digits = 4, scientific = TRUE), " s"))
+    cat("\n---------------------------------------------------")
+    cat(paste0("\nT_0.5 interpolated:\t",T_0.5$T_0.5_INTERPOLATED))
+    cat(paste0("\nT_0.5 predicted:\t",format(T_0.5$T_0.5_PREDICTED, digits = 2, scientific = TRUE)))
+    cat(paste0("\ng-value:\t\t", round(g_value$FIT, digits = 2), " \u00b1 ", round(g_value$SD, digits = 2),
+               " (%/decade)"))
+    cat(paste0("\ng-value (norm. 2 days):\t", round(g_value_2days[1], digits = 2), " \u00b1 ", round(g_value_2days[2], digits = 2),
+               " (%/decade)"))
+    cat("\n---------------------------------------------------")
+    cat(paste0("\nrho':\t\t\t", format(rhoPrime$MEAN, digits = 3), " \u00b1 ", format(rhoPrime$SD, digits = 3)))
+    cat(paste0("\nlog10(rho'):\t\t", round(log10(rhoPrime$MEAN), 2), " \u00b1 ", round(rhoPrime$SD /  (rhoPrime$MEAN * log(10, base = exp(1))), 2)))
+    cat("\n---------------------------------------------------")
+
+  }
+
+  # Return --------------------------------------------------------------------------------------
+  return(set_RLum(
+    class = "RLum.Results",
+    data = list(
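+      ##summary table: g-value (fit value, SD, quantiles), tc, the g-value normalised
+      ##to 2 days, the T_0.5 estimates and the unique ID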
+      fading_results = cbind(
+        g_value,
+        TC = tc,
+        G_VALUE_2DAYS = g_value_2days[1],
+        G_VALUE_2DAYS.ERROR = g_value_2days[2],
+        T_0.5,
+        UID = uid
+      ),
+      fit = fit,
+      rho_prime = rhoPrime,
+      LxTx_table = LxTx_table,
+      irr.times = irradiation_times
+    ),
+    info = list(call = sys.call())
+  ))
+
+}
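+
+## Usage sketch (comment only, not executed): how the returned RLum.Results object
+## might be queried. 'fading_data' is a placeholder for a user-supplied RLum.Analysis
+## object or Lx/Tx table from a fading measurement; argument values are illustrative.
+#
+#   results <- analyse_FadingMeasurement(fading_data, n.MC = 100, plot = TRUE)
+#   get_RLum(results, data.object = "fading_results")  # g-value, tc, T_0.5, ...
+#   get_RLum(results, data.object = "rho_prime")       # recombination centre density rho'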
diff --git a/R/analyse_IRSAR.RF.R b/R/analyse_IRSAR.RF.R
index 7b88c09..51d989d 100644
--- a/R/analyse_IRSAR.RF.R
+++ b/R/analyse_IRSAR.RF.R
@@ -29,7 +29,7 @@
 #' Function used for the fitting (according to Erfurt et al. (2003)): \cr
 #'
 #' \deqn{\phi(D) = \phi_{0}-\Delta\phi(1-exp(-\lambda*D))^\beta}
-#' with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the inital
+#' with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the initial
 #' IR-RF flux, \eqn{\Delta\phi} the dose dependent change of the IR-RF flux,
 #' \eqn{\lambda} the exponential parameter, \eqn{D} the dose and \eqn{\beta}
 #' the dispersive factor.\cr\cr To obtain the palaeodose \eqn{D_{e}} the
@@ -46,7 +46,8 @@
 #' model. This approach was introduced for RF curves by Buylaert et al., 2012
 #' and Lapp et al., 2012.
 #'
-#' Here the sliding is done by searching for the minimum of the squared residuals.\cr
+#' Here the sliding is done by searching for the minimum of the squared residuals.
+#' For the mathematical details of the implementation see Frouin et al., 2017 \cr
 #'
 #' \bold{\code{method.control}}\cr
 #'
@@ -58,20 +59,29 @@
 #' \tabular{lll}{
 #' ARGUMENT       \tab METHOD               \tab DESCRIPTION\cr
 #' \code{trace}   \tab \code{FIT}, \code{SLIDE} \tab as in \code{\link{nls}}; shows sum of squared residuals\cr
+#' \code{trace_vslide} \tab \code{SLIDE} \tab \code{\link{logical}} argument to enable or disable the tracing of the vertical sliding\cr
 #' \code{maxiter} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
 #' \code{warnOnly} \tab \code{FIT}           \tab as in \code{\link{nls}}\cr
 #' \code{minFactor} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
-#' \code{correct_onset} \tab \code{SLIDE}      \tab The logical argument literally spoken,
-#' shifts the curves along the x-axis by the first channel, as light is expected in the first channel.
-#'  The default value is \code{TRUE}.\cr
+#' \code{correct_onset} \tab \code{SLIDE}      \tab The logical argument shifts the curves along the x-axis by the first channel,
+#' as light is expected in the first channel. The default value is \code{TRUE}.\cr
 #' \code{show_density} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
 #' enables or disables KDE plots for MC run results. If the distribution is too narrow nothing is shown.\cr
 #' \code{show_fit} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
-#' enables or disables the plot of the fitted curve rountinly obtained during the evaluation.\cr
-#'\code{n.MC}                  \tab \code{SLIDE}       \tab    \code{\link{integer}} (wiht default):
-#' This controls the number of MC runs within the sliding (assesing the possible minimum values).
+#' enables or disables the plot of the fitted curve routinely obtained during the evaluation.\cr
+#'\code{n.MC}                  \tab \code{SLIDE}       \tab    \code{\link{integer}} (with default):
+#' This controls the number of MC runs within the sliding (assessing the possible minimum values).
 #' The default \code{n.MC = 1000}. Note: This parameter is not the same as controlled by the
-#' function argument \code{n.MC} \cr
+#' function argument \code{n.MC}. \cr
+#' \code{vslide_range} \tab \code{SLIDE} \tab \code{\link{logical}} or \code{\link{numeric}} or \code{\link{character}} (with default):
+#' This argument sets the boundaries for a vertical curve
+#' sliding. The argument expects a vector with an absolute minimum and a maximum (e.g., \code{c(-1000,1000)}).
+#' Alternatively the values \code{NULL} and \code{'auto'} are allowed. The automatic mode detects the
+#' reasonable vertical sliding range (\bold{recommended}). \code{NULL} applies no vertical sliding.
+#' The default is \code{NULL}.\cr
+#' \code{cores} \tab \code{SLIDE} \tab \code{number} or \code{character} (with default): set number of cores to be allocated
+#' for parallel processing of the Monte Carlo runs. The default value is \code{NULL} (single thread),
+#' the recommended value is \code{'auto'}. An optional number (e.g., \code{cores = 8}) assigns a value manually (see the example call below this table).
 #' }
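+#'
+#' A sketch of a call enabling the automatic vertical sliding and multicore processing
+#' (the object name and argument values are illustrative only):\cr
+#' \code{analyse_IRSAR.RF(object, method = "SLIDE",}\cr
+#' \code{method.control = list(vslide_range = 'auto', cores = 'auto'))}\cr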
 #'
 #'
@@ -163,7 +173,7 @@
 #' for the data analysis. Possible options are \code{"FIT"} or \code{"SLIDE"}.
 #'
 #' @param method.control \code{\link{list}} (optional): parameters to control the method, that can
-#' be passed to the choosen method. These are for (1) \code{method = "FIT"}: 'trace', 'maxiter', 'warnOnly',
+#' be passed to the chosen method. These are for (1) \code{method = "FIT"}: 'trace', 'maxiter', 'warnOnly',
 #' 'minFactor' and for (2) \code{method = "SLIDE"}: 'correct_onset', 'show_density',  'show_fit', 'trace'.
 #' See details.
 #'
@@ -177,8 +187,8 @@
 #'
 #' @param n.MC \code{\link{numeric}} (with default): set number of Monte
 #' Carlo runs for start parameter estimation (\code{method = "FIT"}) or
-#' error estimation (\code{method = "SLIDE"}). Note: Large values will
-#' significantly increase the computation time
+#' error estimation (\code{method = "SLIDE"}). This value can be set to \code{NULL} to skip the
+#' MC runs. Note: Large values will significantly increase the computation time
 #'
 #' @param txtProgressBar \code{\link{logical}} (with default): enables \code{TRUE} or
 #' disables \code{FALSE} the progression bar during MC runs
@@ -197,49 +207,105 @@
 #' \code{\link[graphics]{legend}}), \code{xaxt}
 #'
 #'
-#' @return A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
-#' returned:\cr
-#'
-#' \bold{@data}\cr
-#' $ data: \code{\link{data.frame}} table with De and corresponding values\cr
-#' ..$ DE : \code{numeric}: the obtained equivalent dose\cr
-#' ..$ DE.ERROR : \code{numeric}: (only method = "SLIDE") standard deviation obtained from MC runs \cr
-#' ..$ DE.LOWER : \code{numeric}: 2.5\% quantile for De values obtained by MC runs \cr
-#' ..$ DE.UPPER : \code{numeric}: 97.5\% quantile for De values obtained by MC runs  \cr
-#' ..$ DE.STATUS  : \code{character}: test parameter status\cr
-#' ..$ RF_NAT.LIM  : \code{charcter}: used RF_nat curve limits \cr
-#' ..$ RF_REG.LIM : \code{character}: used RF_reg curve limits\cr
-#' ..$ POSITION : \code{integer}: (optional) position of the curves\cr
-#' ..$ DATE : \code{character}: (optional) measurement date\cr
-#' ..$ SEQUENCE_NAME : \code{character}: (optional) sequence name\cr
-#' ..$ UID : \code{character}: unique data set ID \cr
-#' $ test_parameters : \code{\link{data.frame}} table test parameters \cr
-#' $ fit : {\code{\link{nls}} \code{nlsModel} object} \cr
-#' $ slide : \code{\link{list}} data from the sliding process, including the sliding matrix\cr
-#'
-#' \bold{@info}\cr
-#' $ call : \code{\link[methods]{language-class}}: the orignal function call \cr
+#' @return The function returns numerical output and an (optional) plot.
+#'
+#' -----------------------------------\cr
+#' [ NUMERICAL OUTPUT ]\cr
+#' -----------------------------------\cr
+#' \bold{\code{RLum.Results}}-object\cr
+#'
+#' \bold{slot:} \bold{\code{@data}} \cr
+#'
+#' [.. $data : \code{data.frame}]\cr
+#'
+#' \tabular{lll}{
+#' \bold{Column} \tab \bold{Type} \tab \bold{Description}\cr
+#'  DE \tab \code{numeric} \tab the obtained equivalent dose\cr
+#'  DE.ERROR \tab \code{numeric} \tab (only \code{method = "SLIDE"}) standard deviation obtained from MC runs \cr
+#'  DE.LOWER \tab \code{numeric}\tab 2.5\% quantile for De values obtained by MC runs \cr
+#'  DE.UPPER \tab \code{numeric}\tab 97.5\% quantile for De values obtained by MC runs  \cr
+#'  DE.STATUS  \tab \code{character}\tab test parameter status\cr
+#'  RF_NAT.LIM  \tab \code{character}\tab used RF_nat curve limits \cr
+#'  RF_REG.LIM \tab \code{character}\tab used RF_reg curve limits\cr
+#'  POSITION \tab \code{integer}\tab (optional) position of the curves\cr
+#'  DATE \tab \code{character}\tab (optional) measurement date\cr
+#'  SEQUENCE_NAME \tab \code{character}\tab (optional) sequence name\cr
+#'  UID \tab \code{character}\tab unique data set ID
+#' }
+#'
+#' [.. $De.MC : \code{numeric}]\cr
+#'
+#' A \code{numeric} vector with all the De values obtained by the MC runs.\cr
+#'
+#' [.. $test_parameters : \code{data.frame}]\cr
+#'
+#' \tabular{lll}{
+#' \bold{Column} \tab \bold{Type} \tab \bold{Description}\cr
+#'  POSITION \tab \code{numeric} \tab aliquot position \cr
+#'  PARAMETER \tab \code{character} \tab test parameter name \cr
+#'  THRESHOLD \tab \code{numeric} \tab set test parameter threshold value \cr
+#'  VALUE \tab \code{numeric} \tab the calculated test parameter value (to be compared with the threshold)\cr
+#'  STATUS \tab \code{character} \tab test parameter status either \code{"OK"} or \code{"FAILED"} \cr
+#'  SEQUENCE_NAME \tab \code{character} \tab name of the sequence, if available \cr
+#'  UID \tab \code{character}\tab unique data set ID
+#' }
+#'
+#' [.. $fit : \code{nls}]\cr
+#'
+#' An \code{\link{nls}} object produced by the fitting.\cr
+#'
+#' [.. $slide : \code{list}]\cr
+#'
+#' A \code{\link{list}} with data produced during the sliding. Some elements are also
+#' reported with the summary object in \code{data}. List elements are:
+#'
+#' \tabular{lll}{
+#' \bold{Element} \tab \bold{Type} \tab \bold{Description}\cr
+#'  De \tab \code{numeric} \tab the final De obtained with the sliding approach \cr
+#'  De.MC \tab \code{numeric} \tab all De values obtained by the MC runs \cr
+#'  residuals \tab \code{numeric} \tab the obtained residuals for each channel of the curve \cr
+#'  trend.fit \tab \code{lm} \tab fitting results produced by the fitting of the residuals \cr
+#'  RF_nat.slided \tab \code{matrix} \tab the slided RF_nat curve \cr
+#'  t_n.id \tab \code{numeric} \tab the index of the t_n offset \cr
+#'  I_n \tab \code{numeric} \tab the vertical intensity offset if a vertical slide was applied \cr
+#'  algorithm_error \tab \code{numeric} \tab the vertical sliding suffers from a systematic effect induced by the
+#'  algorithm used. The returned value is the standard deviation of all De values obtained while expanding the
+#'  vertical sliding range. It can be added to the final De error as a systematic error, if wanted (see the sketch below this table).\cr
+#'  vslide_range \tab \code{numeric} \tab the range used for the vertical sliding \cr
+#'  squared_residuals \tab \code{numeric} \tab the squared residuals (horizontal sliding)
+#' }
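+#'
+#' As a sketch only (not prescribed by the function): \code{algorithm_error} could be
+#' combined with \code{DE.ERROR} in quadrature, e.g., \code{sqrt(DE.ERROR^2 + algorithm_error^2)},
+#' if a systematic contribution to the final De error is wanted.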
+#'
+#'
+#' \bold{slot:} \bold{\code{@info}} \cr
+#'
+#' The original function call (\code{\link[methods]{language-class}}-object)
 #'
 #' The output (\code{data}) should be accessed using the
 #' function \code{\link{get_RLum}}
 #'
-#' @note \bold{[THIS FUNCTION HAS BETA-STATUS]}\cr
+#' ------------------------\cr
+#' [ PLOT OUTPUT ]\cr
+#' ------------------------\cr
+#'
+#' The slided IR-RF curves with the finally obtained De
+#'
+#' @note
 #'
 #' This function assumes that there is no sensitivity change during the
 #' measurements (natural vs. regenerated signal), which is in contrast to the
-#' findings from Buylaert et al. (2012). Furthermore: In course of ongoing research this function has
+#' findings by Buylaert et al. (2012). Furthermore: In the course of ongoing research this function has
 #' been almost fully re-written, but further thoughtful tests are still pending!
 #' However, as a lot new package functionality was introduced with the changes made
 #' for this function and to allow a part of such tests the re-newed code was made part
 #' of the current package.\cr
 #'
-#' @section Function version: 0.6.11
+#' @section Function version: 0.7.2
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 #'
 #' @seealso \code{\linkS4class{RLum.Analysis}},
 #' \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
-#' \code{\link{nls}}, \code{\link[minpack.lm]{nlsLM}}
+#' \code{\link{nls}}, \code{\link[minpack.lm]{nlsLM}}, \code{\link[parallel]{mclapply}}
 #'
 #'
 #' @references Buylaert, J.P., Jain, M., Murray, A.S., Thomsen, K.J., Lapp, T.,
@@ -262,6 +328,10 @@
 #' and dosimetry. Nuclear Instruments and Methods in Physics Research Section
 #' B: Beam Interactions with Materials and Atoms 207, 487-499.
 #'
+#' Frouin, M., Huot, S., Kreutzer, S., Lahaye, C., Lamothe, M., Philippe, A., Mercier, N., 2017.
+#' An improved radiofluorescence single-aliquot regenerative dose protocol for K-feldspars.
+#' Quaternary Geochronology 38, 13-24. doi:10.1016/j.quageo.2016.11.004
+#'
 #' Lapp, T., Jain, M., Thomsen, K.J., Murray, A.S., Buylaert, J.P., 2012. New
 #' luminescence measurement facilities in retrospective dosimetry. Radiation
 #' Measurements 47, 803-808. doi:10.1016/j.radmeas.2012.02.006
@@ -330,6 +400,8 @@ analyse_IRSAR.RF<- function(
   ##TODO
   ## - if a file path is given, the function should try to find out whether an XSYG-file or
   ##   a BIN-file is provided
+  ##  - add NEWS for vslide_range
+  ##  - update documentation ... if it works as expected.
 
   # SELF CALL -----------------------------------------------------------------------------------
   if(is.list(object)){
@@ -438,8 +510,8 @@ analyse_IRSAR.RF<- function(
   }
 
     ##n.MC
-    if(!is(n.MC, "numeric") || n.MC <= 0){
-      stop("[analyse_IRSAR.RF()] argument 'n.MC' has to be of type integer and >= 0")
+    if((!is(n.MC, "numeric") || n.MC <= 0) && !is.null(n.MC)){
+      stop("[analyse_IRSAR.RF()] argument 'n.MC' has to be of type integer and >= 0", call. = FALSE)
     }
 
 
@@ -593,6 +665,7 @@ analyse_IRSAR.RF<- function(
   }
 
 
+  # Method Control Settings ---------------------------------------------------------------------
   ##===============================================================================================#
   ## SET METHOD CONTROL PARAMETER - FOR BOTH METHODS
   ##===============================================================================================#
@@ -600,13 +673,16 @@ analyse_IRSAR.RF<- function(
   ##set supported values with default
   method.control.settings <- list(
     trace = FALSE,
+    trace_vslide = FALSE,
     maxiter = 500,
     warnOnly = FALSE,
     minFactor = 1 / 4096,
     correct_onset = TRUE,
     show_density = TRUE,
     show_fit = FALSE,
-    n.MC = 1000
+    n.MC = if(is.null(n.MC)){NULL}else{1000},
+    vslide_range = NULL,
+    cores = NULL
   )
 
   ##modify list if necessary
@@ -701,8 +777,8 @@ analyse_IRSAR.RF<- function(
   RF_nat.mean <- mean(RF_nat.limited[,2])
   RF_nat.sd <- sd(RF_nat.limited[,2])
 
-  RF_nat.error.lower <- quantile(RF_nat.limited[,2], 0.975)
-  RF_nat.error.upper <- quantile(RF_nat.limited[,2], 0.025)
+  RF_nat.error.lower <- quantile(RF_nat.limited[,2], 0.975, na.rm = TRUE)
+  RF_nat.error.upper <- quantile(RF_nat.limited[,2], 0.025, na.rm = TRUE)
 
 
   ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
@@ -740,7 +816,7 @@ analyse_IRSAR.RF<- function(
 
     ##produce set of start paramters
     phi.0.MC <- rep(fit.parameters.start["phi.0"], n.MC)
-    lambda.MC <- seq(0.0001, 0.001, by=(0.001-0.0001)/n.MC) ##TODO
+    lambda.MC <- seq(0.0001, 0.001, by=(0.001-0.0001)/n.MC)
     beta.MC <- rep(fit.parameters.start["beta"], n.MC)
     delta.phi.MC <- rep(fit.parameters.start["delta.phi"], n.MC)
 
@@ -893,20 +969,30 @@ analyse_IRSAR.RF<- function(
   else if(method == "SLIDE"){
 
     ##convert to matrix (in fact above the matrix data were first transfered to data.frames ... here
-    ##we correct this ... again)  ##TODO
+    ##we correct this ... again)
     RF_nat.limited <- as.matrix(RF_nat.limited)
     RF_reg.limited <- matrix(c(RF_reg.x, RF_reg.y), ncol = 2)
     RF_nat <- as.matrix(RF_nat)
 
     ##DEFINE FUNCTION FOR SLIDING
     ##FIND MINIMUM - this is done in a function so that it can be further used for MC simulations
+    # sliding() -----------------------------------------------------------------------------------
     sliding <- function(RF_nat,
                         RF_nat.limited,
                         RF_reg.limited,
                         n.MC = method.control.settings$n.MC,
+                        vslide_range = method.control.settings$vslide_range,
+                        trace = method.control.settings$trace_vslide,
                         numerical.only = FALSE){
 
 
+      ##check for odd user input
+      if(length(vslide_range) > 2){
+        vslide_range <- vslide_range[1:2]
+        warning("[anlayse_IRSAR.RF()] method.control = list(vslide_range) has more than 2 elements. Only the first two were used!", call. = FALSE)
+
+      }
+
       ##(0) set objects ... nomenclature as used in Frouin et al., please note that here the index
       ##is used instead the real time values
       t_max.id <- nrow(RF_reg.limited)
@@ -919,50 +1005,169 @@ analyse_IRSAR.RF<- function(
       #pre-allocate object
       temp.sum.residuals <- vector("numeric", length = t_max.id - t_max_nat.id)
 
-      ##calculate sum of squared residuals ... for the entire set
+      ##initialise slide range for specific conditions, namely NULL and "auto"
+      if (is.null(vslide_range)) {
+        vslide_range <- 0
+
+      } else if (vslide_range[1] == "auto") {
+        vslide_range <- -(max(RF_reg.limited[, 2]) - min(RF_reg.limited[, 2])):(max(RF_reg.limited[, 2]) - min(RF_reg.limited[, 2]))
+        algorithm_error <- NA
+
+      } else{
+        vslide_range <- vslide_range[1]:vslide_range[2]
+        algorithm_error <- NULL
+
+      }
+
+      ##problem: the optimisation routine slightly depends on the chosen input sliding vector
+      ##and it might get trapped in a local minimum
+      ##therefore we run the algorithm by expanding the sliding vector
+      if(!is.null(vslide_range) && vslide_range != 0){
+
+        ##even numbers makes it complicated, so let's make it odd if not already the case
+        if(length(vslide_range) %% 2 == 0){
+          vslide_range <- c(vslide_range[1], vslide_range, vslide_range)
+
+        }
+
+        ##construct a list of vector ranges we want to check; this should prevent us from
+        ##getting trapped in a local minimum
+        median_vslide_range.index <- median(1:length(vslide_range))
+        vslide_range.list <- lapply(seq(1, median_vslide_range.index, length.out = 10), function(x){
+           c(median_vslide_range.index - as.integer(x), median_vslide_range.index + as.integer(x))
+        })
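+        ##(each list element holds the lower/upper index of a sub-range centred on the
+        ##median index; the sub-ranges grow from narrow towards the full sliding range)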
+
+        ##correct for out of bounds problem; it might occur
+        vslide_range.list[[10]] <- c(0, length(vslide_range))
+
+        ##TODO ... this is not really optimal, but ok for the moment; it would be better if
+        ##the algorithm reliably found the global minimum.
+        ##now run it in a loop and expand the range from the inner to the outer part
+        ##at least this is considered for the final error range ...
+        temp_minium_list <- lapply(1:10, function(x){
+          .analyse_IRSARRF_SRS(
+            values_regenerated_limited =  RF_reg.limited[,2],
+            values_natural_limited = RF_nat.limited[,2],
+            vslide_range = vslide_range[vslide_range.list[[x]][1]:vslide_range.list[[x]][2]],
+            n_MC = 0, #we don't need MC runs here, so make it quick
+            trace = trace)[c("sliding_vector_min_index","vslide_minimum", "vslide_index")]
+
+        })
+
+        ##get all horizontal index values for the local minimum (corresponding to the vslide)
+        temp_hslide_indices <- vapply(temp_minium_list, function(x){
+          x$sliding_vector_min_index}, FUN.VALUE = numeric(length = 1))
+
+        ##also get the vertical slide indices
+        temp_vslide_indicies <- vapply(temp_minium_list, function(x){
+          x$vslide_index}, FUN.VALUE = numeric(length = 1))
+
+        ##get all the minimum values
+        temp_minium <- vapply(temp_minium_list, function(x){x$vslide_minimum}, FUN.VALUE = numeric(length = 1))
+
+        ##get minimum and set it to the final range
+        vslide_range <- vslide_range[
+          vslide_range.list[[which.min(temp_minium)]][1]:vslide_range.list[[which.min(temp_minium)]][2]]
+
+
+        ##get all possible t_n values for the range expansion ... this can be considered
+        ##a systematic uncertainty, but it is only calculated if the full range
+        ##is considered, otherwise it would be too biased by the user's choice
+        ##ToDo: So far the algorithm error is not sufficiently documented
+        if(!is.null(algorithm_error)){
+          algorithm_error <- sd(vapply(1:length(temp_vslide_indicies), function(k){
+            temp.sliding.step <- RF_reg.limited[temp_hslide_indices[k]] - t_min
+            matrix(data = c(RF_nat[,1] + temp.sliding.step, RF_nat[,2] + temp_vslide_indicies[k]), ncol = 2)[1,1]
+
+          }, FUN.VALUE = numeric(length = 1)))
+
+        }else{
+         algorithm_error <- NA
+
+        }
+
+      }else{
+        algorithm_error <- NA
+
+      }
+
+      ##now run the final sliding with the identified range that corresponds to the minimum value
       temp.sum.residuals <-
         .analyse_IRSARRF_SRS(
           values_regenerated_limited =  RF_reg.limited[,2],
           values_natural_limited = RF_nat.limited[,2],
-          n_MC =  n.MC
-        )
-
+          vslide_range = vslide_range,
+          n_MC = if(is.null(n.MC)){0}else{n.MC},
+          trace = trace
+      )
 
       #(2) get minimum value (index and time value)
-      t_n.id <- which.min(temp.sum.residuals$sliding_vector)
+      index_min <- which.min(temp.sum.residuals$sliding_vector)
+      t_n.id <- index_min
+
+      if (is.null(vslide_range)) {
+        I_n <- 0
+      } else{
+        I_n <- vslide_range[temp.sum.residuals$vslide_index]
+      }
 
       temp.sliding.step <- RF_reg.limited[t_n.id] - t_min
 
       ##(3) slide curve graphically ... full data set we need this for the plotting later
-      RF_nat.slided <- matrix(data = c(RF_nat[,1] + temp.sliding.step, RF_nat[,2]), ncol = 2)
+      RF_nat.slided <- matrix(data = c(RF_nat[,1] + temp.sliding.step, RF_nat[,2] + I_n), ncol = 2)
       t_n <- RF_nat.slided[1,1]
 
       ##the same for the MC runs of the minimum values
-      t_n.MC <-
-        vapply(X = 1:length(temp.sum.residuals$sliding_vector_min_MC), FUN = function(x) {
-          t_n.id.MC <-
-            which(temp.sum.residuals$sliding_vector == temp.sum.residuals$sliding_vector_min_MC[x])
-          temp.sliding.step.MC <- RF_reg.limited[t_n.id.MC] - t_min
-          t_n.MC <- (RF_nat[,1] + temp.sliding.step.MC)[1]
-          return(t_n.MC)
+      if(!is.null(n.MC)) {
+        t_n.MC <-
+          vapply(
+            X = 1:length(temp.sum.residuals$sliding_vector_min_MC),
+            FUN = function(x) {
+              ##get minimum for MC
+              t_n.id.MC <-
+                which(
+                  temp.sum.residuals$sliding_vector == temp.sum.residuals$sliding_vector_min_MC[x]
+                )
+
+              ##there is a low chance of getting two indices; in
+              ##such cases we take the mean
+              temp.sliding.step.MC <-
+                RF_reg.limited[t_n.id.MC] - t_min
+
+              if(length(temp.sliding.step.MC)>1){
+                t_n.MC <- (RF_nat[, 1] + mean(temp.sliding.step.MC))[1]
+
+              }else{
+                t_n.MC <- (RF_nat[, 1] + temp.sliding.step.MC)[1]
+
+              }
+
+              return(t_n.MC)
+
+            },
+            FUN.VALUE = vector(mode = "numeric", length = 1)
+          )
 
-        }, FUN.VALUE = vector(mode = "numeric", length = 1))
+      } else{
+        t_n.MC <- NA_integer_
 
+      }
 
       ##(4) get residuals (needed to be plotted later)
       ## they cannot be longer than the RF_reg.limited curve
       if((t_n.id+length(RF_nat.limited[,2])-1) >= nrow(RF_reg.limited)){
-        residuals <- RF_nat.limited[1:length(t_n.id:nrow(RF_reg.limited)),2]
+        residuals <- (RF_nat.limited[1:length(t_n.id:nrow(RF_reg.limited)),2] + I_n)
         - RF_reg.limited[t_n.id:nrow(RF_reg.limited), 2]
 
       }else{
-        residuals <- RF_nat.limited[,2] - RF_reg.limited[t_n.id:(t_n.id+length(RF_nat.limited[,2])-1), 2]
+        residuals <- (RF_nat.limited[,2] + I_n) - RF_reg.limited[t_n.id:(t_n.id+length(RF_nat.limited[,2])-1), 2]
 
       }
 
       ##(4.1) calculate De from the first channel ... which is t_n here
       De <- round(t_n, digits = 2)
       De.MC <- round(t_n.MC, digits = 2)
+
       temp.trend.fit <- NA
 
       ##(5) calculate trend fit
@@ -975,8 +1180,6 @@ analyse_IRSAR.RF<- function(
 
       }
 
-
-
       ##return values and limited if they are not needed
       if (numerical.only == FALSE) {
         return(
@@ -987,6 +1190,9 @@ analyse_IRSAR.RF<- function(
             trend.fit = temp.trend.fit,
             RF_nat.slided = RF_nat.slided,
             t_n.id = t_n.id,
+            I_n = I_n,
+            algorithm_error = algorithm_error,
+            vslide_range = if(is.null(vslide_range)){NA}else{range(vslide_range)},
             squared_residuals = temp.sum.residuals$sliding_vector
           )
         )
@@ -1004,12 +1210,11 @@ analyse_IRSAR.RF<- function(
       RF_reg.limited = RF_reg.limited
     )
 
-
     ##write results in variables
     De <- slide$De
     residuals <- slide$residuals
     RF_nat.slided <-  slide$RF_nat.slided
-
+    I_n <- slide$I_n
 
     # ERROR ESTIMATION
     # MC runs for error calculation ---------------------------------------------------------------
@@ -1017,71 +1222,124 @@ analyse_IRSAR.RF<- function(
     ##set residual matrix for MC runs, i.e. set up list of pseudo RF_nat curves as function
     ##(i.e., bootstrap from the natural curve distribution)
 
-    slide.MC.list <- lapply(1:n.MC,function(x) {
-
-      ##also here we have to account for the case that user do not understand
-      ##what they are doing ...
-      if(slide$t_n.id + nrow(RF_nat.limited)-1 > nrow(RF_reg.limited)){
-        cbind(
-          RF_nat.limited[1:length(slide$t_n.id:nrow(RF_reg.limited)),1],
-          (RF_reg.limited[slide$t_n.id:nrow(RF_reg.limited) ,2]
-           + sample(residuals,
-                    size = length(slide$t_n.id:nrow(RF_reg.limited)),
-                    replace = TRUE)
+    if(!is.null(n.MC)){
+      slide.MC.list <- lapply(1:n.MC,function(x) {
+
+        ##also here we have to account for the case that users do not understand
+        ##what they are doing ...
+        if(slide$t_n.id + nrow(RF_nat.limited)-1 > nrow(RF_reg.limited)){
+          cbind(
+            RF_nat.limited[1:length(slide$t_n.id:nrow(RF_reg.limited)),1],
+            (RF_reg.limited[slide$t_n.id:nrow(RF_reg.limited) ,2]
+             + sample(residuals,
+                      size = length(slide$t_n.id:nrow(RF_reg.limited)),
+                      replace = TRUE)
+            )
           )
-        )
 
-      }else{
-        cbind(
-          RF_nat.limited[,1],
-          (RF_reg.limited[slide$t_n.id:(slide$t_n.id + nrow(RF_nat.limited)-1) ,2]
-           + sample(residuals, size = nrow(RF_nat.limited), replace = TRUE)
+        }else{
+          cbind(
+            RF_nat.limited[,1],
+            (RF_reg.limited[slide$t_n.id:(slide$t_n.id + nrow(RF_nat.limited)-1) ,2]
+             + sample(residuals, size = nrow(RF_nat.limited), replace = TRUE)
+            )
           )
-        )
+        }
+
+      })
+
+
+      if(txtProgressBar){
+        ##terminal output for MC
+        cat("\n\t Run Monte Carlo loops for error estimation\n")
+
+        ##progress bar
+        pb<-txtProgressBar(min=0, max=n.MC, initial=0, char="=", style=3)
       }
 
-    })
 
+      ##set parallel calculation if wanted
+      if(is.null(method.control.settings$cores)){
+        cores <- 1
 
-    if(txtProgressBar){
-      ##terminal output fo MC
-      cat("\n\t Run Monte Carlo loops for error estimation\n")
+      }else{
+        ##case 'auto'
+        if(method.control.settings$cores == 'auto'){
+          if(parallel::detectCores() <= 2){
+            warning("[analyse_IRSAR.RF()] For the multicore auto mode at least 4 cores are needed!", call. = FALSE)
+            cores <- 1
 
-      ##progress bar
-      pb<-txtProgressBar(min=0, max=n.MC, initial=0, char="=", style=3)
-    }
+          }else{
+            cores <- parallel::detectCores() - 2
 
+          }
 
-    De.MC <- c(vapply(X = 1:n.MC,
-                    FUN.VALUE = vector("numeric", length = method.control.settings$n.MC),
-                    FUN = function(i){
+        }else if(is.numeric(method.control.settings$cores)){
+
+          if(method.control.settings$cores > parallel::detectCores()){
+            warning(paste0("[analyse_IRSAR.RF()] What do you want? Your machine has only ", parallel::detectCores(), " cores!"), call. = FALSE)
+          }
+
+          ##assign the requested number anyway, it is not our problem
+          cores <- method.control.settings$cores
+
+        }else{
+          try(stop("[analyse_IRSAR.RF()] Invalid value for control argument 'cores'. Value set to 1", call. = FALSE))
+          cores <- 1
+
+        }
 
-      temp.slide.MC <- sliding(
-        RF_nat = RF_nat,
-        RF_reg.limited = RF_reg.limited,
-        RF_nat.limited = slide.MC.list[[i]],
-        numerical.only = TRUE
-      )
 
-      ##update progress bar
-      if (txtProgressBar) {
-        setTxtProgressBar(pb, i)
+        ##return message
+        message(paste("[analyse_IRSAR.RF()] Multicore mode using", cores, "cores..."))
       }
 
-       ##do nothing else, just report all possible values
-       return(temp.slide.MC[[2]])
 
-    }))
 
-    ##close
-    if(txtProgressBar){close(pb)}
+      ##run MC runs
+      De.MC <- unlist(parallel::mclapply(X = 1:n.MC,
+                      FUN = function(i){
 
-    ##calculate absolute deviation between De and the here newly calculated De.MC
-    ##this is, e.g. ^t_n.1* - ^t_n in Frouin et al.
-    De.diff <- diff(x = c(De, De.MC))
-    De.error <- round(sd(De.MC), digits = 2)
-    De.lower <- De - quantile(De.diff, 0.975)
-    De.upper <- De - quantile(De.diff, 0.025)
+        temp.slide.MC <- sliding(
+          RF_nat = RF_nat,
+          RF_reg.limited = RF_reg.limited,
+          RF_nat.limited = slide.MC.list[[i]],
+          numerical.only = TRUE
+        )
+
+
+        ##update progress bar
+        if (txtProgressBar) {
+          setTxtProgressBar(pb, i)
+        }
+
+         ##do nothing else, just report all possible values
+         return(temp.slide.MC[[2]])
+
+      },
+      mc.preschedule = TRUE,
+      mc.cores = cores
+      ))
+
+      ##close
+      if(txtProgressBar){close(pb)}
+
+      ##calculate absolute deviation between De and the here newly calculated De.MC
+      ##this is, e.g. ^t_n.1* - ^t_n in Frouin et al.
+      De.diff <- diff(x = c(De, De.MC))
+      De.error <- round(sd(De.MC), digits = 2)
+      De.lower <- De - quantile(De.diff, 0.975, na.rm = TRUE)
+      De.upper <- De - quantile(De.diff, 0.025, na.rm = TRUE)
+
+    }else{
+
+      De.diff <- NA_integer_
+      De.error <- NA_integer_
+      De.lower <- NA_integer_
+      De.upper <- NA_integer_
+      De.MC <- NA_integer_
+
+    }
 
   }else{
 
@@ -1357,7 +1615,6 @@ analyse_IRSAR.RF<- function(
     ylim  <- if("ylim" %in% names(list(...))) {list(...)$ylim} else
     {c(min(temp.sequence_structure$y.min), max(temp.sequence_structure$y.max))}
 
-
     ##open plot area
     plot(
       NA,NA,
@@ -1599,7 +1856,8 @@ analyse_IRSAR.RF<- function(
       ##(0) density plot
       if (method.control.settings$show_density) {
         ##showing the density makes only sense when we see at least 10 data points
-        if (length(unique(De.MC)) >= 15) {
+        if (!is.na(De.MC) && length(unique(De.MC)) >= 15) {
+
           ##calculate density De.MC
           density.De.MC <- density(De.MC)
 
@@ -1631,7 +1889,7 @@ analyse_IRSAR.RF<- function(
                   col = rgb(0,0.4,0.8,0.5))
 
         }else{
-          warning("Narrow density distribution, no density distribution plotted!")
+          warning("[analyse_IRSAR.RF()] Narrow density distribution, no density distribution plotted!", call. = FALSE)
 
         }
 
@@ -1670,6 +1928,23 @@ analyse_IRSAR.RF<- function(
         )
       }
 
+      ##(5) add vertical shift as arrow; show nothing if nothing was shifted
+      if (plot.settings$log != "y" & plot.settings$log != "xy" & I_n != 0) {
+        shape::Arrows(
+          x0 = (0 + par()$usr[1])/2,
+          y0 = RF_nat[1,2],
+          y1 = RF_nat[1,2] + I_n,
+          x1 = (0 + par()$usr[1])/2,
+          arr.type = "triangle",
+          arr.length = 0.3 * par()[["cex"]],
+          code = 2,
+          col = col[2],
+          arr.adj = 1,
+          arr.lwd = 1
+        )
+      }
+
+
       ##TODO
       ##uncomment here to see all the RF_nat curves produced by the MC runs
       ##could become a polygone for future versions
@@ -1844,6 +2119,8 @@ analyse_IRSAR.RF<- function(
     }
 
   }#endif::plot
+
+  # Return --------------------------------------------------------------------------------------
   ##=============================================================================#
   ## RETURN
   ##=============================================================================#
@@ -1911,4 +2188,3 @@ analyse_IRSAR.RF<- function(
   invisible(newRLumResults.analyse_IRSAR.RF)
 
 }
-
diff --git a/R/analyse_SAR.CWOSL.R b/R/analyse_SAR.CWOSL.R
index b008c8c..8caacf9 100644
--- a/R/analyse_SAR.CWOSL.R
+++ b/R/analyse_SAR.CWOSL.R
@@ -78,7 +78,7 @@
 #' as the maximum background integral for the Tx curve.
 #'
 #' @param rejection.criteria \code{\link{list}} (with default): provide a named list
-#' and set rejection criteria in percentage for further calculation. Can be a \code{\link{list}} in
+#' and set rejection criteria in \bold{percentage} for further calculation. Can be a \code{\link{list}} in
 #' a \code{\link{list}}, if \code{object} is of type \code{\link{list}}
 #'
 #' Allowed arguments are \code{recycling.ratio}, \code{recuperation.rate},
@@ -117,7 +117,8 @@
 #'
 #' @return A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
 #' returned containing the following elements:
-#' \item{De.values}{\link{data.frame} containing De-values, De-error and
+#'
+#' \item{data}{\link{data.frame} containing De-values, De-error and
 #' further parameters} \item{LnLxTnTx.values}{\link{data.frame} of all
 #' calculated Lx/Tx values including signal, background counts and the dose
 #' points} \item{rejection.criteria}{\link{data.frame} with values that might
@@ -133,7 +134,7 @@
 #'
 #' \bold{The function currently does only support 'OSL' or 'IRSL' data!}
 #'
-#' @section Function version: 0.7.5
+#' @section Function version: 0.7.10
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -287,7 +288,7 @@ if(is.list(object)){
   ##merge results and check if the output became NULL
   results <- merge_RLum(temp)
 
-  ##DO NOT use invisible here, this will stop the function from stopping
+  ##DO NOT use invisible here, this will prevent the function from stopping
   if(length(results) == 0){
     return(NULL)
 
@@ -324,7 +325,7 @@ if(is.list(object)){
     }
 
     if(missing("signal.integral.max") & !is.list(object)){
-      signal.integral.min <- 2
+      signal.integral.max <- 2
       warning("[analyse_SAR.CWOSL()] 'signal.integral.max' missing, set to 2", call. = FALSE)
     }
 
@@ -416,6 +417,20 @@ if(is.list(object)){
 
     ##modify list on the request
     if(!is.null(rejection.criteria)){
+
+      ##check if the provided values are valid at all
+      if(!all(names(rejection.criteria)%in%names(rejection.criteria.default))){
+        try(stop(
+          paste0("[analyse_SAR.CWOSL()] Rejection criteria '",
+                paste(
+                  names(
+                    rejection.criteria)[
+                      !names(rejection.criteria)%in%names(rejection.criteria.default)], collapse = ", ")
+                       ,"' unknown! Input ignored!"), call. = FALSE))
+
+      }
+
+      ##modify list
       rejection.criteria <- modifyList(rejection.criteria.default, rejection.criteria)
 
     }else{
@@ -597,12 +612,13 @@ if(is.list(object)){
 
     ##separate TL curves
     TL.Curves.ID.Lx <-
-      sapply(1:length(OSL.Curves.ID.Lx), function(x) {
+      lapply(1:length(OSL.Curves.ID.Lx), function(x) {
         TL.Curves.ID[which(TL.Curves.ID == (OSL.Curves.ID.Lx[x] - 1))]
       })
 
+
     TL.Curves.ID.Tx <-
-      sapply(1:length(OSL.Curves.ID.Tx), function(x) {
+      lapply(1:length(OSL.Curves.ID.Tx), function(x) {
         TL.Curves.ID[which(TL.Curves.ID == (OSL.Curves.ID.Tx[x] - 1))]
       })
 
@@ -844,18 +860,19 @@ if(is.list(object)){
         })
 
     }else{
-      temp.status.RecyclingRatio <- "OK"
+      temp.status.RecyclingRatio <- rep("OK", length(RecyclingRatio))
 
     }
 
     ##Recuperation
-    if (!is.na(Recuperation)[1] & !is.na(rejection.criteria$recuperation.rate)) {
+    if (!is.na(Recuperation)[1] &
+        !is.na(rejection.criteria$recuperation.rate)) {
       temp.status.Recuperation  <-
         sapply(1:length(Recuperation), function(x) {
-          if(Recuperation[x] > rejection.criteria$recuperation.rate){
+          if (Recuperation[x] > rejection.criteria$recuperation.rate / 100) {
             "FAILED"
 
-          }else{
+          } else{
             "OK"
 
           }
@@ -867,6 +884,7 @@ if(is.list(object)){
 
     }
 
+
     # Provide Rejection Criteria for Testdose error --------------------------
     testdose.error.calculated <- (LnLxTnTx$Net_TnTx.Error/LnLxTnTx$Net_TnTx)[1]
 
@@ -990,12 +1008,12 @@ if(is.list(object)){
         if (length(TL.Curves.ID.Lx[[1]] > 0)) {
           ##It is just an approximation taken from the data
           resolution.TLCurves <-  round(mean(diff(
-            round(object@records[[TL.Curves.ID.Lx[1]]]@data[,1], digits = 1)
+            round(object@records[[TL.Curves.ID.Lx[[1]]]]@data[,1], digits = 1)
           )), digits = 1)
 
           ylim.range <-
             sapply(seq(1,length(TL.Curves.ID.Lx),by = 1) ,function(x) {
-              range(object@records[[TL.Curves.ID.Lx[x]]]@data[,2])
+              range(object@records[[TL.Curves.ID.Lx[[x]]]]@data[,2])
 
             })
 
@@ -1004,8 +1022,8 @@ if(is.list(object)){
             xlab = "T [\u00B0C]",
             ylab = paste("TL [cts/",resolution.TLCurves," \u00B0C]",sep =
                            ""),
-            xlim = c(object@records[[TL.Curves.ID.Lx[1]]]@data[1,1],
-                     max(object@records[[TL.Curves.ID.Lx[1]]]@data[,1])),
+            xlim = c(object@records[[TL.Curves.ID.Lx[[1]]]]@data[1,1],
+                     max(object@records[[TL.Curves.ID.Lx[[1]]]]@data[,1])),
             ylim = c(1,max(ylim.range)),
             main = main,
             log = if (log == "y" | log == "xy") {
@@ -1024,7 +1042,7 @@ if(is.list(object)){
 
           ##plot TL curves
           sapply(1:length(TL.Curves.ID.Lx) ,function(x) {
-            lines(object@records[[TL.Curves.ID.Lx[x]]]@data,col = col[x])
+            lines(object@records[[TL.Curves.ID.Lx[[x]]]]@data,col = col[x])
 
           })
 
@@ -1132,12 +1150,12 @@ if(is.list(object)){
         if (length(TL.Curves.ID.Tx[[1]] > 0)) {
           ##It is just an approximation taken from the data
           resolution.TLCurves <-  round(mean(diff(
-            round(object@records[[TL.Curves.ID.Tx[1]]]@data[,1], digits = 1)
+            round(object@records[[TL.Curves.ID.Tx[[1]]]]@data[,1], digits = 1)
           )), digits = 1)
 
 
           ylim.range <- sapply(1:length(TL.Curves.ID.Tx) ,function(x) {
-            range(object@records[[TL.Curves.ID.Tx[x]]]@data[,2])
+            range(object@records[[TL.Curves.ID.Tx[[x]]]]@data[,2])
 
           })
 
@@ -1147,8 +1165,8 @@ if(is.list(object)){
             NA,NA,
             xlab = "T [\u00B0C]",
             ylab = paste("TL [cts/",resolution.TLCurves," \u00B0C]",sep = ""),
-            xlim = c(object@records[[TL.Curves.ID.Tx[1]]]@data[1,1],
-                     max(object@records[[TL.Curves.ID.Tx[1]]]@data[,1])),
+            xlim = c(object@records[[TL.Curves.ID.Tx[[1]]]]@data[1,1],
+                     max(object@records[[TL.Curves.ID.Tx[[1]]]]@data[,1])),
             ylim = c(1,max(ylim.range)),
             main = main,
             log = if (log == "y" | log == "xy") {
@@ -1167,7 +1185,7 @@ if(is.list(object)){
 
           ##plot TL curves
           sapply(1:length(TL.Curves.ID.Tx) ,function(x) {
-            lines(object@records[[TL.Curves.ID.Tx[x]]]@data,col = col[x])
+            lines(object@records[[TL.Curves.ID.Tx[[x]]]]@data,col = col[x])
 
           })
 
@@ -1677,7 +1695,6 @@ if(is.list(object)){
     }
 
 
-
     # Return --------------------------------------------------------------------------------------
     invisible(temp.results.final)
 
@@ -1691,3 +1708,5 @@ if(is.list(object)){
   }
 
 }
+
+
diff --git a/R/analyse_SAR.TL.R b/R/analyse_SAR.TL.R
index c27570b..2172de7 100644
--- a/R/analyse_SAR.TL.R
+++ b/R/analyse_SAR.TL.R
@@ -26,6 +26,11 @@
 #' channel number for the upper signal integral bound (e.g.
 #' \code{signal.integral.max = 200})
 #'
+#' @param integral_input \code{\link{character}} (with default): defines the input for
+#' the arguments \code{signal.integral.min} and \code{signal.integral.max}. These limits can be
+#' provided either as \code{'channel'} number (the default) or as \code{'temperature'}. If \code{'temperature'}
+#' is chosen the best matching channel is selected.
+#'
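+#' A sketch using temperature limits (values are illustrative only):\cr
+#' \code{analyse_SAR.TL(object, signal.integral.min = 210, signal.integral.max = 250,}\cr
+#' \code{integral_input = "temperature")}\cr
+#'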
 #' @param sequence.structure \link{vector} \link{character} (with default):
 #' specifies the general sequence structure. Three steps are allowed (
 #' \code{"PREHEAT"}, \code{"SIGNAL"}, \code{"BACKGROUND"}), in addition a
@@ -54,10 +59,11 @@
 #' as rejection criteria. NA is produced if no R0 dose point exists.}\cr\cr
 #' \bold{note:} the output should be accessed using the function
 #' \code{\link{get_RLum}}
+#'
 #' @note \bold{THIS IS A BETA VERSION}\cr\cr None TL curves will be removed
 #' from the input object without further warning.
 #'
-#' @section Function version: 0.1.5
+#' @section Function version: 0.2.0
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 #'
@@ -71,7 +77,9 @@
 #' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an
 #' improved single-aliquot regenerative-dose protocol. Radiation Measurements
 #' 32, 57-73.
+#'
 #' @keywords datagen plot
+#'
 #' @examples
 #'
 #'
@@ -95,6 +103,7 @@ analyse_SAR.TL <- function(
   object.background,
   signal.integral.min,
   signal.integral.max,
+  integral_input = "channel",
   sequence.structure = c("PREHEAT", "SIGNAL", "BACKGROUND"),
   rejection.criteria = list(recycling.ratio = 10, recuperation.rate = 10),
   dose.points,
@@ -151,10 +160,11 @@ analyse_SAR.TL <- function(
   temp.sequence.structure <- temp.sequence.structure[which(
     temp.sequence.structure[,"protocol.step"]!="EXCLUDE"),]
 
+
   ##check integrity; signal and bg range should be equal
   if(length(
     unique(
-      temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","x.max"]))>1){
+      temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","n.channels"]))>1){
 
     stop(paste(
       "[analyse_SAR.TL()] Signal range differs. Check sequence structure.\n",
@@ -164,13 +174,11 @@ analyse_SAR.TL <- function(
 
   ##check if the wanted curves are a multiple of the structure
   if(length(temp.sequence.structure[,"id"])%%length(sequence.structure)!=0){
-
     stop("[analyse_SAR.TL()] Input TL curves are not a multiple of the sequence structure.")
 
   }
 
 
-
   # # Calculate LnLxTnTx values  --------------------------------------------------
 
   ##grep IDs for signal and background curves
@@ -183,32 +191,58 @@ analyse_SAR.TL <- function(
   TL.background.ID <- temp.sequence.structure[
     temp.sequence.structure[,"protocol.step"] == "BACKGROUND","id"]
 
+  ##comfort ... translate integral limits from temperature to channel
+  if(integral_input == "temperature"){
+    signal.integral.min <-
+      which.min(abs(
+        signal.integral.min - get_RLum(object, record.id = TL.signal.ID[1])[, 1]
+      ))
+    signal.integral.max <-
+      which.min(abs(
+        signal.integral.max - get_RLum(object, record.id = TL.signal.ID[1])[, 1]
+      ))
+  }
 
   ##calculate LxTx values using external function
-
   for(i in seq(1,length(TL.signal.ID),by=2)){
 
     temp.LnLxTnTx <- get_RLum(
       calc_TLLxTxRatio(
-        Lx.data.signal = get_RLum(object, record.id=TL.signal.ID[i]),
-        Lx.data.background = get_RLum(object, record.id=TL.background.ID[i]),
-        Tx.data.signal = get_RLum(object, record.id=TL.signal.ID[i+1]),
-        Tx.data.background = get_RLum(object, record.id = TL.background.ID[i+1]),
+        Lx.data.signal = get_RLum(object, record.id = TL.signal.ID[i]),
+        Lx.data.background = if (length(TL.background.ID) == 0) {
+          NULL
+        } else{
+          get_RLum(object, record.id = TL.background.ID[i])
+        },
+        Tx.data.signal = get_RLum(object, record.id = TL.signal.ID[i + 1]),
+        Tx.data.background =  if (length(TL.background.ID) == 0){
+          NULL
+
+        }else{
+          get_RLum(object, record.id = TL.background.ID[i + 1])
+
+        },
         signal.integral.min,
-        signal.integral.max))
+        signal.integral.max
+      )
+    )
 
     ##grep dose
     temp.Dose <- object@records[[TL.signal.ID[i]]]@info$IRR_TIME
 
+      ##take care of NULL values
+      if(is.null(temp.Dose)){
+        temp.Dose <- NA
+
+      }
 
+    ##bind data.frame
     temp.LnLxTnTx <- cbind(Dose=temp.Dose, temp.LnLxTnTx)
 
     if(exists("LnLxTnTx")==FALSE){
-
       LnLxTnTx <- data.frame(temp.LnLxTnTx)
 
     }else{
-
       LnLxTnTx <- rbind(LnLxTnTx,temp.LnLxTnTx)
 
     }
@@ -326,9 +360,9 @@ analyse_SAR.TL <- function(
   ##============================================================================##
 
   # Plotting - Config -------------------------------------------------------
-
   ##grep plot parameter
   par.default <- par(no.readonly = TRUE)
+  on.exit(par(par.default))
 
   ##colours and double for plotting
   col <- get("col", pos = .LuminescenceEnv)
@@ -379,8 +413,6 @@ analyse_SAR.TL <- function(
 
   ##plot curves
   sapply(seq(1,length(TL.signal.ID),by=2), function(x){
-
-
     lines(object@records[[TL.signal.ID[x]]]@data,col=col.doubled[x])
 
   })
@@ -408,8 +440,6 @@ analyse_SAR.TL <- function(
 
   ##plot curves
   sapply(seq(2,length(TL.signal.ID),by=2), function(x){
-
-
     lines(object@records[[TL.signal.ID[x]]]@data,col=col.doubled[x])
 
   })
@@ -421,145 +451,179 @@ analyse_SAR.TL <- function(
 
   # Plotting Plateau Test LnLx -------------------------------------------------
 
-  NTL.net.LnLx <- data.frame(object@records[[TL.signal.ID[1]]]@data[,1],
-                             object@records[[TL.signal.ID[1]]]@data[,2]-
-                               object@records[[TL.background.ID[1]]]@data[,2])
-
-  Reg1.net.LnLx <- data.frame(object@records[[TL.signal.ID[3]]]@data[,1],
-                              object@records[[TL.signal.ID[3]]]@data[,2]-
-                                object@records[[TL.background.ID[3]]]@data[,2])
-
-
-  TL.Plateau.LnLx <- data.frame(NTL.net.LnLx[,1], Reg1.net.LnLx[,2]/NTL.net.LnLx[,2])
-
-  ##Plot Plateau Test
-  plot(NA, NA,
-       xlab = "Temp. [\u00B0C]",
-       ylab = "TL [a.u.]",
-       xlim = c(min(signal.integral.temperature)*0.9, max(signal.integral.temperature)*1.1),
-       ylim = c(0, max(NTL.net.LnLx[,2])),
-       main = expression(paste("Plateau test ",L[n],",",L[x]," curves",sep=""))
-  )
-
-
-  ##plot single curves
-  lines(NTL.net.LnLx, col=col[1])
-  lines(Reg1.net.LnLx, col=col[2])
-
-
-  ##plot
-  par(new=TRUE)
-  plot(TL.Plateau.LnLx,
-       axes=FALSE,
-       xlab="",
-       ylab="",
-       ylim=c(0,
-              quantile(TL.Plateau.LnLx[c(signal.integral.min:signal.integral.max),2],
-                       probs = c(0.90), na.rm = TRUE)+3),
-       col="darkgreen")
-  axis(4)
-
-
-  # Plotting Plateau Test TnTx -------------------------------------------------
-
-  ##get NTL signal
-  NTL.net.TnTx <- data.frame(object@records[[TL.signal.ID[2]]]@data[,1],
-                             object@records[[TL.signal.ID[2]]]@data[,2]-
-                               object@records[[TL.background.ID[2]]]@data[,2])
-
-  ##get signal from the first regeneration point
-  Reg1.net.TnTx <- data.frame(object@records[[TL.signal.ID[4]]]@data[,1],
-                              object@records[[TL.signal.ID[4]]]@data[,2]-
-                                object@records[[TL.background.ID[4]]]@data[,2])
-
+  if(length(TL.background.ID) != 0){
+    NTL.net.LnLx <-
+      data.frame(object@records[[TL.signal.ID[1]]]@data[, 1],
+                 object@records[[TL.signal.ID[1]]]@data[, 2] -
+                   object@records[[TL.background.ID[1]]]@data[, 2])
+
+    Reg1.net.LnLx <-
+      data.frame(object@records[[TL.signal.ID[3]]]@data[, 1],
+                 object@records[[TL.signal.ID[3]]]@data[, 2] -
+                   object@records[[TL.background.ID[3]]]@data[, 2])
+
+
+    TL.Plateau.LnLx <-
+      data.frame(NTL.net.LnLx[, 1], Reg1.net.LnLx[, 2] / NTL.net.LnLx[, 2])
+
+    ##Plot Plateau Test
+    plot(
+      NA,
+      NA,
+      xlab = "Temp. [\u00B0C]",
+      ylab = "TL [a.u.]",
+      xlim = c(
+        min(signal.integral.temperature) * 0.9,
+        max(signal.integral.temperature) * 1.1
+      ),
+      ylim = c(0, max(NTL.net.LnLx[, 2])),
+      main = expression(paste("Plateau test ", L[n], ",", L[x], " curves", sep =
+                                ""))
+    )
+
+
+    ##plot single curves
+    lines(NTL.net.LnLx, col = col[1])
+    lines(Reg1.net.LnLx, col = col[2])
+
+
+    ##plot
+    par(new = TRUE)
+    plot(
+      TL.Plateau.LnLx,
+      axes = FALSE,
+      xlab = "",
+      ylab = "",
+      ylim = c(0,
+               quantile(
+                 TL.Plateau.LnLx[c(signal.integral.min:signal.integral.max), 2],
+                 probs = c(0.90), na.rm = TRUE
+               ) + 3),
+      col = "darkgreen"
+    )
+    axis(4)
+
+
+    # Plotting Plateau Test TnTx -------------------------------------------------
+
+    ##get NTL signal
+    NTL.net.TnTx <-
+      data.frame(object@records[[TL.signal.ID[2]]]@data[, 1],
+                 object@records[[TL.signal.ID[2]]]@data[, 2] -
+                   object@records[[TL.background.ID[2]]]@data[, 2])
+
+    ##get signal from the first regeneration point
+    Reg1.net.TnTx <-
+      data.frame(object@records[[TL.signal.ID[4]]]@data[, 1],
+                 object@records[[TL.signal.ID[4]]]@data[, 2] -
+                   object@records[[TL.background.ID[4]]]@data[, 2])
+
+
+    ##combine values
+    TL.Plateau.TnTx <-
+      data.frame(NTL.net.TnTx[, 1], Reg1.net.TnTx[, 2] / NTL.net.TnTx[, 2])
+
+    ##Plot Plateau Test
+    plot(
+      NA,
+      NA,
+      xlab = "Temp. [\u00B0C]",
+      ylab = "TL [a.u.]",
+      xlim = c(
+        min(signal.integral.temperature) * 0.9,
+        max(signal.integral.temperature) * 1.1
+      ),
+      ylim = c(0, max(NTL.net.TnTx[, 2])),
+      main = expression(paste("plateau Test ", T[n], ",", T[x], " curves", sep =
+                                ""))
+    )
+
+
+    ##plot single curves
+    lines(NTL.net.TnTx, col = col[1])
+    lines(Reg1.net.TnTx, col = col[2])
+
+
+    ##plot
+    par(new = TRUE)
+    plot(
+      TL.Plateau.TnTx,
+      axes = FALSE,
+      xlab = "",
+      ylab = "",
+      ylim = c(0,
+               quantile(
+                 TL.Plateau.TnTx[c(signal.integral.min:signal.integral.max), 2],
+                 probs = c(0.90), na.rm = TRUE
+               ) + 3),
+      col = "darkgreen"
+    )
+    axis(4)
+
+
+
+
+    # Plotting Legend ----------------------------------------
+
+
+    plot(
+      c(1:(length(TL.signal.ID) / 2)),
+      rep(8, length(TL.signal.ID) / 2),
+      type = "p",
+      axes = FALSE,
+      xlab = "",
+      ylab = "",
+      pch = 15,
+      col = col[1:length(TL.signal.ID)],
+      cex = 2,
+      ylim = c(0, 10)
+    )
+
+    ##add text
+    text(c(1:(length(TL.signal.ID) / 2)),
+         rep(4, length(TL.signal.ID) / 2),
+         paste(LnLxTnTx$Name, "\n(", LnLxTnTx$Dose, ")", sep = ""))
+
+    ##add line
+    abline(h = 10, lwd = 0.5)
+
+    ##set failed text and mark De as failed
+    if (length(grep("FAILED", RejectionCriteria$status)) > 0) {
+      mtext("[FAILED]", col = "red")
 
-  ##combine values
-  TL.Plateau.TnTx <- data.frame(NTL.net.TnTx[,1], Reg1.net.TnTx[,2]/NTL.net.TnTx[,2])
 
-  ##Plot Plateau Test
-  plot(NA, NA,
-       xlab = "Temp. [\u00B0C]",
-       ylab = "TL [a.u.]",
-       xlim = c(min(signal.integral.temperature)*0.9, max(signal.integral.temperature)*1.1),
-       ylim = c(0, max(NTL.net.TnTx[,2])),
-       main = expression(paste("plateau Test ",T[n],",",T[x]," curves",sep=""))
-  )
-
-
-  ##plot single curves
-  lines(NTL.net.TnTx, col=col[1])
-  lines(Reg1.net.TnTx, col=col[2])
-
-
-  ##plot
-  par(new=TRUE)
-  plot(TL.Plateau.TnTx,
-       axes=FALSE,
-       xlab="",
-       ylab="",
-       ylim=c(0,
-              quantile(TL.Plateau.TnTx[c(signal.integral.min:signal.integral.max),2],
-                       probs = c(0.90), na.rm = TRUE)+3),
-       col="darkgreen")
-  axis(4)
-
-
-
-
-  # Plotting Legend ----------------------------------------
-
-
-  plot(c(1:(length(TL.signal.ID)/2)),
-       rep(8,length(TL.signal.ID)/2),
-       type = "p",
-       axes=FALSE,
-       xlab="",
-       ylab="",
-       pch=15,
-       col=col[1:length(TL.signal.ID)],
-       cex=2,
-       ylim=c(0,10)
-  )
-
-  ##add text
-  text(c(1:(length(TL.signal.ID)/2)),
-       rep(4,length(TL.signal.ID)/2),
-       paste(LnLxTnTx$Name,"\n(",LnLxTnTx$Dose,")", sep="")
+    }
+  }
 
+  # Plotting  GC  ----------------------------------------
+  temp.sample <- data.frame(Dose=LnLxTnTx$Dose,
+                            LxTx=LnLxTnTx$LxTx,
+                            LxTx.Error=LnLxTnTx$LxTx.Error,
+                            TnTx=LnLxTnTx$TnTx
   )
 
-  ##add line
-  abline(h=10,lwd=0.5)
+  ##run curve fitting
+  temp.GC <- try(plot_GrowthCurve(
+    sample = temp.sample,
+    ...
+  ))
 
-  ##set failed text and mark De as failed
-  if(length(grep("FAILED",RejectionCriteria$status))>0){
-
-    mtext("[FAILED]", col="red")
+  ##check for error
+  if(inherits(temp.GC, "try-error")){
+    return(NULL)
 
+  }else{
+    temp.GC <- get_RLum(temp.GC)[, c("De", "De.Error")]
 
   }
 
-  ##reset par
-  par(par.default)
-  rm(par.default)
 
-  # Plotting  GC  ----------------------------------------
-  temp.sample <- data.frame(Dose=LnLxTnTx$Dose,
-                            LxTx=LnLxTnTx$LxTx,
-                            LxTx.Error=LnLxTnTx$LxTx*0.1,
-                            TnTx=LnLxTnTx$TnTx
-  )
-
-  temp.GC <- get_RLum(plot_GrowthCurve(temp.sample,
-                                               ...))[,c("De","De.Error")]
 
   ##add recjection status
   if(length(grep("FAILED",RejectionCriteria$status))>0){
-
     temp.GC <- data.frame(temp.GC, RC.Status="FAILED")
 
   }else{
-
     temp.GC <- data.frame(temp.GC, RC.Status="OK")
 
   }
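
For orientation, a minimal R sketch of the plateau-test ratio computed in the hunk above; the toy curves and values below are invented for illustration and are not part of the package or this patch:

## toy background-corrected TL curves (temperature, counts); invented values
temp <- seq(20, 400, by = 2)
ntl  <- data.frame(temp, counts = 1000 * exp(-(temp - 250)^2 / 5000) + 50)
reg1 <- data.frame(temp, counts = 2000 * exp(-(temp - 250)^2 / 5000) + 50)

## channel-wise ratio: a roughly constant ratio across the signal integral
## indicates that the integration limits sample a thermally stable signal
plateau <- data.frame(temp, ratio = reg1$counts / ntl$counts)
plot(plateau, type = "l", xlab = "Temp. [\u00B0C]", ylab = "Reg1/NTL")
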
diff --git a/R/analyse_baSAR.R b/R/analyse_baSAR.R
index fb08d99..4e0c05f 100644
--- a/R/analyse_baSAR.R
+++ b/R/analyse_baSAR.R
@@ -1,4 +1,4 @@
-#' Bayesian models (baSAR) applied on luminescence data
+ #' Bayesian models (baSAR) applied on luminescence data
 #'
 #' This function allows the application of Bayesian models on luminescence data, measured
 #' with the single-aliquot regenerative-dose (SAR, Murray and Wintle, 2000) protocol. In particular,
@@ -92,7 +92,7 @@
 #' (cf. \code{\link[rjags]{jags.model}})\cr
 #' \code{inits} \tab \code{\link{list}} \tab option to set initialisation values (cf. \code{\link[rjags]{jags.model}}) \cr
 #' \code{thin} \tab \code{\link{numeric}} \tab thinning interval for monitoring the Bayesian process (cf. \code{\link[rjags]{jags.model}})\cr
-#' \code{variables.names} \tab \code{\link{character}} \tab set the variables to be monitored during the MCMC run, default:
+#' \code{variable.names} \tab \code{\link{character}} \tab set the variables to be monitored during the MCMC run, default:
 #' \code{'central_D'}, \code{'sigma_D'}, \code{'D'}, \code{'Q'}, \code{'a'}, \code{'b'}, \code{'c'}, \code{'g'}.
 #' Note: only variables present in the model can be monitored.
 #' }
@@ -159,7 +159,8 @@
 #' providing a file connection. Mixing of both types is not allowed. If an \code{\linkS4class{RLum.Results}}
 #' is provided the function directly starts with the Bayesian Analysis (see details)
 #'
-#' @param XLS_file \code{\link{character}} (optional): XLS_file with data for the analysis. This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements)
+#' @param XLS_file \code{\link{character}} (optional): XLS_file with data for the analysis. This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements).
+#' Alternatively a \code{data.frame} of similar structure can be provided.
 #'
 #' @param aliquot_range \code{\link{numeric}} (optional): allows to limit the range of the aliquots
 #' used for the analysis. This argument has only an effect if the argument \code{XLS_file} is used or
@@ -286,7 +287,7 @@
 #' as geometric mean!}
 #'
 #'
-#' @section Function version: 0.1.25
+#' @section Function version: 0.1.29
 #'
 #' @author Norbert Mercier, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Sebastian Kreutzer,
 #' IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
@@ -304,6 +305,10 @@
 #' A Bayesian central equivalent dose model for optically stimulated luminescence dating.
 #' Quaternary Geochronology 28, 62-70. doi:10.1016/j.quageo.2015.04.001
 #'
+#' Mercier, N., Kreutzer, S., Christophe, C., Guerin, G., Guibert, P., Lahaye, C., Lanos, P., Philippe, A.,
+#' Tribolo, C., 2016. Bayesian statistics in luminescence dating: The 'baSAR'-model and its implementation
+#' in the R package 'Luminescence'. Ancient TL 34, 14-21.
+#'
 #' \bold{Further reading}
 #'
 #' Gelman, A., Carlin, J.B., Stern, H.S., Dunson, D.B., Vehtari, A., Rubin, D.B., 2013.
@@ -1062,7 +1067,6 @@ analyse_baSAR <- function(
       ##get BIN-file name
       object.file_name[[i]] <- unique(fileBIN.list[[i]]@METADATA[["FNAME"]])
 
-
     }
 
     ##check for duplicated entries; remove them as they would cause a function crash
@@ -1180,7 +1184,7 @@ analyse_baSAR <- function(
 
     ##select aliquots giving light only, this function accepts also a list as input
     if(verbose){
-      cat("\n[analyse_baSAR()] No XLS file provided, running automatic grain selection ...")
+      cat("\n[analyse_baSAR()] No XLS-file provided, running automatic grain selection ...")
 
     }
 
@@ -1236,7 +1240,7 @@ analyse_baSAR <- function(
       Nb_aliquots <- nrow(datalu)
 
       ##write information in variables
-      Disc[[k]] <-  datalu[["POSITION"]]
+      Disc[[k]] <- datalu[["POSITION"]]
       Grain[[k]] <- datalu[["GRAIN"]]
 
       ##free memory
@@ -1250,7 +1254,7 @@ analyse_baSAR <- function(
     if (is(XLS_file, "character")) {
       ##test for valid file
       if(!file.exists(XLS_file)){
-        stop("[analyse_baSAR()] Defined XLS_file does not exists!")
+        stop("[analyse_baSAR()] XLS_file does not exist!")
 
       }
 
@@ -1263,6 +1267,14 @@ analyse_baSAR <- function(
         skip = additional_arguments$skip
       ), stringsAsFactors = FALSE)
 
+      ###check whether data format is somehow odd, check only the first three columns
+      if(!all(grepl(colnames(datalu), pattern = " ")[1:3])){
+        stop("[analyse_baSAR()] One of the first three columns in your XLS_file has no column header. Your XLS_file requires
+             at least three columns for 'BIN_file', 'DISC' and 'GRAIN'",
+             call. = FALSE)
+
+      }
+
       ##get rid of empty rows if the BIN_FILE name column is empty
       datalu <- datalu[!is.na(datalu[[1]]), ]
 
@@ -1271,7 +1283,13 @@ analyse_baSAR <- function(
 
       datalu <- XLS_file
 
-      ##problem: the first column should be of type charcter, the others are
+      ##check number of number of columns in data.frame
+      if(ncol(datalu) < 3){
+        stop("[analyse_baSAR()] The data.frame provided via XLS_file should consist of at least three columns (see manual)!", call. = FALSE)
+
+      }
+
+      ##problem: the first column should be of type character, the others are
       ##of type numeric, unfortunately it is too risky to rely on the user, we do the
       ##proper conversion by ourself ...
       datalu[[1]] <- as.character(datalu[[1]])
@@ -1308,9 +1326,9 @@ analyse_baSAR <- function(
             split = ".",
             fixed = TRUE
           )[[1]][1],
-          x = object.file_name)
+          x = unlist(object.file_name))
 
-          nj <-  length(Disc[[k]]) + 1
+          nj <- length(Disc[[k]]) + 1
 
           Disc[[k]][nj] <-  as.numeric(datalu[nn, 2])
           Grain[[k]][nj] <-  as.numeric(datalu[nn, 3])
@@ -1579,7 +1597,7 @@ analyse_baSAR <- function(
       }
 
       if(is.null(background.integral.Tx[[k]])){
-        abline(v = range(background.integral[[k]]), lty = 2, col = "green")
+        abline(v = range(background.integral[[k]]), lty = 2, col = "red")
 
       }else{
         abline(v = range(background.integral.Tx[[k]]), lty = 2, col = "red")
@@ -1658,8 +1676,8 @@ analyse_baSAR <- function(
 
       TnTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]])
 
-      ##create needed data.frame
-      selected_sample <- data.frame (sample_dose, sample_LxTx, sample_sLxTx, TnTx)
+      ##create needed data.frame (this way to make sure that rows are doubled if something is missing)
+      selected_sample <- as.data.frame(cbind(sample_dose, sample_LxTx, sample_sLxTx, TnTx))
 
       ##call plot_GrowthCurve() to get De and De value
       fitcurve <-
@@ -1679,6 +1697,7 @@ analyse_baSAR <- function(
           main = paste0("ALQ: ", count," | POS: ", Disc[[k]][i], " | GRAIN: ", Grain[[k]][i])
         ))
 
+
         ##get data.frame with De values
         if(!is.null(fitcurve)){
           fitcurve_De <- get_RLum(fitcurve, data.object = "De")
@@ -2236,7 +2255,7 @@ analyse_baSAR <- function(
       if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <-  0 }
       if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <-  1 }
       if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <-  1 }
-      if (fit.force_through_origin == TRUE) {GC_Origin <- 1} else {GC_Origin <- 0}
+      if (fit.force_through_origin) {GC_Origin <- 0} else {GC_Origin <- 1}
 
       ##add choise for own provided model
       if(!is.null(baSAR_model)){
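
A hedged sketch of the alternative data.frame input for XLS_file described in the documentation changes above; the column names and values are purely illustrative, only the order (BIN-file name, disc position, grain position) matters:

## three columns: BIN-file name, disc position, grain position (0 = multi-grain)
sample_info <- data.frame(
  BIN_FILE = c("sample_A.binx", "sample_A.binx", "sample_B.binx"),
  DISC     = c(1, 2, 1),
  GRAIN    = c(0, 0, 0),
  stringsAsFactors = FALSE
)
## passed instead of a path to an XLS file, e.g.
## analyse_baSAR(object = ..., XLS_file = sample_info, ...)
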
diff --git a/R/analyse_portableOSL.R b/R/analyse_portableOSL.R
new file mode 100644
index 0000000..d0d3ff6
--- /dev/null
+++ b/R/analyse_portableOSL.R
@@ -0,0 +1,261 @@
+#' Analyse portable CW-OSL measurements
+#'
+#' The function analyses CW-OSL curve data produced by a SUERC portable OSL reader and
+#' produces a combined plot of OSL/IRSL signal intensities, OSL/IRSL depletion ratios
+#' and the IRSL/OSL ratio.
+#'
+#' This function only works with \code{RLum.Analysis} objects produced by \code{\link{read_PSL2R}}.
+#' It further assumes (or rather requires) an equal number of OSL and IRSL curves that
+#' are combined pairwise for calculating the IRSL/OSL ratio. For calculating the depletion ratios,
+#' the cumulative signal of the last n channels (same number of channels as specified by \code{signal.integral})
+#' is divided by the cumulative signal of the first n channels (\code{signal.integral}).
+#'
+#' @param object \code{\linkS4class{RLum.Analysis}} (\bold{required}):
+#' \code{RLum.Analysis} object produced by \code{\link{read_PSL2R}}.
+#'
+#' @param signal.integral \code{\link{vector}} (\bold{required}): A vector of two values
+#' specifying the lower and upper channel used to calculate the OSL/IRSL signal. Can
+#' be provided in form of \code{c(1, 5)} or \code{1:5}.
+#'
+#'
+#' @param invert \code{\link{logical}} (with default): \code{TRUE} to calculate
+#' and plot the data in reverse order.
+#'
+#' @param normalise \code{\link{logical}} (with default):
+#' \code{TRUE} to normalise the OSL/IRSL signals by the mean of all corresponding
+#' data curves.
+#'
+#' @param plot \code{\link{logical}} (with default): enable/disable plot output
+#'
+#' @param ... currently not used.
+#'
+#' @return Returns an S4 \code{\linkS4class{RLum.Results}} object containing
+#' a \code{summary} \code{data.frame} with the signal intensities, depletion ratios and the
+#' IRSL/OSL ratio, the input object (\code{data}) and the used arguments (\code{args}).
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+#'
+#' @author Christoph Burow, University of Cologne (Germany)
+#'
+#' @section Function version: 0.0.3
+#'
+#' @keywords datagen plot
+#'
+#' @examples
+#'
+#' # (1) load example data set
+#' data("ExampleData.portableOSL", envir = environment())
+#'
+#' # (2) merge and plot all RLum.Analysis objects
+#' merged <- merge_RLum(ExampleData.portableOSL)
+#' plot_RLum(merged, combine = TRUE)
+#' merged
+#'
+#' # (3) analyse and plot
+#' results <- analyse_portableOSL(merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE)
+#' get_RLum(results)
+#'
+#'
+#'
+#' @export
+analyse_portableOSL <-
+  function(object,
+           signal.integral,
+           invert = FALSE,
+           normalise = FALSE,
+           plot = TRUE,
+           ...)
+  {
+
+  ## INPUT VERIFICATION ----
+  if (!inherits(object, "RLum.Analysis"))
+    stop("Only objects of class 'RLum.Analysis' are allowed.", call. = FALSE)
+  if (!all(sapply(object, class) == "RLum.Data.Curve"))
+    stop("The 'RLum.Analysis' object must only contain objects of class 'RLum.Data.Curve'.", call. = FALSE)
+  if (!all(sapply(object, function(x) x@originator) == "read_PSL2R"))
+    stop("Only objects originating from 'read_PSL2R()' are allowed.", call. = FALSE)
+
+  if (missing(signal.integral)) {
+    signal.integral <- c(1, 1)
+    warning("No value for 'signal.integral' provided. Only the first data point of each curve was used!",
+            call. = FALSE)
+  }
+
+  ## CALCULATIONS ----
+
+  # OSL
+  OSL <- get_RLum(object, recordType = "OSL")
+  OSL <- do.call(rbind, lapply(OSL, function(x) {
+    posl_get_signal(x, signal.integral)
+  }))
+
+  # IRSL
+  IRSL <- get_RLum(object, recordType = "IRSL")
+  IRSL <- do.call(rbind, lapply(IRSL, function(x) {
+    posl_get_signal(x, signal.integral)
+  }))
+
+
+  ## NORMALISE ----
+  if (normalise) {
+    OSL <- posl_normalise(OSL)
+    IRSL <- posl_normalise(IRSL)
+  }
+
+  ## INVERT ----
+  if (invert) {
+    OSL <- posl_invert(OSL)
+    IRSL <- posl_invert(IRSL)
+  }
+
+  # OSL/IRSL Ratio
+  RATIO <- IRSL$sum_signal / OSL$sum_signal
+
+  ## PLOTTING ----
+  if (plot) {
+    par.old.full <- par(no.readonly = TRUE)
+    on.exit(par(par.old.full))
+
+    # default: par(mar = c(5, 4, 4, 2) + 0.1) // bottom, left, top, right
+    par(mfrow = c(1, 6))
+
+    par(mar = c(5, 4, 4, 1) + 0.1)
+
+    frame()
+
+    par(mar = c(5, 0, 4, 1) + 0.1)
+
+    plot(
+      OSL$sum_signal,
+      1:nrow(OSL),
+      type = "b",
+      pch = 16,
+      col = "blue",
+      xlim = range(pretty(OSL$sum_signal)),
+      xlab = "BSL",
+      ylab = "Index",
+      bty = "n",
+      yaxt = "n"
+    )
+    axis(2, line = 3, at = 1:nrow(OSL))
+    axis(3)
+    mtext("Index", side = 2, line = 6)
+
+    plot(
+      IRSL$sum_signal,
+      1:nrow(IRSL),
+      type = "b",
+      pch = 16,
+      col = "red",
+      xlim = range(pretty(IRSL$sum_signal)),
+      xlab = "IRSL",
+      ylab = "",
+      bty = "n",
+      yaxt = "n"
+    )
+    axis(3)
+
+    plot(
+      OSL$sum_signal_depletion,
+      1:nrow(OSL),
+      type = "b",
+      pch = 1,
+      col = "blue",
+      xlim = range(pretty(OSL$sum_signal_depletion)),
+      xlab = "BSL depl.",
+      ylab = "",
+      bty = "n",
+      yaxt = "n",
+      lty = 2
+    )
+    axis(3)
+
+    plot(
+      IRSL$sum_signal_depletion,
+      1:nrow(IRSL),
+      type = "b",
+      pch = 1,
+      col = "red",
+      xlim = range(pretty(IRSL$sum_signal_depletion)),
+      xlab = "IRSL depl.",
+      ylab = "",
+      bty = "n",
+      yaxt = "n",
+      lty = 2
+    )
+    axis(3)
+
+    plot(
+      RATIO,
+      1:length(RATIO),
+      type = "b",
+      pch = 16,
+      col = "black",
+      xlim = range(pretty(RATIO)),
+      xlab = "IRSL/BSL",
+      ylab = "",
+      bty = "n",
+      yaxt = "n"
+    )
+    axis(3)
+  }
+
+  ## RETURN VALUE ----
+  call<- sys.call()
+  args <- as.list(call)[2:length(call)]
+  summary <- data.frame(BSL = OSL$sum_signal,
+                        BSL_error = OSL$sum_signal_err,
+                        IRSL = IRSL$sum_signal,
+                        IRSL_error = IRSL$sum_signal_err,
+                        BSL_depletion = OSL$sum_signal_depletion,
+                        IRSL_depletion = IRSL$sum_signal_depletion,
+                        IRSL_BSL_RATIO = RATIO)
+
+
+  newRLumResults <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      summary=summary,
+      data = object,
+      args=args
+    ),
+    info = list(call = call))
+
+  return(newRLumResults)
+
+}
+
+################################################################################
+##                              HELPER FUNCTIONS                              ##
+################################################################################
+
+## This extracts the relevant curve data information of the RLum.Data.Curve
+## objects
+posl_get_signal <- function(x, signal.integral) {
+    raw_signal <- get_RLum(x)[,2]
+    sigint <- range(signal.integral)
+    if (sigint[2] > length(raw_signal)) {
+      sigint[2] <- length(raw_signal)
+      warning("'signal.integral' (", paste(range(signal.integral), collapse = ", "),") ",
+              "exceeded the number of available data points (n = ", length(raw_signal),") and ",
+              "has been automatically reduced to the maximum number.", call. = FALSE)
+    }
+    sum_signal <- sum(raw_signal[sigint[1]:sigint[2]])
+    sum_signal_err <- sqrt(sum(x@info$raw_data$counts_per_cycle_error[sigint[1]:sigint[2]]^2))
+    sum_signal_depletion <- sum(raw_signal[(length(raw_signal)-length(sigint[1]:sigint[2])):length(raw_signal)]) / sum_signal
+    return(data.frame(sum_signal, sum_signal_err, sum_signal_depletion))
+}
+
+## This function normalises the data curve by the mean signal
+posl_normalise <- function(x) {
+  rel.error <- x$sum_signal_err / x$sum_signal
+  x$sum_signal <- x$sum_signal / mean(x$sum_signal)
+  x$sum_signal_err <- x$sum_signal * rel.error
+  x$sum_signal_depletion <- x$sum_signal_depletion / mean(x$sum_signal_depletion)
+  return(x)
+}
+
+## This function inverts the data.frame (useful when the samples are in inverse
+## stratigraphic order)
+posl_invert <- function(x) {
+  x <- x[nrow(x):1, ]
+}
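
A minimal numeric sketch of the depletion ratio as described in the documentation above; the counts are hypothetical, while the new helper posl_get_signal() performs the equivalent computation on real curve data:

## hypothetical CW-OSL counts per channel
counts <- c(520, 430, 350, 280, 230, 120, 90, 70, 60, 55)
n <- 3                                       # channels in the signal integral
depletion_ratio <- sum(tail(counts, n)) / sum(head(counts, n))
depletion_ratio                              # small values = strong depletion
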
diff --git a/R/app_RLum.R b/R/app_RLum.R
index 6afd0d8..b2e12e7 100644
--- a/R/app_RLum.R
+++ b/R/app_RLum.R
@@ -10,15 +10,15 @@
 #'
 #' @author Christoph Burow, University of Cologne (Germany)
 #'
-#' @section Function version: 0.1.0
+#' @section Function version: 0.1.1
 #'
 #' @export
-app_RLum <- function(app, ...) {
+app_RLum <- function(app = NULL, ...) {
   
   if (!requireNamespace("RLumShiny", quietly = TRUE))
     stop("Shiny applications require the 'RLumShiny' package. To install",
          " this package run 'install.packages('RLumShiny')' in your R console.", 
-         call. = FALSE)
+         call. = FALSE) 
   
   RLumShiny::app_RLum(app, ...)
 }
\ No newline at end of file
diff --git a/R/calc_AliquotSize.R b/R/calc_AliquotSize.R
index ba7b1de..08d296b 100644
--- a/R/calc_AliquotSize.R
+++ b/R/calc_AliquotSize.R
@@ -51,25 +51,32 @@
 #' @param grain.size \code{\link{numeric}} (\bold{required}): mean grain size
 #' (microns) or a range of grain sizes from which the mean grain size is
 #' computed (e.g. \code{c(100,200)}).
+#'
 #' @param sample.diameter \code{\link{numeric}} (\bold{required}): diameter
 #' (mm) of the targeted area on the sample carrier.
+#'
 #' @param packing.density \code{\link{numeric}} (with default) empirical value
 #' for mean packing density. \cr If \code{packing.density = "inf"} a hexagonal
 #' structure on an infinite plane with a packing density of \eqn{0.906\ldots}
 #' is assumed.
+#'
 #' @param MC \code{\link{logical}} (optional): if \code{TRUE} the function
 #' performs a monte carlo simulation for estimating the amount of grains on the
 #' sample carrier and assumes random errors in grain size distribution and
 #' packing density. Requires a vector with min and max grain size for
 #' \code{grain.size}. For more information see details.
+#'
 #' @param grains.counted \code{\link{numeric}} (optional) grains counted on a
 #' sample carrier. If a non-zero positive integer is provided this function
 #' will calculate the packing density of the aliquot. If more than one value is
 #' provided the mean packing density and its standard deviation is calculated.
 #' Note that this overrides \code{packing.density}.
+#'
 #' @param plot \code{\link{logical}} (with default): plot output
 #' (\code{TRUE}/\code{FALSE})
+#'
 #' @param \dots further arguments to pass (\code{main, xlab, MC.iter}).
+#'
 #' @return Returns a terminal output. In addition an
 #' \code{\linkS4class{RLum.Results}} object is returned containing the
 #' following element:
@@ -81,19 +88,33 @@
 #'
 #' The output should be accessed using the function
 #' \code{\link{get_RLum}}
+#'
 #' @section Function version: 0.31
+#'
 #' @author Christoph Burow, University of Cologne (Germany)
-#' @references Duller, G.A.T., 2008. Single-grain optical dating of Quaternary
+#'
+#' @references
+#' Duller, G.A.T., 2008. Single-grain optical dating of Quaternary
 #' sediments: why aliquot size matters in luminescence dating. Boreas 37,
-#' 589-612.  \cr\cr Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains
-#' are there on a single aliquot?. Ancient TL 30, 9-16. \cr\cr \bold{Further
-#' reading} \cr\cr Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's
+#' 589-612.
+#'
+#' Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains
+#' are there on a single aliquot?. Ancient TL 30, 9-16. \cr\cr
+#'
+#' \bold{Further reading} \cr\cr
+#'
+#' Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's
 #' Theorem on Circle Packing. \url{http://arxiv.org/pdf/1009.4322v1.pdf},
-#' 2013-09-13. \cr\cr Graham, R.L., Lubachevsky, B.D., Nurmela, K.J.,
+#' 2013-09-13.
+#'
+#' Graham, R.L., Lubachevsky, B.D., Nurmela, K.J.,
 #' Oestergard, P.R.J., 1998.  Dense packings of congruent circles in a circle.
-#' Discrete Mathematics 181, 139-154. \cr\cr Huang, W., Ye, T., 2011. Global
+#' Discrete Mathematics 181, 139-154.
+#'
+#' Huang, W., Ye, T., 2011. Global
 #' optimization method for finding dense packings of equal circles in a circle.
 #' European Journal of Operational Research 210, 474-481.
+#'
 #' @examples
 #'
 #' ## Estimate the amount of grains on a small aliquot
@@ -159,14 +180,13 @@ calc_AliquotSize <- function(
   ## ... ARGUMENTS
   ##==========================================================================##
 
-  extraArgs <- list(...)
+  # set default parameters
+  settings <- list(MC.iter = 10^4,
+                   verbose = TRUE)
+
+  # override settings with user arguments
+  settings <- modifyList(settings, list(...))
 
-  ## set number of Monte Carlo iterations
-  if("MC.iter" %in% names(extraArgs)) {
-    MC.iter<- extraArgs$MC.iter
-  } else {
-    MC.iter<- 10^4
-  }
 
   ##==========================================================================##
   ## CALCULATIONS
@@ -204,7 +224,7 @@ calc_AliquotSize <- function(
       # create a random set of packing densities assuming a normal
       # distribution with the empirically determined standard deviation of
       # 0.18.
-      d.mc<- rnorm(MC.iter, packing.density, 0.18)
+      d.mc<- rnorm(settings$MC.iter, packing.density, 0.18)
 
       # in a PECC the packing density can not be larger than ~0.87
       d.mc[which(d.mc > 0.87)]<- 0.87
@@ -213,7 +233,7 @@ calc_AliquotSize <- function(
       # create a random set of sample diameters assuming a normal
       # distribution with an assumed standard deviation of
       # 0.2. For a more conservative estimate this is divided by 2.
-      sd.mc<- rnorm(MC.iter, sample.diameter, 0.2)
+      sd.mc<- rnorm(settings$MC.iter, sample.diameter, 0.2)
 
       # it is assumed that sample diameters < 0.5 mm either do not
       # occur, or are discarded. Either way, any smaller sample
@@ -229,7 +249,7 @@ calc_AliquotSize <- function(
       # as standard deviation. For a more conservative estimate this
       # is further devided by 2, so half the range is regarded as
       # two sigma.
-      gs.mc<- rnorm(MC.iter, grain.size, diff(gs.range)/4)
+      gs.mc<- rnorm(settings$MC.iter, grain.size, diff(gs.range)/4)
 
       # draw random samples from the grain size spectrum (gs.mc) and calculate
       # the mean for each sample. This gives an approximation of the variation
@@ -297,50 +317,52 @@ calc_AliquotSize <- function(
   ##==========================================================================##
   ##TERMINAL OUTPUT
   ##==========================================================================##
-
-  cat("\n [calc_AliquotSize]")
-  cat(paste("\n\n ---------------------------------------------------------"))
-  cat(paste("\n mean grain size (microns)  :", grain.size))
-  cat(paste("\n sample diameter (mm)       :", sample.diameter))
-  if(missing(grains.counted) == FALSE) {
-    if(length(grains.counted) == 1) {
-      cat(paste("\n counted grains             :", grains.counted))
-    } else {
-      cat(paste("\n mean counted grains        :", round(mean(grains.counted))))
+  if (settings$verbose) {
+
+    cat("\n [calc_AliquotSize]")
+    cat(paste("\n\n ---------------------------------------------------------"))
+    cat(paste("\n mean grain size (microns)  :", grain.size))
+    cat(paste("\n sample diameter (mm)       :", sample.diameter))
+    if(missing(grains.counted) == FALSE) {
+      if(length(grains.counted) == 1) {
+        cat(paste("\n counted grains             :", grains.counted))
+      } else {
+        cat(paste("\n mean counted grains        :", round(mean(grains.counted))))
+      }
     }
-  }
-  if(missing(grains.counted) == TRUE) {
-    cat(paste("\n packing density            :", round(packing.density,3)))
-  }
-  if(missing(grains.counted) == FALSE) {
-    if(length(grains.counted) == 1) {
+    if(missing(grains.counted) == TRUE) {
       cat(paste("\n packing density            :", round(packing.density,3)))
-    } else {
-      cat(paste("\n mean packing density       :", round(mean(packing.densities),3)))
-      cat(paste("\n standard deviation         :", round(std.d,3)))
     }
-  }
-  if(missing(grains.counted) == TRUE) {
-    cat(paste("\n number of grains           :", round(n.grains,0)))
-  }
+    if(missing(grains.counted) == FALSE) {
+      if(length(grains.counted) == 1) {
+        cat(paste("\n packing density            :", round(packing.density,3)))
+      } else {
+        cat(paste("\n mean packing density       :", round(mean(packing.densities),3)))
+        cat(paste("\n standard deviation         :", round(std.d,3)))
+      }
+    }
+    if(missing(grains.counted) == TRUE) {
+      cat(paste("\n number of grains           :", round(n.grains,0)))
+    }
 
 
 
-  if(MC == TRUE && range.flag == TRUE) {
-    cat(paste(cat(paste("\n\n --------------- Monte Carlo Estimates -------------------"))))
-    cat(paste("\n number of iterations (n)     :", MC.iter))
-    cat(paste("\n median                       :", round(MC.stats$median)))
-    cat(paste("\n mean                         :", round(MC.stats$mean)))
-    cat(paste("\n standard deviation (mean)    :", round(MC.stats$sd.abs)))
-    cat(paste("\n standard error (mean)        :", round(MC.stats$se.abs, 1)))
-    cat(paste("\n 95% CI from t-test (mean)    :", round(MC.t.lower), "-", round(MC.t.upper)))
-    cat(paste("\n standard error from CI (mean):", round(MC.t.se, 1)))
-    cat(paste("\n ---------------------------------------------------------\n"))
+    if(MC == TRUE && range.flag == TRUE) {
+      cat(paste(cat(paste("\n\n --------------- Monte Carlo Estimates -------------------"))))
+      cat(paste("\n number of iterations (n)     :", settings$MC.iter))
+      cat(paste("\n median                       :", round(MC.stats$median)))
+      cat(paste("\n mean                         :", round(MC.stats$mean)))
+      cat(paste("\n standard deviation (mean)    :", round(MC.stats$sd.abs)))
+      cat(paste("\n standard error (mean)        :", round(MC.stats$se.abs, 1)))
+      cat(paste("\n 95% CI from t-test (mean)    :", round(MC.t.lower), "-", round(MC.t.upper)))
+      cat(paste("\n standard error from CI (mean):", round(MC.t.se, 1)))
+      cat(paste("\n ---------------------------------------------------------\n"))
 
-  } else {
-    cat(paste("\n ---------------------------------------------------------\n"))
-  }
+    } else {
+      cat(paste("\n ---------------------------------------------------------\n"))
+    }
 
+  }
   ##==========================================================================##
   ##RETURN VALUES
   ##==========================================================================##
@@ -377,7 +399,7 @@ calc_AliquotSize <- function(
     }
   }
 
-  if(MC == FALSE) {
+  if(!MC) {
     MC.n<- NULL
     MC.stats<- NULL
     MC.n.kde<- NULL
@@ -388,21 +410,20 @@ calc_AliquotSize <- function(
   if(missing(grains.counted)) grains.counted<- NA
 
   call<- sys.call()
-  args<- list(grain.size = grain.size, sample.diameter = sample.diameter, packing.density = packing.density, MC = MC, grains.counted = grains.counted, MC.iter=MC.iter)
+  args<- as.list(sys.call())[-1]
 
   # create S4 object
   newRLumResults.calc_AliquotSize <- set_RLum(
     class = "RLum.Results",
     data = list(
       summary=summary,
-      args=args,
-      call=call,
       MC=list(estimates=MC.n,
               statistics=MC.stats,
               kde=MC.n.kde,
               t.test=MC.t.test,
-              quantile=MC.q)
-    ))
+              quantile=MC.q)),
+    info = list(call=call,
+                args=args))
 
   ##=========##
   ## PLOTTING
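
The calc_AliquotSize() changes above replace manual parsing of ... with a defaults list overridden via modifyList(); a self-contained sketch of that pattern (function and argument names below are invented for illustration):

demo_settings <- function(...) {
  settings <- list(MC.iter = 1e4, verbose = TRUE)   # package defaults
  settings <- modifyList(settings, list(...))       # user-supplied values win
  if (settings$verbose)
    cat("MC iterations:", settings$MC.iter, "\n")
  invisible(settings)
}
demo_settings(MC.iter = 500)   # prints "MC iterations: 500"
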
diff --git a/R/calc_AverageDose.R b/R/calc_AverageDose.R
new file mode 100644
index 0000000..8dbc635
--- /dev/null
+++ b/R/calc_AverageDose.R
@@ -0,0 +1,517 @@
+#'Calculate the Average Dose and the dose rate dispersion
+#'
+#'This functions calculates the Average Dose and their extrinsic dispersion and estimates
+#'the standard errors by bootstrapping based on the Average Dose Model by Guerin et al., 2017
+#'
+#'\bold{\code{sigma_m}}\cr
+#'
+#'The program requires the input of a known value of sigma_m,
+#'which corresponds to the intrinsic overdispersion, as determined
+#'by a dose recovery experiment. Then the dispersion in doses (sigma_d)
+#'will be that over and above sigma_m (and individual uncertainties sigma_wi).
+#'
+#' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
+#' (\bold{required}): for \code{data.frame}: two columns with De
+#' \code{(data[,1])} and De error \code{(data[,2])}
+#'
+#' @param sigma_m \code{\link{numeric}} (\bold{required}): the overdispersion resulting from a dose recovery
+#' experiment, i.e. when all grains have  received the same dose. Indeed in such a case, any
+#' overdispersion (i.e. dispersion on top of analytical uncertainties) is, by definition, an
+#' unrecognised measurement uncertainty.
+#'
+#' @param Nb_BE \code{\link{integer}} (with default): sample size used for the bootstrapping
+#'
+#' @param na.rm \code{\link{logical}} (with default): exclude NA values
+#' from the data set prior to any further operation.
+#'
+#' @param plot \code{\link{logical}} (with default): enables/disables plot output
+#'
+#' @param verbose \code{\link{logical}} (with default): enables/disables terminal output
+#'
+#' @param ... further arguments that can be passed to \code{\link[graphics]{hist}}. As three plots
+#' are returned all arguments need to be provided as \code{\link{list}},
+#' e.g., \code{main = list("Plot 1", "Plot 2", "Plot 3")}. Note: not all arguments of \code{hist} are
+#' supported, but the output of \code{hist} is returned and can be used for own plots. \cr
+#'
+#' Further supported arguments: \code{mtext} (\code{character}), \code{rug} (\code{TRUE/FALSE}).
+#'
+#' @section Function version: 0.1.4
+#'
+#' @author Claire Christophe, IRAMAT-CRP2A, Universite de Nantes (France),
+#' Anne Philippe, Universite de Nantes, (France),
+#' Guillaume Guerin, IRAMAT-CRP2A, Universite Bordeaux Montaigne, (France),
+#' Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne, (France)
+#'
+#' @seealso \code{\link{read.table}}, \code{\link[graphics]{hist}}
+#'
+#' @return The function returns numerical output and an (optional) plot.
+#'
+#' -----------------------------------\cr
+#' [ NUMERICAL OUTPUT ]\cr
+#' -----------------------------------\cr
+#' \bold{\code{RLum.Results}}-object\cr
+#'
+#' \bold{slot:} \bold{\code{@data}} \cr
+#'
+#' [.. $summary : \code{data.frame}]\cr
+#'
+#' \tabular{lll}{
+#' \bold{Column} \tab \bold{Type} \tab \bold{Description}\cr
+#'  AVERAGE_DOSE \tab \code{numeric} \tab the obtained average dose\cr
+#'  AVERAGE_DOSE.SE \tab \code{numeric} \tab the average dose error \cr
+#'  SIGMA_D \tab \code{numeric}\tab sigma \cr
+#'  SIGMA_D.SE \tab \code{numeric}\tab standard error of the sigma  \cr
+#'  IC_AVERAGE_DOSE.LEVEL  \tab \code{character}\tab confidence level average dose\cr
+#'  IC_AVERAGE_DOSE.LOWER  \tab \code{character}\tab lower quantile of average dose \cr
+#'  IC_AVERAGE_DOSE.UPPER \tab \code{character}\tab upper quantile of average dose\cr
+#'  IC_SIGMA_D.LEVEL \tab \code{integer}\tab confidence level sigma\cr
+#'  IC_SIGMA_D.LOWER \tab \code{character}\tab lower sigma quantile\cr
+#'  IC_SIGMA_D.UPPER \tab \code{character}\tab upper sigma quantile\cr
+#'  L_MAX \tab \code{character}\tab maximum likelihood value
+#' }
+#'
+#' [.. $dstar : \code{matrix}]\cr
+#'
+#' Matrix with bootstrap values\cr
+#'
+#' [.. $hist : \code{list}]\cr
+#'
+#' Object as produced by the function histogram
+#'
+#' ------------------------\cr
+#' [ PLOT OUTPUT ]\cr
+#' ------------------------\cr
+#'
+#' The function returns two different plot panels.
+#'
+#' (1) An abanico plot with the dose values
+#'
+#' (2) A histogram panel comprising 3 histograms with the equivalent dose and the bootstrapped average
+#' dose and the sigma values.
+#'
+#' @references
+#' Guerin, G., Christophe, C., Philippe, A., Murray, A.S., Thomsen, K.J., Tribolo, C., Urbanova, P.,
+#' Jain, M., Guibert, P., Mercier, N., Kreutzer, S., Lahaye, C., 2017. Absorbed dose, equivalent dose,
+#' measured dose rates, and implications for OSL age estimates: Introducing the Average Dose Model.
+#' Quaternary Geochronology 1-32. doi:10.1016/j.quageo.2017.04.002
+#'
+#' \bold{Further reading}\cr
+#'
+#' Efron, B., Tibshirani, R., 1986. Bootstrap Methods for Standard Errors, Confidence Intervals,
+#' and Other Measures of Statistical Accuracy. Statistical Science 1, 54-75.
+#'
+#' @note This function has beta status!
+#'
+#' @keywords datagen
+#'
+#' @examples
+#'
+#'##Example 01 using package example data
+#'##load example data
+#'data(ExampleData.DeValues, envir = environment())
+#'
+#'##calculate Average dose
+#'##(use only the first 56 values here)
+#'AD <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,],
+#'sigma_m = 0.1)
+#'
+#'##plot De and set Average dose as central value
+#'plot_AbanicoPlot(
+#'  data = ExampleData.DeValues$CA1[1:56,],
+#'  z.0 = AD$summary$AVERAGE_DOSE)
+#'
+#'@export
+calc_AverageDose <- function(
+  data,
+  sigma_m = NULL,
+  Nb_BE = 500,
+  na.rm = TRUE,
+  plot = TRUE,
+  verbose = TRUE,
+  ...
+){
+
+  # Define internal functions ------------------------------------------------------------------
+
+  # function which compute mle's for data (yu,su)
+  .mle <- function(yu , su, wu.start, sigma_d.start, delta.start){
+
+    ##set start parameters, otherwise the function will try to get them
+    ##from the parent environment, which is not wanted ...
+    delta.temp <- 0
+    sigma_d.temp <- 0
+
+    sigma_d <- sigma_d.start
+    delta <- delta.start
+    wu <- wu.start
+
+    j <- 0
+    iteration_limit <- 10000
+
+    ##loop until convergence or the iteration limit is reached
+    while(j < iteration_limit) {
+
+      ##code by Claire; in the 2nd and 3rd line delta and sigma_d are replaced by delta.temp and
+      ##sigma_d.temp; otherwise the iteration and its test for convergence will not work
+      delta.temp <- exp( sum(wu*(yu+(0.5*(sigma_d^2)))) / sum(wu) )
+      sigma_d.temp <- sigma_d*sum( (wu^2) * (yu-log(delta.temp)+0.5*sigma_d^2)^2) / (sum( wu*(1+yu-log(delta.temp)+0.5*sigma_d^2)))
+      wu <- 1/(sigma_d.temp^2 + su^2)
+
+      ##break loop if convergence is reached ... if not update values
+      if(is.infinite(delta.temp) | is.infinite(sigma_d.temp)){
+        break()
+
+      }else if (
+        ##compare values ... if they are equal we have convergence
+        all(
+          c(round(c(delta, sigma_d), 4)) == c(round(c(delta.temp, sigma_d.temp), 4))
+          )
+        ) {
+        break()
+
+      } else{
+        ##update input values
+        delta <- delta.temp
+        sigma_d <- sigma_d.temp
+        j <- j + 1
+
+      }
+
+    }
+
+    ##if no convergence was reached, do not stop the entire function but warn and return NA,
+    ##as this may happen during the bootstrapping procedure
+    if(j == iteration_limit){
+      warning("[calc_AverageDoseModel()] .mle() no convergence reached for the given limits. NA returned!")
+      return(c(NA,NA))
+
+    }else if(is.infinite(delta.temp) | is.infinite(sigma_d.temp)){
+      warning("[calc_AverageDoseModel()] .mle() gaves Inf values. NA returned!")
+      return(c(NA,NA))
+
+    }else{
+      return(c(round(c(delta, sigma_d),4)))
+
+    }
+
+  }
+
+  .CredibleInterval <- function(a_chain, level = 0.95) {
+    ## Aim : estimation of the shortest credible interval of the sample of parameter a
+    # A level % credible interval is an interval that keeps N*(1-level) elements of the sample
+    # The level % credible interval is the shortest of all those intervals.
+    ## Parameters :
+    # a_chain : the name of the values of the parameter a
+    # level : the level of the credible interval expected
+    ## Returns : the level and the endpoints
+
+    sorted_sample <- sort(a_chain)
+    N <- length(a_chain)
+    OutSample <- N * (1 - level)
+
+    I <- cbind(sorted_sample[1:(OutSample + 1)] , sorted_sample[(N - OutSample):N])
+
+    l <-  I[, 2] - I[, 1] # length of intervals
+    i <- which.min(l) # look for the shortest interval
+
+    return(c(
+      level = level,
+      CredibleIntervalInf = I[i, 1],
+      CredibleIntervalSup = I[i, 2]
+    ))
+
+  }
+
+  ##////////////////////////////////////////////////////////////////////////////////////////////////
+  ##HERE THE MAIN FUNCTION STARTS
+  ##////////////////////////////////////////////////////////////////////////////////////////////////
+
+  # Integrity checks ----------------------------------------------------------------------------
+
+  if(!is(data, "RLum.Results") & !is(data, "data.frame")){
+    stop("[calc_AverageDose()] input is neither of type 'RLum.Results' nor of type 'data.frame'!")
+
+  }else {
+
+    if(is(data, "RLum.Results")){
+     data <- get_RLum(data)
+
+    }
+
+  }
+
+  if(is.null(sigma_m)){
+    stop("[calc_AverageDose()] 'sigma_m' is missing but required")
+
+  }
+
+  # Data preparation -----------------------------------------------------------------------------
+
+  ##problem: the entire code refers to column names the user may not provide...
+  ##  >> to avoid changing the entire code, the data are reshaped to a format that
+  ##  >> fits the code
+
+    ##check for number of columns
+    if(ncol(data)<2){
+      try(stop("[calc_AverageDose()] data set contains < 2 columns! NULL returned!", call. = FALSE))
+      return(NULL)
+
+    }
+
+    ##use only the first two columns
+    if(ncol(data)>2){
+      data <- data[,1:2]
+      warning("[calc_AverageDose()] number of columns in data set > 2. Only the first two columns were used.", call. = FALSE)
+    }
+
+    ##exclude NA values
+    if(any(is.na(data))){
+      data <- na.exclude(data)
+      warning("[calc_AverageDose()] NA values in data set detected. Rows with NA values removed!", call. = FALSE)
+
+    }
+
+    ##check data set
+    if(nrow(data) == 0){
+      try(stop("[calc_AverageDose()] data set contains 0 rows! NULL returned!", call. = FALSE))
+      return(NULL)
+
+    }
+
+    ##data becomes dat (thus, making the code compatible with the code by Claire and Anne)
+    dat <- data
+
+    ##preset column names, as the code refers to it
+    colnames(dat) <- c("cd", "se")
+
+
+  # Pre calculation -----------------------------------------------------------------------------
+
+  ##calculate  yu = log(CD) and su = se(logCD)
+  yu <- log(dat$cd)
+
+  su <- sqrt((dat$se / dat$cd) ^ 2 + sigma_m ^ 2)
+
+  # calculate starting values and weights
+  sigma_d <- sd(dat$cd) / mean(dat$cd)
+  wu <- 1 / (sigma_d ^ 2 + su ^ 2)
+
+  delta <- mean(dat$cd)
+  n <- length(yu)
+
+  ##terminal output
+  if (verbose) {
+    cat("\n[calc_AverageDose()]")
+    cat("\n\n>> Initialisation <<")
+    cat(paste("\nn:\t\t", n))
+    cat(paste("\ndelta:\t\t", delta))
+    cat(paste("\nsigma_m:\t", sigma_m))
+    cat(paste("\nsigma_d:\t", sigma_d))
+  }
+
+
+  # mle's computation
+  dhat <- .mle(yu, su, wu.start = wu, sigma_d.start = sigma_d, delta.start = delta)
+  delta <- dhat[1]
+  sigma_d <- dhat[2]
+  wu <- 1 / (sigma_d ^ 2 + su ^ 2)
+
+  # maximum log likelihood
+  llik <- sum(-log(sqrt(2 * pi / wu)) - (wu / 2) * ((yu - log(delta) + 0.5 * (sigma_d ^ 2)) ^ 2))
+
+  ##terminal output
+  if(verbose){
+    cat(paste("\n\n>> Calculation <<\n"))
+    cat(paste("log likelihood:\t", round(llik, 4)))
+
+  }
+
+
+  # standard errors obtained by bootstrap, we refer to Efron B. and Tibshirani R. (1986)
+  # should the article be cited here or simply in the publication?
+  n <- length(yu)
+
+  ##calculate dstar
+  ##set matrix for I
+  I <- matrix(data = sample(x = 1:n, size = n * Nb_BE, replace = TRUE), ncol = Nb_BE)
+
+  ##iterate over the matrix and produce dstar
+  ##(this looks a little bit complicated, but is far more efficient)
+  dstar <- t(vapply(
+    X = 1:Nb_BE,
+    FUN = function(x) {
+      .mle(yu[I[, x]], su[I[, x]], sigma_d.start = sigma_d, delta.start = delta, wu.start = wu)
+
+    },
+    FUN.VALUE = vector(mode = "numeric", length = 2)
+  ))
+
+  ##exclude NA values
+  dstar <- na.exclude(dstar)
+
+
+  ##calculate confidence intervals
+  IC_delta <- .CredibleInterval(dstar[,1],0.95)
+  IC_sigma_d <- .CredibleInterval(dstar[,2],0.95)
+  IC <- rbind(IC_delta, IC_sigma_d)
+
+  # standard errors
+  sedelta <- sqrt ((1/(Nb_BE-1))*sum((dstar[,1]-mean(dstar[,1]))^2))
+  sesigma_d <- sqrt ((1/(Nb_BE-1))*sum((dstar[,2]-mean(dstar[,2]))^2))
+
+
+  ##Terminal output
+  if (verbose) {
+    cat("\nconfidence intervals\n")
+    cat("--------------------------------------------------\n")
+    print(t(IC), print.gap = 6, digits = 4)
+    cat("--------------------------------------------------\n")
+
+    cat(paste("\n>> Results <<\n"))
+    cat("----------------------------------------------------------\n")
+    cat(paste(
+      "Average dose:\t ",
+      round(delta, 4),
+      "\tse(Aver. dose):\t",
+      round(sedelta, 4)
+    ))
+    if(sigma_d == 0){
+      cat(paste(
+        "\nsigma_d:\t ",
+        round(sigma_d, 4),
+        "\t\tse(sigma_d):\t",
+        round(sesigma_d, 4)
+      ))
+
+    }else{
+    cat(paste(
+      "\nsigma_d:\t ",
+      round(sigma_d, 4),
+      "\tse(sigma_d):\t",
+      round(sesigma_d, 4)
+    ))
+    }
+    cat("\n----------------------------------------------------------\n")
+
+  }
+
+  ##compile final results data frame
+  results_df <- data.frame(
+    AVERAGE_DOSE = delta,
+    AVERAGE_DOSE.SE = sedelta,
+    SIGMA_D = sigma_d,
+    SIGMA_D.SE = sesigma_d,
+    IC_AVERAGE_DOSE.LEVEL = IC_delta[1],
+    IC_AVERAGE_DOSE.LOWER = IC_delta[2],
+    IC_AVERAGE_DOSE.UPPER = IC_delta[3],
+    IC_SIGMA_D.LEVEL = IC_sigma_d[1],
+    IC_SIGMA_D.LOWER = IC_sigma_d[2],
+    IC_SIGMA_D.UPPER = IC_sigma_d[3],
+    L_MAX = llik,
+    row.names = NULL
+
+  )
+
+  # Plotting ------------------------------------------------------------------------------------
+
+    ##the plotting (enable/disable) is controlled below, as with this
+    ##we always get a histogram object
+
+    ##set data list
+    data_list <- list(dat$cd, dstar[,1], dstar[,2])
+
+    ##preset plot arguments
+    plot_settings <- list(
+      breaks = list("FD", "FD", "FD"),
+      probability = list(FALSE, TRUE, TRUE),
+      main = list(
+        "Observed: Equivalent dose",
+        "Bootstrapping: Average Dose",
+        "Bootstrapping: Sigma_d"),
+      xlab = list(
+       "Equivalent dose [a.u.]",
+       "Average dose [a.u.]",
+       "Sigma_d"),
+      axes = list(TRUE, TRUE, TRUE),
+      col = NULL,
+      border = NULL,
+      density = NULL,
+      freq = NULL,
+      mtext = list(
+        paste("n = ", length(data_list[[1]])),
+        paste("n = ", length(data_list[[2]])),
+        paste("n = ", length(data_list[[3]]))),
+      rug = list(TRUE, TRUE, TRUE)
+
+    )
+
+    ##modify this list by values the user provides
+
+      ##expand all elements in the list
+      ##problem: the user might provide only one item; in that case the code would break
+      plot_settings.user <- lapply(list(...), function(x){
+        rep(x, length = 3)
+
+      })
+
+      ##modify
+      plot_settings <- modifyList(x =  plot_settings.user, val = plot_settings)
+
+
+    ##get change par setting and reset on exit
+    par.default <- par()$mfrow
+    on.exit(par(mfrow = par.default))
+    par(mfrow = c(1,3))
+
+    ##Produce plots
+    ##(1) - histogram of the observed equivalent dose
+    ##(2) - histogram of the bootstrapped De
+    ##(3) - histogram of the bootstrapped sigma_d
+
+    ##with lapply we also fetch the return value of hist; the user might want to use this later
+    hist <- lapply(1:length(data_list), function(x){
+      temp <- suppressWarnings(hist(
+        x = data_list[[x]],
+        breaks = plot_settings$breaks[[x]],
+        probability = plot_settings$probability[[x]],
+        main = plot_settings$main[[x]],
+        xlab = plot_settings$xlab[[x]],
+        axes = plot_settings$axes[[x]],
+        freq = plot_settings$freq[[x]],
+        plot = plot,
+        col = plot_settings$col[[x]],
+        border = plot_settings$border[[x]],
+        density = plot_settings$density[[x]]
+
+      ))
+
+      if (plot) {
+        ##add rug
+        if (plot_settings$rug[[x]]) {
+          rug(data_list[[x]])
+
+        }
+
+        ##plot mtext
+        mtext(side = 3,
+              text = plot_settings$mtext[[x]],
+              cex = par()$cex)
+      }
+
+      return(temp)
+
+    })
+
+  # Return --------------------------------------------------------------------------------------
+  set_RLum(
+    class = "RLum.Results",
+    data = list(
+      summary = results_df,
+      dstar = as.data.frame(dstar),
+      hist = hist
+      ),
+    info = list(call = sys.call())
+
+  )
+
+}
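
A standalone sketch of the "shortest credible interval" idea used by the internal .CredibleInterval() helper in the new calc_AverageDose() above; the function below is an independent illustration of the principle, not the package implementation:

shortest_ci <- function(x, level = 0.95) {
  s <- sort(x)
  n_out <- floor(length(x) * (1 - level))        # values allowed outside the interval
  lower <- s[1:(n_out + 1)]                      # candidate lower endpoints
  upper <- s[(length(x) - n_out):length(x)]      # matching upper endpoints
  i <- which.min(upper - lower)                  # pick the shortest interval
  c(level = level, lower = lower[i], upper = upper[i])
}
shortest_ci(rlnorm(1000, meanlog = 2, sdlog = 0.3))
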
diff --git a/R/calc_CentralDose.R b/R/calc_CentralDose.R
index 1e0c4aa..5c7060e 100644
--- a/R/calc_CentralDose.R
+++ b/R/calc_CentralDose.R
@@ -17,12 +17,14 @@
 #'
 #' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
 #' (\bold{required}): for \code{data.frame}: two columns with De
-#' \code{(data[,1])} and De error \code{(values[,2])}
+#' \code{(data[,1])} and De error \code{(data[,2])}
 #' 
-#' @param sigmab \code{\link{numeric}} (with default): spread in De values
-#' given as a fraction (e.g. 0.2). This value represents the expected
-#' overdispersion in the data should the sample be well-bleached (Cunningham &
-#' Walling 2012, p. 100).
+#' @param sigmab \code{\link{numeric}} (with default): additional spread in De values.
+#' This value represents the expected overdispersion in the data should the sample be 
+#' well-bleached (Cunningham & Walling 2012, p. 100).
+#' \bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+#' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+#' sigmab must be provided in the same absolute units as the De values (seconds or Gray).
 #' 
 #' @param log \code{\link{logical}} (with default): fit the (un-)logged central
 #' age model to De data
@@ -42,7 +44,7 @@
 #'
 #' The output should be accessed using the function
 #' \code{\link{get_RLum}}
-#' @section Function version: 1.3.1
+#' @section Function version: 1.3.2
 #' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
 #' rewritten S script of Rex Galbraith, 2010 \cr
 #' @seealso \code{\link{plot}}, \code{\link{calc_CommonDose}},
@@ -240,7 +242,7 @@ calc_CentralDose <- function(data, sigmab, log = TRUE, plot = TRUE, ...) {
                         OD = out.sigma, OD_err = out.sesigma * 100, Lmax = Lmax)
   
   call <- sys.call()
-  args <- list(log = "TRUE", sigmab = sigmab)
+  args <- list(log = log, sigmab = sigmab)
   
   newRLumResults.calc_CentralDose <- set_RLum(class = "RLum.Results", 
                                               data = list(summary = summary, 
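
A hedged usage sketch for the sigmab convention documented above; it assumes the package and its example data are available, and the un-logged sigmab value (5, in De units) is illustrative only:

data(ExampleData.DeValues, envir = environment())
## logged model: sigmab is a fraction, e.g. 0.2 = 20 % expected overdispersion
cam_log   <- calc_CentralDose(ExampleData.DeValues$CA1, sigmab = 0.2, log = TRUE)
## un-logged model: sigmab in the same absolute units as the De values
cam_unlog <- calc_CentralDose(ExampleData.DeValues$CA1, sigmab = 5, log = FALSE)
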
diff --git a/R/calc_CommonDose.R b/R/calc_CommonDose.R
index 863ea8b..486a2b2 100644
--- a/R/calc_CommonDose.R
+++ b/R/calc_CommonDose.R
@@ -16,13 +16,19 @@
 #' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
 #' (\bold{required}): for \code{data.frame}: two columns with De
 #' \code{(data[,1])} and De error \code{(values[,2])}
-#' @param sigmab \code{\link{numeric}} (with default): spread in De values
-#' given as a fraction (e.g. 0.2). This value represents the expected
-#' overdispersion in the data should the sample be well-bleached (Cunningham &
-#' Walling 2012, p. 100).
+#' 
+#' @param sigmab \code{\link{numeric}} (with default): additional spread in De values.
+#' This value represents the expected overdispersion in the data should the sample be 
+#' well-bleached (Cunningham & Walling 2012, p. 100).
+#' \bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+#' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+#' sigmab must be provided in the same absolute units as the De values (seconds or Gray).
+#' 
 #' @param log \code{\link{logical}} (with default): fit the (un-)logged common
 #' age model to De data
+#' 
 #' @param \dots currently not used.
+#' 
 #' @return Returns a terminal output. In addition an
 #' \code{\linkS4class{RLum.Results}} object is returned containing the
 #' following element:
@@ -33,10 +39,13 @@
 #'
 #' The output should be accessed using the function
 #' \code{\link{get_RLum}}
-#' @section Function version: 0.1
+#' @section Function version: 0.1.1
+#' 
 #' @author Christoph Burow, University of Cologne (Germany)
+#' 
 #' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
 #' \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
+#' 
 #' @references Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
 #' mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
 #' \cr\cr Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley,
@@ -58,6 +67,7 @@
 #' of optical dating of fluvial deposits. Quaternary Geochronology 1,
 #' 109-120.\cr\cr Rodnight, H., 2008. How many equivalent dose values are
 #' needed to obtain a reproducible distribution?. Ancient TL 26, 3-10.
+#' 
 #' @examples
 #'
 #' ## load example data
diff --git a/R/calc_CosmicDoseRate.R b/R/calc_CosmicDoseRate.R
index ff08c41..769b417 100644
--- a/R/calc_CosmicDoseRate.R
+++ b/R/calc_CosmicDoseRate.R
@@ -74,26 +74,37 @@
 #' @param depth \code{\link{numeric}} (\bold{required}): depth of overburden
 #' (m).  For more than one absorber use \cr \code{c(depth_1, depth_2, ...,
 #' depth_n)}
+#' 
 #' @param density \code{\link{numeric}} (\bold{required}): average overburden
 #' density (g/cm^3). For more than one absorber use \cr \code{c(density_1,
 #' density_2, ..., density_n)}
+#' 
 #' @param latitude \code{\link{numeric}} (\bold{required}): latitude (decimal
 #' degree), N positive
+#' 
 #' @param longitude \code{\link{numeric}} (\bold{required}): longitude (decimal
 #' degree), E positive
+#' 
 #' @param altitude \code{\link{numeric}} (\bold{required}): altitude (m above
 #' sea-level)
+#' 
 #' @param corr.fieldChanges \code{\link{logical}} (with default): correct for
 #' geomagnetic field changes after Prescott & Hutton (1994). Apply only when
 #' justified by the data.
+#' 
 #' @param est.age \code{\link{numeric}} (with default): estimated age range
 #' (ka) for geomagnetic field change correction (0-80 ka allowed)
+#' 
 #' @param half.depth \code{\link{logical}} (with default): How to overcome with
 #' varying overburden thickness. If \code{TRUE} only half the depth is used for
 #' calculation. Apply only when justified, i.e. when a constant sedimentation
 #' rate can safely be assumed.
+#' 
 #' @param error \code{\link{numeric}} (with default): general error
 #' (percentage) to be implemented on corrected cosmic dose rate estimate
+#' 
+#' @param ... further arguments (\code{verbose} to disable/enable console output).
+#' 
 #' @return Returns a terminal output. In addition an
 #' \code{\linkS4class{RLum.Results}} object is returned containing the
 #' following element:
@@ -207,10 +218,17 @@ calc_CosmicDoseRate<- function(
   corr.fieldChanges = FALSE,
   est.age = NA,
   half.depth = FALSE,
-  error = 10
+  error = 10,
+  ...
 ) {
 
   ##============================================================================##
+  ## ... ARGUMENTS
+  ##============================================================================##
+  settings <- list(verbose = TRUE)
+  settings <- modifyList(settings, list(...))
+  
+  ##============================================================================##
   ## CONSISTENCY CHECK OF INPUT DATA
   ##============================================================================##
 
@@ -446,13 +464,12 @@ calc_CosmicDoseRate<- function(
 
         dc<- dc * corr.fac
 
-        print(paste("corr.fac",corr.fac,"diff.one",diff.one,"alt.fac",alt.fac))
-
-      }
+        if (settings$verbose)
+          print(paste("corr.fac",corr.fac,"diff.one",diff.one,"alt.fac",alt.fac))
 
-      else {
-        cat(paste("\n No geomagnetic field change correction necessary for
-                geomagnetic latitude >35 degrees!"))
+      } else {
+        if (settings$verbose) 
+          cat(paste("\n No geomagnetic field change correction necessary for geomagnetic latitude >35 degrees!"))
       }
     }
 
@@ -479,27 +496,27 @@ calc_CosmicDoseRate<- function(
     ##============================================================================##
     ##TERMINAL OUTPUT
     ##============================================================================##
-
-    cat("\n\n [calc_CosmicDoseRate]")
-    cat(paste("\n\n ---------------------------------------------------------"))
-    cat(paste("\n depth (m)              :", depth))
-    cat(paste("\n density (g cm^-3)      :", density))
-    cat(paste("\n latitude (N deg.)      :", latitude))
-    cat(paste("\n longitude (E deg.)     :", longitude))
-    cat(paste("\n altitude (m)           :", altitude))
-    cat(paste("\n ---------------------------------------------------------"))
-    cat(paste("\n total absorber (g cm^-2)       :", round(hgcm[i]*100,3)))
-    cat(paste("\n"))
-    cat(paste("\n cosmic dose rate (Gy ka^-1)    :", round(d0,4)))
-    cat(paste("\n  [@sea-level & 55 deg. N G.lat]"))
-    cat(paste("\n"))
-    cat(paste("\n geomagnetic latitude (deg.)    :", round(true.gml,1)))
-    cat(paste("\n"))
-    cat(paste("\n cosmic dose rate (Gy ka^-1)    :", round(dc,4),"+-",
-              round(dc.err,4)))
-    cat(paste("\n  [corrected]                 "))
-    cat(paste("\n ---------------------------------------------------------\n\n"))
-
+    if (settings$verbose) {
+      cat("\n\n [calc_CosmicDoseRate]")
+      cat(paste("\n\n ---------------------------------------------------------"))
+      cat(paste("\n depth (m)              :", depth))
+      cat(paste("\n density (g cm^-3)      :", density))
+      cat(paste("\n latitude (N deg.)      :", latitude))
+      cat(paste("\n longitude (E deg.)     :", longitude))
+      cat(paste("\n altitude (m)           :", altitude))
+      cat(paste("\n ---------------------------------------------------------"))
+      cat(paste("\n total absorber (g cm^-2)       :", round(hgcm[i]*100,3)))
+      cat(paste("\n"))
+      cat(paste("\n cosmic dose rate (Gy ka^-1)    :", round(d0,4)))
+      cat(paste("\n  [@sea-level & 55 deg. N G.lat]"))
+      cat(paste("\n"))
+      cat(paste("\n geomagnetic latitude (deg.)    :", round(true.gml,1)))
+      cat(paste("\n"))
+      cat(paste("\n cosmic dose rate (Gy ka^-1)    :", round(dc,4),"+-",
+                round(dc.err,4)))
+      cat(paste("\n  [corrected]                 "))
+      cat(paste("\n ---------------------------------------------------------\n\n"))
+    }
     ##============================================================================##
     ##RETURN VALUES
     ##============================================================================##
@@ -547,10 +564,12 @@ calc_CosmicDoseRate<- function(
   } else {
 
     #terminal output
-    cat("\n\n [calc_CosmicDoseRate]")
-    cat(paste("\n\n Calculating cosmic dose rate for",length(depth),
-              "samples. \n\n"))
-    print(profile.results)
+    if (settings$verbose) {
+      cat("\n\n [calc_CosmicDoseRate]")
+      cat(paste("\n\n Calculating cosmic dose rate for",length(depth),
+                "samples. \n\n"))
+      print(profile.results)
+    }
 
     #return value
     add.info<- data.frame(latitude=latitude,longitude=longitude,
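
The hunks above route all terminal output of calc_CosmicDoseRate() through a new
'verbose' setting picked up from '...'. A minimal sketch of a call that silences the
console report (the site parameters below are purely illustrative):

    calc_CosmicDoseRate(depth = 2.78, density = 1.7,
                        latitude = 38.06451, longitude = 1.49646,
                        altitude = 364, error = 10,
                        verbose = FALSE)  # suppress the report printed by the cat() block above
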
diff --git a/R/calc_FadingCorr.R b/R/calc_FadingCorr.R
index 0bef641..c4bdcba 100644
--- a/R/calc_FadingCorr.R
+++ b/R/calc_FadingCorr.R
@@ -85,6 +85,10 @@
 #' @param seed \code{\link{integer}} (optional): sets the seed for the random number generator
 #' in R using \code{\link{set.seed}}
 #'
+#' @param interval \code{\link{numeric}} (with default): a vector of length two containing the
+#' end-points of the age interval (in ka) to be searched for the root. This argument is passed
+#' to \code{\link[stats]{uniroot}}, which is used for solving the equation.
+#'
 #' @param txtProgressBar \link{logical} (with default): enables or disables
 #' \code{\link{txtProgressBar}}
 #'
@@ -110,11 +114,10 @@
 #' }
 #'
 #'
-#' @note The upper age limit is set to 500 ka! \cr
-#' Special thanks to Sebastien Huot for his support and clarification via e-mail.
+#' @note Special thanks to Sebastien Huot for his support and clarification via e-mail.
 #'
 #'
-#' @section Function version: 0.4.1
+#' @section Function version: 0.4.2
 #'
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
@@ -171,6 +174,7 @@ calc_FadingCorr <- function(
   tc.g_value = tc,
   n.MC = 10000,
   seed = NULL,
+  interval = c(0.01,500),
   txtProgressBar = TRUE,
   verbose = TRUE
 ){
@@ -185,9 +189,8 @@ calc_FadingCorr <- function(
   if(class(g_value)[1] == "RLum.Results"){
     if(g_value at originator == "analyse_FadingMeasurement"){
 
-      g_value <- get_RLum(g_value)[,c("FIT", "SD")]
       tc <- get_RLum(g_value)[["TC"]]
-
+      g_value <- as.numeric(get_RLum(g_value)[,c("FIT", "SD")])
 
     }else{
       try(stop("[calc_FadingCorr()] Unknown originator for the provided RLum.Results object via 'g_value'!", call. = FALSE))
@@ -238,7 +241,7 @@ calc_FadingCorr <- function(
   temp <-
     uniroot(
       f,
-      c(0.1, 500),
+      interval = interval,
       tol = 0.001,
       tc = tc,
       af = age.faded[1],
@@ -303,7 +306,7 @@ calc_FadingCorr <- function(
     tempMC[i:j] <- suppressWarnings(vapply(X = 1:length(age.fadedMC), FUN = function(x) {
       temp <- try(uniroot(
         f,
-        c(0.1,500),
+        interval = interval,
         tol = 0.001,
         tc = tc,
         af = age.fadedMC[[x]],
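
With the new 'interval' argument the former hard-coded 500 ka upper age limit is gone;
the search range handed to uniroot() is now set by the caller. An illustrative call
(all values are hypothetical):

    calc_FadingCorr(age.faded = c(0.1, 0),
                    g_value = c(5.0, 1.0),
                    tc = 2592000,             # time since irradiation (s)
                    tc.g_value = 172800,      # tc of the g-value measurement (s)
                    interval = c(0.01, 500),  # age interval (ka) searched by uniroot()
                    n.MC = 100)
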
diff --git a/R/calc_FastRatio.R b/R/calc_FastRatio.R
index 4445c9f..16dcc66 100644
--- a/R/calc_FastRatio.R
+++ b/R/calc_FastRatio.R
@@ -24,6 +24,11 @@
 #' 
 #' @param Ch_L1 \code{\link{numeric}} (with default): An integer specifying the channel for L1.
 #' 
+#' @param Ch_L2 \code{\link{numeric}} (optional): An integer specifying the channel for L2.
+#' 
+#' @param Ch_L3 \code{\link{numeric}} (optional): A vector of length 2 with integer
+#' values specifying the start and end channels for L3 (e.g., \code{c(40, 50)}).
+#' 
 #' @param x \code{\link{numeric}} (with default): \% of signal remaining from the fast component.
 #' Used to define the location of L2 and L3 (start).
 #' 
@@ -54,7 +59,7 @@
 #' \item{args}{\code{\link{list}} of used arguments}
 #' \item{call}{\code{\link{call}} the function call}
 #' 
-#' @section Function version: 0.1.0
+#' @section Function version: 0.1.1
 #'
 #' @author 
 #' Georgina King, University of Cologne (Germany) \cr
@@ -96,6 +101,8 @@ calc_FastRatio <- function(object,
                            sigmaF = 2.6E-17,
                            sigmaM = 4.28E-18,
                            Ch_L1 = 1,
+                           Ch_L2 = NULL,
+                           Ch_L3 = NULL,
                            x = 1,
                            x2 = 0.1,
                            dead.channels = c(0,0),
@@ -104,6 +111,10 @@ calc_FastRatio <- function(object,
                            plot = TRUE,
                            ...) {
   
+  ## Input verification --------------------------------------------------------
+  if (!is.null(Ch_L3) && length(Ch_L3) != 2)
+    stop("Input for 'Ch_L3' must be a vector of length 2 (e.g., c(40, 50).", call. = FALSE)
+  
   ## Input object handling -----------------------------------------------------
   if (inherits(object, "RLum.Analysis"))
     object <- get_RLum(object)
@@ -114,6 +125,7 @@ calc_FastRatio <- function(object,
   if (!inherits(object, "list"))
     object <-list(object)
   
+  
   ## Settings ------------------------------------------------------------------
   settings <- list(verbose = TRUE,
                    n.components.max = 3,
@@ -166,8 +178,8 @@ calc_FastRatio <- function(object,
       
       if (fitCW.sigma) {
         if (!inherits(fitCW.res, "try-error")) {
-          sigmaF <- get_RLum(fitCW.res, "output.table")$cs1
-          sigmaM <- get_RLum(fitCW.res, "output.table")$cs2
+          sigmaF <- get_RLum(fitCW.res)$cs1
+          sigmaM <- get_RLum(fitCW.res)$cs2
           if (settings$verbose) {
             message("\n [calc_FitCWCurve()]\n")
             message("New value for sigmaF: ", format(sigmaF, digits = 3, nsmall = 2))
@@ -193,12 +205,23 @@ calc_FastRatio <- function(object,
     ## The equivalent time in s of L1, L2, L3
     # Use these values to look up the channel
     t_L1 <- 0
-    t_L2 <- (log(x / 100)) / (-sigmaF * I0)
-    t_L3_start <- (log(x / 100)) / (-sigmaM * I0)
-    t_L3_end <- (log(x2 / 100)) / (-sigmaM * I0)
+    
+    if (is.null(Ch_L2))
+      t_L2 <- (log(x / 100)) / (-sigmaF * I0)
+    else 
+      t_L2 <- A[Ch_L2, 1]
+    
+    if (is.null(Ch_L3)) {
+      t_L3_start <- (log(x / 100)) / (-sigmaM * I0)
+      t_L3_end <- (log(x2 / 100)) / (-sigmaM * I0)
+    } else {
+      t_L3_start <- A[Ch_L3[1], 1]
+      t_L3_end <- A[Ch_L3[2], 1]
+    }
     
     ## Channel number(s) of L2 and L3
-    Ch_L2 <- which.min(abs(A[,1] - t_L2))
+    if (is.null(Ch_L2))
+      Ch_L2 <- which.min(abs(A[,1] - t_L2))
     
     if (Ch_L2 <= 1) {
       msg <- sprintf("Calculated time/channel for L2 is too small (%.f, %.f). Returned NULL.", 
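
The new 'Ch_L2'/'Ch_L3' arguments allow the L2 and L3 integration channels to be fixed
manually instead of being derived from sigmaF, sigmaM and the stimulation power. A
sketch of such a call, assuming the package's CW-OSL example curve and hypothetical
channel numbers:

    data("ExampleData.CW_OSL_Curve", envir = environment())
    calc_FastRatio(ExampleData.CW_OSL_Curve,
                   Ch_L2 = 5,          # channel used for L2
                   Ch_L3 = c(40, 50))  # start and end channels used for L3
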
diff --git a/R/calc_FiniteMixture.R b/R/calc_FiniteMixture.R
index 17c4c26..ea9bf11 100644
--- a/R/calc_FiniteMixture.R
+++ b/R/calc_FiniteMixture.R
@@ -561,7 +561,7 @@ calc_FiniteMixture <- function(
   ##=========##
   ## PLOTTING
   if(plot==TRUE) {
-    try(plot_RLum.Results(newRLumResults.calc_FiniteMixture, ...))
+    try(do.call(plot_RLum.Results, c(list(newRLumResults.calc_FiniteMixture), as.list(sys.call())[-c(1,2)])))
   }#endif::plot
 
   # Return values
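
The replacement above forwards the extra arguments of the original call to
plot_RLum.Results() via do.call() and sys.call() instead of relying on '...'. A
self-contained sketch of that idiom with hypothetical function names:

    f_inner <- function(x, main = "default") main
    f_outer <- function(x, ...) {
      # drop the function name [1] and the first argument [2], pass the rest on
      do.call(f_inner, c(list(x), as.list(sys.call())[-c(1, 2)]))
    }
    f_outer(1:10, main = "custom title")  # returns "custom title"
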
diff --git a/R/calc_Kars2008.R b/R/calc_Kars2008.R
new file mode 100644
index 0000000..c0ea67b
--- /dev/null
+++ b/R/calc_Kars2008.R
@@ -0,0 +1,725 @@
+#' Apply the Kars et al. (2008) model
+#'
+#' A function to calculate the expected sample specific fraction of saturation
+#' following Kars et al. (2008) and Huntley (2006).
+#'
+#' This function applies the approach described in Kars et al. (2008),
+#' developed from the model of Huntley (2006) to calculate the expected sample
+#' specific fraction of saturation of a feldspar and also to calculate the fading
+#' corrected age using this model. \eqn{\rho}' (\code{rhop}), the density of recombination
+#' centres, is a crucial parameter of this model and must be determined
+#' separately from a fading measurement. The function
+#' \code{\link[Luminescence]{analyse_FadingMeasurement}}
+#' can be used to calculate the sample specific \eqn{\rho}' value.
+#'
+#' Firstly the unfaded D0 value is determined through applying equation 5 of
+#' Kars et al. (2008) to the measured LxTx data as a function of irradiation
+#' time, and fitting the data with a single saturating exponential of the form:
+#'
+#' \deqn{LxTx(t*) = A x \phi(t*) x (1 - exp(-(t* / D0)))}
+#'
+#' where
+#'
+#' \deqn{\phi(t*) = exp(-\rho' x ln(1.8 x s_tilde x t*)^3)}
+#'
+#' after King et al. (2016) where \code{A} is a pre-exponential factor,
+#' \code{t*} (s) is the irradiation time, starting at the mid-point of
+#' irradiation (Auclair et al. 2003) and \code{s_tilde} (3x10^15 s^-1) is the athermal
+#' frequency factor after Huntley (2006). \cr
+#'
+#' Using fit parameters \code{A} and \code{D0}, the function then computes a natural dose
+#' response curve using the environmental dose rate, \code{D_dot} (Gy/s) and equations
+#' [1] and [2]. Computed LxTx values are then fitted using the
+#' \code{\link[Luminescence]{plot_GrowthCurve}} function and the laboratory measured LnTn can then
+#' be interpolated onto this curve to determine the fading corrected
+#' De value, from which the fading corrected age is calculated. \cr
+#'
+#' The \code{calc_Kars2008} function also calculates the level of saturation (n/N)
+#' and the field saturation (i.e. athermal steady state, (n/N)_SS) value for
+#' the sample under investigation using the sample specific \eqn{\rho}',
+#' unfaded \code{D0} and \code{D_dot} values, following the approach of Kars et al. (2008). \cr
+#'
+#' Uncertainties are reported at 1 sigma, are assumed to be normally
+#' distributed, and are estimated using Monte Carlo resamples (\code{n.MC = 1000})
+#' of \eqn{\rho}' and LxTx during dose response curve fitting, and of \eqn{\rho}'
+#' in the derivation of (n/N) and (n/N)_SS.
+#'
+#'
+#'
+#' @param data \code{\link{data.frame}} (\bold{required}):
+#' A three column data frame with numeric values on a) dose (s), b) LxTx and
+#' c) LxTx error. If a two column data frame is provided it is automatically
+#' assumed that errors on LxTx are missing. A third column will be attached
+#' with an arbitrary 5 \% error on the provided LxTx values.\cr
+#' Can also be a wide table, i.e. a \code{\link{data.frame}} with a number of columns divisible by 3
+#' and where each triplet has the aforementioned column structure.
+#'
+#' @param rhop \code{\link{numeric}} (\bold{required}):
+#' The density of recombination centres (\eqn{\rho}') and its error (see Huntley 2006),
+#' given as numeric vector of length two. Note that \eqn{\rho}' must \bold{not} be
+#' provided as the common logarithm. Example: \code{rhop = c(2.92e-06, 4.93e-07)}.
+#'
+#' @param ddot \code{\link{numeric}} (\bold{required}):
+#' Environmental dose rate and its error, given as a numeric vector of length two.
+#' Expected unit: Gy/ka. Example: \code{ddot = c(3.7, 0.4)}.
+#'
+#' @param readerDdot \code{\link{numeric}} (\bold{required}):
+#' Dose rate of the irradiation source of the OSL reader and its error,
+#' given as a numeric vector of length two.
+#' Expected unit: Gy/s. Example: \code{readerDdot = c(0.08, 0.01)}.
+#'
+#' @param normalise \code{\link{logical}} (with default):
+#' If \code{TRUE} (the default) all measured and computed LxTx values are
+#' normalised by the pre-exponential factor A (see details).
+#'
+#' @param summary \code{\link{logical}} (with default):
+#' If \code{TRUE} (the default) various parameters provided by the user
+#' and calculated by the model are added as text on the right-hand side of the
+#' plot.
+#'
+#' @param plot \code{\link{logical}} (with default): enables/disables plot output.
+#'
+#' @param ... further arguments passed to \code{\link{plot}} and
+#' \code{\link[Luminescence]{plot_GrowthCurve}}.
+#'
+#' @return An \code{\linkS4class{RLum.Results}} object is returned:
+#'
+#' Slot: \bold{@data}\cr
+#'
+#' \tabular{lll}{
+#' \bold{OBJECT} \tab \bold{TYPE} \tab \bold{COMMENT}\cr
+#' \code{results} \tab \code{data.frame} \tab results of the Kars et al. 2008 model \cr
+#' \code{data} \tab \code{data.frame} \tab original input data \cr
+#' \code{Ln} \tab \code{numeric} \tab Ln and its error \cr
+#' \code{LxTx_tables} \tab \code{list} \tab A \code{list} of \code{data.frames}
+#' containing data on dose, LxTx and LxTx error for each of the dose response curves.
+#' Note that these \bold{do not} contain the natural Ln signal, which is provided separately. \cr
+#' \code{fits} \tab \code{list} \tab A \code{list} of \code{nls}
+#'  objects produced by \code{\link[minpack.lm]{nlsLM}} when fitting the dose response curves \cr
+#' }
+#'
+#' Slot: \bold{@info}\cr
+#'
+#' \tabular{lll}{
+#' \bold{OBJECT} \tab \bold{TYPE} \tab \bold{COMMENT} \cr
+#' \code{call} \tab \code{call} \tab the original function call \cr
+#' \code{args} \tab \code{list} \tab arguments of the original function call \cr
+#'
+#' }
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Georgina King, University of Cologne (Germany), \cr
+#' Christoph Burow, University of Cologne (Germany)
+#'
+#' @note \bold{This function has BETA status and should not be used for publication work!}
+#'
+#' @keywords datagen
+#'
+#' @references
+#'
+#' Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar
+#' IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021
+#'
+#' Huntley, D.J., 2006. An explanation of the power-law decay of luminescence.
+#' Journal of Physics: Condensed Matter 18, 1359-1365. doi:10.1088/0953-8984/18/4/020
+#'
+#' King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016.
+#' Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004
+#'
+#'
+#' \bold{Further reading}
+#'
+#' Morthekai, P., Jain, M., Cunha, P.P., Azevedo, J.M., Singhvi, A.K., 2011. An attempt to correct
+#' for the fading in million year old basaltic rocks. Geochronometria 38(3), 223-230.
+#'
+#' @examples
+#'
+#' ## Load example data (sample UNIL/NB123, see ?ExampleData.Fading)
+#' data("ExampleData.Fading", envir = environment())
+#'
+#' ## (1) Set all relevant parameters
+#' # a. fading measurement data (IR50)
+#' fading_data <- ExampleData.Fading$fading.data$IR50
+#'
+#' # b. Dose response curve data
+#' data <- ExampleData.Fading$equivalentDose.data$IR50
+#'
+#' ## (2) Define required function parameters
+#' ddot <- c(7.00, 0.004)
+#' readerDdot <- c(0.134, 0.0067)
+#'
+#' # Analyse fading measurement and get an estimate of rho'.
+#' # Note that the RLum.Results object can be directly used for further processing.
+#' # The number of MC runs is reduced for this example
+#' rhop <- analyse_FadingMeasurement(fading_data, plot = TRUE, verbose = FALSE, n.MC = 10)
+#'
+#' ## (3) Apply the Kars et al. (2008) model to the data
+#' kars <- calc_Kars2008(data = data,
+#'                       rhop = rhop,
+#'                       ddot = ddot,
+#'                       readerDdot = readerDdot,
+#'                       n.MC = 50
+#'                       )
+#' @export
+calc_Kars2008 <- function(data,
+                          rhop,
+                          ddot,
+                          readerDdot,
+                          normalise = TRUE,
+                          summary = TRUE,
+                          plot = TRUE,
+                          ...) {
+
+  ## Validate Input ------------------------------------------------------------
+
+  ## Check 'data'
+  # must be a data frame
+  if (is.data.frame(data)) {
+
+    if (ncol(data) == 2) {
+      warning("[calc_Kars2008] 'data' only had two columns. We assumed that",
+              " the errors on LxTx were missing and automatically added a",
+              " 5 % error.\n Please provide a data frame with three columns",
+              " if you wish to use actually measured LxTx errors.", call. = FALSE)
+      data[ ,3] <- data[ ,2] * 0.05
+    }
+
+    # check number of columns
+    if (ncol(data) %% 3 != 0) {
+      stop("[calc_Kars2008] the number of columns in 'data' must be a multiple of 3.", 
+           call. = FALSE)
+    } else {
+      # extract all LxTx values
+      data_tmp <- do.call(rbind, 
+                        lapply(seq(1, ncol(data), 3), function(col) {
+                          setNames(data[2:nrow(data), col:c(col+2)], c("dose", "LxTx", "LxTxError")) 
+                        })
+      )
+      # extract the LnTn values (assumed to be the first row) and calculate the column mean
+      LnTn_tmp <- do.call(rbind, 
+                          lapply(seq(1, ncol(data), 3), function(col) {
+                            setNames(data[1, col:c(col+2)], c("dose", "LxTx", "LxTxError")) 
+                          })
+      )
+      
+      # check whether the standard deviation of the LnTn estimates or the mean
+      # of their individual errors is larger, and use the larger of the two
+      LnTn_error_tmp <- max(c(sd(LnTn_tmp[ ,2]), mean(LnTn_tmp[ ,3])), na.rm = TRUE)
+      LnTn_tmp <- colMeans(LnTn_tmp)
+
+      # re-bind the data frame
+      data <- rbind(LnTn_tmp, data_tmp)
+      data[1, 3] <- LnTn_error_tmp
+      data <- data[complete.cases(data), ]
+    }
+    
+    
+  } else {
+    stop("\n[calc_Kars2008] 'data' must be a data frame.",
+         call. = FALSE)
+  }
+
+  ## Check 'rhop'
+  # check if numeric
+  if (is.numeric(rhop)) {
+
+    ### TODO: can be of length 2 if error
+    if (length(rhop) != 2)
+      stop("\n[calc_Kars2008] 'rhop' must be a vector of length two.",
+           call. = FALSE)
+
+    # alternatively, an RLum.Results object produced by analyse_FadingMeasurement()
+    # can be provided
+  } else if (inherits(rhop, "RLum.Results")) {
+
+    if (rhop at originator == "analyse_FadingMeasurement")
+      rhop <- c(rhop at data$rho_prime$MEAN,
+                rhop at data$rho_prime$SD)
+    else
+      stop("\n[calc_Kars2008] Only an 'RLum.Results' object produced by",
+           " 'analyse_FadingMeasurement()' is allowed as input for 'rhop'.",
+           call. = FALSE)
+  }
+
+  ## Check ddot & readerDdot
+  # check if numeric
+  if (any(sapply(list(ddot, readerDdot), is.numeric) == FALSE))
+    stop("\n[calc_Kars2008] 'ddot' and 'readerDdot' must be numeric values.",
+         call. = FALSE)
+  # check if length == 2
+  if (any(sapply(list(ddot, readerDdot), function(x) length(x) == 2) == FALSE))
+    stop("\n[calc_Kars2008] 'ddot' and 'readerDdot' must be of length 2.",
+         call. = FALSE)
+
+
+  ## Settings ------------------------------------------------------------------
+  settings <- list(verbose = TRUE,
+                   n.MC = 1000)
+  settings <- modifyList(settings, list(...))
+
+  ## Define Constants ----------------------------------------------------------
+  kb <- 8.617343 * 1e-5
+  alpha <- 1
+  Hs <- 3e15 # s value after Huntley (2006)
+  Ma <- 1e6 * 365.25 * 24 * 3600 #in seconds
+  ka <- Ma / 1000 #in seconds
+
+
+  ## Define Functions ----------------------------------------------------------
+  # fit data using Eq. 5 from Kars et al. (2008), employing
+  # theta after King et al. (2016)
+  theta <- function(t, r) {
+    res <- exp(-r * log(1.8 * Hs * (0.5 * t))^3)
+    res[!is.finite(res)] <- 0
+    return(res)
+  }
+
+  ## Preprocessing -------------------------------------------------------------
+  readerDdot.error <- readerDdot[2]
+  readerDdot <- readerDdot[1]
+  ddot.error <- ddot[2]
+  ddot <- ddot[1]
+
+  colnames(data) <- c("dose", "LxTx", "LxTx.Error")
+  dosetime <- data[["dose"]][2:nrow(data)]
+  LxTx.measured <- data[["LxTx"]][2:nrow(data)]
+  LxTx.measured.error <- data[["LxTx.Error"]][2:nrow(data)]
+
+  #Keep LnTn separate for derivation of measured fraction of saturation
+  Ln <- data[["LxTx"]][1]
+  Ln.error <- data[["LxTx.Error"]][1]
+
+  ## (1) MEASURED ----------------------------------------------------
+  if (settings$verbose) cat("\n")
+
+  data.tmp <- data
+  data.tmp[ ,1] <- data.tmp[ ,1] * readerDdot
+  
+  GC.settings <- list(sample = data.tmp,
+                      mode = "interpolation",
+                      fit.method = "EXP",
+                      output.plot = plot,
+                      main = "Measured dose response curve",
+                      xlab = "Dose (Gy)",
+                      verbose = FALSE)
+  
+  GC.settings <- modifyList(GC.settings, list(...))
+  GC.settings$verbose <- FALSE
+
+  GC.measured <- try(do.call(plot_GrowthCurve, GC.settings))
+  
+  if (inherits(GC.measured, "try-error"))
+    stop("\n[calc_Kars2008()] Unable to fit growth curve to data", call. = FALSE)
+
+  # extract results and calculate age
+  GC.results <- get_RLum(GC.measured)
+  fit_measured <- GC.measured at data$Fit
+  De.measured <- GC.results$De
+  De.measured.error <- GC.results$De.Error
+  D0.measured <- GC.results$D01
+  D0.measured.error <- GC.results$D01.ERROR
+  Age.measured <- De.measured/ ddot
+  Age.measured.error <- Age.measured * sqrt( (De.measured.error / De.measured)^2 +
+                                               (readerDdot.error / readerDdot)^2 +
+                                               (ddot.error / ddot)^2)
+
+
+
+  ## (2) SIMULATED -----------------------------------------------------
+
+  # create MC samples
+  rhop_MC <- rnorm(n = settings$n.MC, mean = rhop[1], sd = rhop[2])
+
+  #
+  fitcoef <- do.call(rbind, sapply(rhop_MC, function(rhop_i) {
+    fit_sim <- try(minpack.lm::nlsLM(LxTx.measured ~ a * theta(dosetime, rhop_i) * (1 - exp(-dosetime / D0)),
+                     start = list(a = max(LxTx.measured), D0 = D0.measured / readerDdot)))
+    if (!inherits(fit_sim, "try-error"))
+      coefs <- coef(fit_sim)
+    else
+      coefs <- c(NA, NA)
+    return(coefs)
+  }, simplify = FALSE))
+
+  # final fit for export
+  fit_simulated <- minpack.lm::nlsLM(LxTx.measured ~ a * theta(dosetime, rhop[1]) * (1 - exp(-dosetime / D0)),
+                       start = list(a = max(LxTx.measured), D0 = D0.measured / readerDdot))
+
+  # scaling factor
+  A <- mean(fitcoef[, 1], na.rm = TRUE)
+  A.error <- sd(fitcoef[ ,1], na.rm = TRUE)
+
+  # derive unfaded D0
+  D0.sim <- mean(fitcoef[ ,2], na.rm = TRUE)
+  D0.sim.error <- sd(fitcoef[ ,2], na.rm = TRUE)
+  D0.sim.Gy <- D0.sim * readerDdot
+  D0.sim.Gy.error <- D0.sim.Gy * sqrt( (D0.sim.error / D0.sim)^2 + (readerDdot.error / readerDdot)^2)
+
+
+  # calculate measured fraction of saturation
+  nN <- Ln / A
+  nN.error <- sqrt( (Ln.error / Ln)^2 + (A.error / A)^2)
+
+  # compute a natural dose response curve following the assumptions of
+  # Morthekai et al. 2011, Geochronometria
+  natdosetime <- seq(0, 1e14, length.out = settings$n.MC)
+  natdosetimeGray <- natdosetime * ddot / ka
+
+  # calculate D0 dose in seconds
+  computedD0 <- (fitcoef[ ,2] * readerDdot) / (ddot / ka)
+
+  # compute natural dose response curve
+  LxTx.sim <- A * theta(natdosetime, rhop[1]) * (1 - exp(-natdosetime / mean(computedD0, na.rm = TRUE)))
+
+  # calculate Age
+  if (Ln < max(LxTx.sim)) {
+
+    positive <- which(diff(LxTx.sim) > 0)
+
+    data.unfaded <- data.frame(dose = c(0, natdosetime[positive] * ddot / ka),
+                               LxTx = c(Ln, LxTx.sim[positive]),
+                               LxTx.error = c(Ln.error, LxTx.sim[positive] * A.error/A))
+
+    data.unfaded$LxTx.error[2] <- 0.0001
+
+    GC.settings <- list(sample = data.unfaded,
+                        mode = "interpolation",
+                        fit.method = "EXP",
+                        output.plot = TRUE,
+                        verbose = FALSE,
+                        main = "Simulated dose response curve",
+                        xlab = "Dose (Gy)")
+    
+    GC.settings <- modifyList(GC.settings, list(...))
+    GC.settings$verbose <- FALSE
+    
+    suppressWarnings(
+      GC.unfaded <- try(do.call(plot_GrowthCurve, GC.settings))
+    )
+
+    if (!inherits(GC.unfaded, "try-error")) {
+      GC.unfaded.results <- get_RLum(GC.unfaded)
+      De.sim <- GC.unfaded.results$De
+      De.error.sim <- GC.unfaded.results$De.Error
+      Age.sim <- De.sim / ddot
+      Age.sim.error <- Age.sim * sqrt( ( De.error.sim/ De.sim)^2 +
+                                         (readerDdot.error / readerDdot)^2 +
+                                         (ddot.error / ddot)^2)
+
+
+    } else {
+      De.sim <- De.error.sim <- Age.sim <- Age.sim.error <- NA
+    }
+
+  } else {
+    De.sim <- De.error.sim <- Age.sim <- Age.sim.error <- NA
+  }
+
+  if (Ln > max(LxTx.sim) * 1.1)
+    warning("[calc_Kars2008] Ln is >10 % larger than the maximum computed LxTx value.",
+            " The De and age should be regarded as infinite estimates.", call. = FALSE)
+
+  # Estimate nN_(steady state) by Monte Carlo Simulation
+  ddot_MC <- rnorm(n = settings$n.MC, mean = ddot, sd = ddot.error)
+  UFD0_MC <- rnorm(n = settings$n.MC, mean = D0.sim.Gy, sd = D0.sim.Gy.error)
+
+  nN_SS_MC <- mapply(function(rhop_i, ddot_i, UFD0_i) {
+    rprime <- seq(0.01, 5, length.out = settings$n.MC)
+    rho <- 3 * alpha^3 * rhop_i / (4 * pi)
+    r <- rprime / (4 * pi * rho / 3)^(1 / 3)
+    pr <- 3 * rprime^2 * exp(-rprime^3)
+    tau <- ((1 / Hs) * exp(1)^(alpha * r)) / ka
+    Ls <- 1 / (1 + UFD0_i / (ddot_i * tau))
+    Lstrap <- (pr * Ls) / sum(pr)
+
+    # field saturation
+    nN_SS_i <- sum(Lstrap)
+    return(nN_SS_i)
+
+  }, rhop_MC, ddot_MC, UFD0_MC, SIMPLIFY = TRUE)
+
+  nN_SS <- mean(nN_SS_MC, na.rm = TRUE)
+  nN_SS.error <- sd(nN_SS_MC, na.rm = TRUE)
+
+  ## (3) UNFADED ---------------------------------------------------------------
+  LxTx.unfaded <- LxTx.measured / theta(dosetime, rhop[1])
+  LxTx.unfaded[is.nan((LxTx.unfaded))] <- 0
+  LxTx.unfaded[is.infinite(LxTx.unfaded)] <- 0
+  dosetimeGray <- dosetime * readerDdot
+  fit_unfaded <- minpack.lm::nlsLM(LxTx.unfaded ~ a * (1 - exp(-dosetimeGray / D0)),
+                     start = list(a = max(LxTx.unfaded), D0 = D0.measured / readerDdot))
+  D0.unfaded <- coef(fit_unfaded)[["D0"]]
+  D0.error.unfaded <- summary(fit_unfaded)$coefficients["D0", "Std. Error"]
+
+  ## Create LxTx tables --------------------------------------------------------
+
+  # normalise by A (saturation point of the un-faded curve)
+  if (normalise) {
+    LxTx.measured.relErr <- (LxTx.measured.error / LxTx.measured)
+    LxTx.measured <- LxTx.measured / A
+    LxTx.measured.error <- LxTx.measured * LxTx.measured.relErr
+
+    LxTx.sim <- LxTx.sim / A
+    LxTx.unfaded <- LxTx.unfaded / A
+
+    Ln.relErr <- Ln.error / Ln
+    Ln <- Ln / A
+    Ln.error <- Ln * Ln.relErr
+  }
+
+  # combine all computed LxTx values
+  LxTx_measured <- data.frame(
+    dose = dosetimeGray,
+    LxTx = LxTx.measured,
+    LxTx.Error = LxTx.measured.error)
+
+  LxTx_simulated <- data.frame(
+    dose = natdosetimeGray,
+    LxTx = LxTx.sim,
+    LxTx.Error = LxTx.sim * A.error / A)
+
+  LxTx_unfaded <- data.frame(
+    dose = dosetimeGray,
+    LxTx = LxTx.unfaded,
+    LxTx.Error = LxTx.unfaded * A.error / A)
+
+
+  ## Plot settings -------------------------------------------------------------
+  plot.settings <- list(main = "Dose response curves",
+                        xlab = "Dose (Gy)",
+                        ylab = ifelse(normalise, "normalised LxTx (a.u.)", "LxTx (a.u.)")
+  )
+  plot.settings <- modifyList(plot.settings, list(...))
+
+  ## Plotting ------------------------------------------------------------------
+  if (plot) {
+
+    # set plot parameters
+    par.old.full <- par(no.readonly = TRUE)
+
+    # set graphical parameters
+    par(mar = c(5, 4, 4, 4),
+        cex = 0.8)
+    if (summary)
+      par(oma = c(0, 3, 0, 9))
+    else
+      par(oma = c(0, 9, 0, 9))
+    
+    # Find a good estimate of the x-axis limits
+    xlim <- range(pretty(dosetimeGray))
+    if (De.sim > xlim[2])
+      xlim <- range(pretty(c(min(dosetimeGray), De.sim)))
+
+    # Create figure after Kars et al. (2008) contrasting the dose response curves
+    plot(dosetimeGray, LxTx_measured$LxTx,
+         main = plot.settings$main,
+         xlab = plot.settings$xlab,
+         ylab = plot.settings$ylab,
+         pch = 16,
+         ylim = c(0, max(do.call(rbind, list(LxTx_measured, LxTx_unfaded))[["LxTx"]])),
+         xlim = xlim
+    )
+
+    # LxTx error bars
+    segments(x0 = dosetimeGray,
+             y0 = LxTx_measured$LxTx + LxTx_measured$LxTx.Error,
+             x1 = dosetimeGray,
+             y1 = LxTx_measured$LxTx - LxTx_measured$LxTx.Error,
+             col = "black")
+
+    # re-calculate the measured dose response curve in Gray
+    xRange <- range(pretty(dosetimeGray))
+    xNew <- seq(xRange[1], xRange[2], length.out = 200)
+    yNew <- predict(GC.measured at data$Fit, list(x = xNew))
+    if (normalise)
+      yNew <- yNew / A
+
+    # add line
+    lines(xNew, yNew, col  = "black")
+
+    # add error polygon
+    polygon(x = c(natdosetimeGray, rev(natdosetimeGray)),
+            y = c(LxTx_simulated$LxTx + LxTx_simulated$LxTx.Error,
+                  rev(LxTx_simulated$LxTx - LxTx_simulated$LxTx.Error)),
+            col = adjustcolor("grey", alpha.f = 0.5), border = NA)
+
+    # computed LxTx values
+    points(natdosetimeGray, LxTx_simulated$LxTx,
+           type = "l",
+           lty = 2)
+
+
+    # Ln and DE as points
+    points(x = c(0, De.measured),
+           y = c(Ln, Ln),
+           col = "red", pch = c(1, 16))
+
+    # Ln error bar
+    segments(x0 = 0, y0 = Ln - Ln.error,
+             x1 = 0, y1 = Ln + Ln.error,
+             col = "red")
+
+    # Ln as a horizontal line
+    lines(x = c(0, max(c(De.measured, De.sim), na.rm = TRUE)),
+          y = c(Ln, Ln),
+          col = "black", lty = 3)
+
+    # vertical line of measured DE
+    lines(x = c(De.measured, De.measured),
+          y = c(0, Ln),
+          col = "black", lty = 3)
+
+    # add legends
+    legend("bottomright",
+           legend = c("Unfaded DRC",
+                      "Measured DRC",
+                      "Simulated natural DRC"),
+           lty = c(5, 1, 2),
+           bty = "n")
+
+    # add vertical line of simulated De
+    if (!is.na(De.sim)) {
+      lines(x = c(De.sim, De.sim),
+            y = c(0, Ln),
+            col = "black", lty = 3)
+      points(x = De.sim,
+             y = Ln,
+             col = "red" , pch = 16)
+    }
+
+    # add unfaded DRC
+    xRange <- range(pretty(dosetimeGray))
+    xNew <- seq(xRange[1], xRange[2], length.out = 200)
+    yNew <- predict(fit_unfaded, list(dosetimeGray = xNew))
+    if (normalise)
+      yNew <- yNew / A
+
+    lines(xNew, yNew, col  = "black", lty = 5)
+
+    points(x = dosetimeGray,
+           y = LxTx_unfaded$LxTx,
+           col = "black")
+
+    # LxTx error bars
+    segments(x0 = dosetimeGray,
+             y0 = LxTx_unfaded$LxTx + LxTx_unfaded$LxTx.Error,
+             x1 = dosetimeGray,
+             y1 = LxTx_unfaded$LxTx - LxTx_unfaded$LxTx.Error,
+             col = "black")
+
+    # add text
+    if (summary) {
+
+      # define labels as expressions
+      labels.text <- list(
+        bquote(dot(D) == .(round(ddot, 2)) %+-% .(round(ddot.error, 2)) ~ frac(Gy, ka)),
+        bquote(dot(D)["Reader"] == .(round(readerDdot, 3)) %+-% .(round(readerDdot.error, 3)) ~ frac(Gy, s)),
+        bquote(log[10]~(rho~"'") == .(round(log10(rhop[1]), 2)) %+-% .(round(rhop[2] / (rhop[1] * log(10, base = exp(1))), 2)) ),
+        bquote(bgroup("(", frac(n, N), ")") == .(round(nN, 2)) %+-% .(round(nN.error, 2)) ),
+        bquote(bgroup("(", frac(n, N), ")")[SS] == .(round(nN_SS, 2)) %+-% .(round(nN_SS.error, 2)) ),
+        bquote(D["E,sim"] == .(round(De.sim, 2)) %+-% .(round(De.error.sim, 2)) ~ Gy),
+        bquote(D["0,sim"] == .(round(D0.sim.Gy, 2)) %+-% .(round(D0.sim.Gy.error, 2)) ~ Gy),
+        bquote(Age["sim"] == .(round(Age.sim, 2)) %+-% .(round(Age.sim.error, 2)) ~ ka)
+      )
+
+      # each of the labels is positioned at 1/10 of the available y-axis space
+      ypos <- seq(range(axTicks(2))[2], range(axTicks(2))[1], length.out = 10)[1:length(labels.text)]
+
+      # allow overprinting
+      par(xpd = NA)
+
+      # add labels iteratively
+      mapply(function(label, pos) {
+        text(x = max(axTicks(1)) * 1.15,
+             y = pos,
+             labels = label,
+             pos = 4)
+      }, labels.text, ypos)
+    }
+
+    # recover plot parameters
+    on.exit(par(par.old.full))
+
+  }
+
+  ## Results -------------------------------------------------------------------
+  results <- set_RLum(
+    class = "RLum.Results",
+    data = list(
+      results = data.frame("nN" = nN,
+                           "nN.error" = nN.error,
+                           "nN_SS" = nN_SS,
+                           "nN_SS.error" = nN_SS.error,
+                           "Meas_De" = De.measured,
+                           "Meas_De.error" = De.measured.error,
+                           "Meas_D0" =  D0.measured,
+                           "Meas_D0.error" = D0.measured.error,
+                           "Meas_Age" = Age.measured,
+                           "Meas_Age.error" = Age.measured.error,
+                           "Sim_De" = De.sim,
+                           "Sim_De.error" = De.error.sim,
+                           "Sim_D0" = D0.sim.Gy,
+                           "Sim_D0.error" = D0.sim.Gy.error,
+                           "Sim_Age" = Age.sim,
+                           "Sim_Age.error" = Age.sim.error,
+                           "Unfaded_D0" = D0.unfaded,
+                           "Unfaded_D0.error" = D0.error.unfaded,
+                           row.names = NULL),
+      data = data,
+      Ln = c(Ln, Ln.error),
+      LxTx_tables = list(
+        simulated = LxTx_simulated,
+        measured = LxTx_measured,
+        unfaded = LxTx_unfaded),
+      fits = list(
+        simulated = fit_simulated,
+        measured = fit_measured,
+        unfaded = fit_unfaded
+      )
+    ),
+    info = list(call = sys.call(),
+                args = as.list(sys.call())[-1])
+)
+
+  ## Console output ------------------------------------------------------------
+  if (settings$verbose) {
+    cat("\n[calc_Kars2008()]\n")
+    cat("\n -------------------------------")
+    cat("\n (n/N) [-]:\t",
+        round(results at data$results$nN, 2), "\u00b1",
+        round(results at data$results$nN.error, 2))
+    cat("\n (n/N)_SS [-]:\t",
+        round(results at data$results$nN_SS, 2),"\u00b1",
+        round(results at data$results$nN_SS.error, 2))
+    cat("\n\n ---------- Measured -----------")
+    cat("\n DE [Gy]:\t",
+        round(results at data$results$Meas_De, 2), "\u00b1",
+        round(results at data$results$Meas_De.error, 2))
+    cat("\n D0 [Gy]:\t",
+        round(results at data$results$Meas_D0, 2), "\u00b1",
+        round(results at data$results$Meas_D0.error, 2))
+    cat("\n Age [ka]:\t",
+        round(results at data$results$Meas_Age, 2), "\u00b1",
+        round(results at data$results$Meas_Age.error, 2))
+    cat("\n\n ---------- Simulated ----------")
+    cat("\n DE [Gy]:\t",
+        round(results at data$results$Sim_De, 2), "\u00b1",
+        round(results at data$results$Sim_De.error, 2))
+    cat("\n D0 [Gy]:\t",
+        round(results at data$results$Sim_D0, 2), "\u00b1",
+        round(results at data$results$Sim_D0.error, 2))
+    cat("\n Age [ka]:\t",
+        round(results at data$results$Sim_Age, 2), "\u00b1",
+        round(results at data$results$Sim_Age.error, 2))
+    cat("\n\n ---------- Un-faded -----------")
+    cat("\n D0 [Gy]:\t",
+        round(results at data$results$Unfaded_D0, 2), "\u00b1",
+        round(results at data$results$Unfaded_D0.error, 2))
+    cat("\n -------------------------------\n\n")
+
+  }
+
+  ## Return value --------------------------------------------------------------
+  return(results)
+}
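
Continuing the roxygen example above ('kars' is the RLum.Results object created
there), the model output should be accessible with the package's usual accessor; the
column names follow the 'results' data.frame defined in the set_RLum() call above:

    res <- get_RLum(kars)  # data.frame with (n/N), (n/N)_SS, De, D0 and age estimates
    res$Sim_Age            # fading-corrected (simulated) age in ka
    res$Sim_Age.error
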
diff --git a/R/calc_MaxDose.R b/R/calc_MaxDose.R
index 9afee5d..a62a592 100644
--- a/R/calc_MaxDose.R
+++ b/R/calc_MaxDose.R
@@ -4,74 +4,110 @@
 #' that calls calc_MinDose() and applies a similar approach as described in
 #' Olley et al. (2006).
 #'
-#' \bold{Data transformation} \cr\cr To estimate the maximum dose population
+#' \bold{Data transformation} \cr\cr 
+#' To estimate the maximum dose population
 #' and its standard error, the three parameter minimum age model of Galbraith
 #' et al. (1999) is adapted. The measured De values are transformed as follows:
-#' \cr\cr 1. convert De values to natural logs \cr 2. multiply the logged data
-#' to creat a mirror image of the De distribution\cr 3. shift De values along
-#' x-axis by the smallest x-value found to obtain only positive values \cr 4.
-#' combine in quadrature the measurement error associated with each De value
-#' with a relative error specified by sigmab \cr 5. apply the MAM to these data
-#' \cr\cr When all calculations are done the results are then converted as
-#' follows\cr\cr 1. subtract the x-offset \cr 2. multiply the natural logs by
-#' -1 \cr 3. take the exponent to obtain the maximum dose estimate in Gy \cr\cr
-#' \bold{Further documentation} \cr\cr Please see \code{\link{calc_MinDose}}.
+#' \cr\cr 
+#' 1. convert De values to natural logs \cr 
+#' 2. multiply the logged data by -1 to create a mirror image of the De distribution \cr 
+#' 3. shift De values along x-axis by the smallest x-value found to obtain only positive values \cr
+#' 4. combine in quadrature the measurement error associated with each De value
+#' with a relative error specified by sigmab \cr 
+#' 5. apply the MAM to these data \cr\cr 
+#' 
+#' When all calculations are done the results are then converted as
+#' follows\cr\cr 
+#' 1. subtract the x-offset \cr 
+#' 2. multiply the natural logs by -1 \cr 
+#' 3. take the exponent to obtain the maximum dose estimate in Gy \cr\cr
+#' 
+#' \bold{Further documentation} \cr\cr 
+#' Please see \code{\link{calc_MinDose}}.
 #'
 #' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
-#' (\bold{required}): for \code{data.frame}: two columns with De
-#' \code{(data[,1])} and De error \code{(values[,2])}
-#' @param sigmab \code{\link{numeric}} (\bold{required}): spread in De values
-#' given as a fraction (e.g. 0.2). This value represents the expected
-#' overdispersion in the data should the sample be well-bleached (Cunningham &
-#' Walling 2012, p. 100).
+#' (\bold{required}): for \code{data.frame}: two columns with De \code{(data[
+#' ,1])} and De error \code{(data[ ,2])}.
+#' 
+#' @param sigmab \code{\link{numeric}} (\bold{required}): additional spread in De values.
+#' This value represents the expected overdispersion in the data should the sample be 
+#' well-bleached (Cunningham & Wallinga 2012, p. 100).
+#' \bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+#' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+#' sigmab must be provided in the same absolute units of the De values (seconds or Gray).
+#' See details in \code{\link{calc_MinDose}}.
+#' 
 #' @param log \code{\link{logical}} (with default): fit the (un-)logged three
 #' parameter minimum dose model to De data
+#' 
 #' @param par \code{\link{numeric}} (with default): apply the 3- or
 #' 4-parametric minimum age model (\code{par=3} or \code{par=4}).
+#' 
 #' @param bootstrap \code{\link{logical}} (with default): apply the recycled
 #' bootstrap approach of Cunningham & Wallinga (2012).
+#' 
 #' @param init.values \code{\link{numeric}} (with default): starting values for
 #' gamma, sigma, p0 and mu. Custom values need to be provided in a vector of
 #' length three in the form of \code{c(gamma, sigma, p0)}.
+#' 
 #' @param plot \code{\link{logical}} (with default): plot output
 #' (\code{TRUE}/\code{FALSE})
+#' 
 #' @param \dots further arguments for bootstrapping (\code{bs.M, bs.N, bs.h,
 #' sigmab.sd}).  See details for their usage.
+#' 
 #' @return Please see \code{\link{calc_MinDose}}.
-#' @section Function version: 0.3
+#' 
+#' @section Function version: 0.3.1
+#' 
 #' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
 #' rewritten S script of Rex Galbraith, 2010 \cr
+#' 
 #' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
 #' \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
 #' \code{\link{calc_MinDose}}
+#' 
 #' @references Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
 #' 2009. A revised burial dose estimation procedure for optical dating of young
 #' and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+#' 
 #' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
 #' track ages. Nuclear Tracks Radiation Measurements 4, 459-470. \cr\cr
+#' 
 #' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
 #' 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
 #' northern Australia. Part I: experimental design and statistical models.
-#' Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
-#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
-#' R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+#' Archaeometry 41, 339-364. \cr\cr 
+#' 
+#' Galbraith, R.F., 2005. Statistics for
+#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr
+#' 
+#' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
 #' calculation and display in OSL dating: An overview and some recommendations.
-#' Quaternary Geochronology 11, 1-27. \cr\cr Olley, J.M., Roberts, R.G.,
-#' Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
+#' Quaternary Geochronology 11, 1-27. \cr\cr 
+#' 
+#' Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
 #' associated with human burials at Lake Mungo, Australia. Quaternary Science
-#' Reviews 25, 2469-2474.\cr\cr \bold{Further reading} \cr\cr Arnold, L.J. &
-#' Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+#' Reviews 25, 2469-2474.\cr\cr 
+#' 
+#' \bold{Further reading} \cr\cr 
+#' 
+#' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
 #' (De) distributions: Implications for OSL dating of sediment mixtures.
-#' Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold, L.J.,
-#' 2006. Statistical modelling of single grain quartz De distributions and an
+#' Quaternary Geochronology 4, 204-230. \cr\cr 
+#' 
+#' Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an
 #' assessment of procedures for estimating burial dose. Quaternary Science
-#' Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
-#' Realizing the potential of fluvial archives using robust OSL chronologies.
-#' Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
-#' Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+#' Reviews 25, 2475-2502. \cr\cr 
+#' 
+#' Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies.
+#' Quaternary Geochronology 12, 98-106. \cr\cr 
+#' 
+#' Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
 #' of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
 #' \cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
 #' obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
+#' 
 #' @examples
 #'
 #' ## load example data
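
The transformation documented above can be sketched directly in R; this is only an
illustration of the documented recipe with made-up De values, not the package's
internal code (note that for the logged model sigmab enters as a fraction):

    de      <- c(45, 60, 72, 95, 110)  # hypothetical De values (Gy)
    log_de  <- log(de)                 # 1. natural logs
    mirror  <- -1 * log_de             # 2. mirror the distribution
    offset  <- abs(min(mirror))        # 3. shift so all values are positive
    x       <- mirror + offset
    # ... combine errors with sigmab in quadrature and apply the MAM to x ...
    # back-transformation of a MAM estimate 'gamma':
    # exp(-1 * (gamma - offset)) gives the maximum dose estimate in Gy
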
diff --git a/R/calc_MinDose.R b/R/calc_MinDose.R
index c494926..474a449 100644
--- a/R/calc_MinDose.R
+++ b/R/calc_MinDose.R
@@ -4,26 +4,35 @@
 #' Function to fit the (un-)logged three or four parameter minimum dose model
 #' (MAM-3/4) to De data.
 #'
-#' \bold{Parameters} \cr\cr This model has four parameters: \cr\cr
+#' \bold{Parameters} \cr\cr
+#' This model has four parameters: \cr\cr
 #' \tabular{rl}{ \code{gamma}: \tab minimum dose on the log scale \cr
 #' \code{mu}: \tab mean of the non-truncated normal distribution \cr
 #' \code{sigma}: \tab spread in ages above the minimum \cr \code{p0}: \tab
 #' proportion of grains at gamma \cr } If \code{par=3} (default) the
 #' 3-parametric minimum age model is applied, where \code{gamma=mu}. For
 #' \code{par=4} the 4-parametric model is applied instead.\cr\cr
-#' \bold{(Un-)logged model} \cr\cr In the original version of the
-#' three-parameter minimum dose model, the basic data are the natural
+#'
+#' \bold{(Un-)logged model} \cr\cr
+#' In the original version of the minimum dose model, the basic data are the natural
 #' logarithms of the De estimates and relative standard errors of the De
-#' estimates. This model will be applied if \code{log=TRUE}. \cr\cr If
-#' \code{log=FALSE}, the modified un-logged model will be applied instead. This
+#' estimates. The value for \code{sigmab} must be provided as a ratio
+#' (e.g., 0.2 for 20 \%). This model will be applied if \code{log=TRUE}. \cr\cr
+#'
+#' If \code{log=FALSE}, the modified un-logged model will be applied instead. This
 #' has essentially the same form as the original version.  \code{gamma} and
 #' \code{sigma} are in Gy and \code{gamma} becomes the minimum true dose in the
-#' population. \cr\cr While the original (logged) version of the mimimum dose
+#' population. \bold{Note} that the un-logged model requires \code{sigmab} to be in the same
+#' absolute unit as the provided De values (seconds or Gray). \cr\cr
+#'
 #' While the original (logged) version of the minimum dose
 #' model may be appropriate for most samples (i.e. De distributions), the
 #' modified (un-logged) version is specially designed for modern-age and young
 #' samples containing negative, zero or near-zero De estimates (Arnold et al.
-#' 2009, p. 323). \cr\cr \bold{Initial values & boundaries} \cr\cr The log
-#' likelihood calculations use the \link{nlminb} function for box-constrained
+#' 2009, p. 323). \cr\cr
+#'
+#' \bold{Initial values & boundaries} \cr\cr
+#' The log likelihood calculations use the \link{nlminb} function for box-constrained
 #' optimisation using PORT routines.  Accordingly, initial values for the four
 #' parameters can be specified via \code{init.values}. If no values are
 #' provided for \code{init.values} reasonable starting values are estimated
@@ -35,8 +44,9 @@
 #' boundary values use the arguments \code{gamma.lower}, \code{gamma.upper},
 #' \code{sigma.lower}, \code{sigma.upper}, \code{p0.lower}, \code{p0.upper},
 #' \code{mu.lower} and \code{mu.upper}.  \cr\cr
-#' \bold{Bootstrap} \cr\cr When
-#' \code{bootstrap=TRUE} the function applies the bootstrapping method as
+#'
+#' \bold{Bootstrap} \cr\cr
+#' When \code{bootstrap=TRUE} the function applies the bootstrapping method as
 #' described in Wallinga & Cunningham (2012). By default, the minimum age model
 #' produces 1000 first level and 3000 second level bootstrap replicates
 #' (actually, the number of second level bootstrap replicates is three times
@@ -46,8 +56,10 @@
 #' (second level replicates) and \code{sigmab.sd} (error on sigmab). With
 #' \code{bs.h} the bandwidth of the kernel density estimate can be specified.
 #' By default, \code{h} is calculated as \cr \deqn{h =
-#' (2*\sigma_{DE})/\sqrt{n}} \cr \bold{Multicore support} \cr\cr This function
-#' supports parallel computing and can be activated by \code{multicore=TRUE}.
+#' (2*\sigma_{DE})/\sqrt{n}} \cr
+#'
+#' \bold{Multicore support} \cr\cr
+#' This function supports parallel computing and can be activated by \code{multicore=TRUE}.
 #' By default, the number of available logical CPU cores is determined
 #' automatically, but can be changed with \code{cores}. The multicore support
 #' is only available when \code{bootstrap=TRUE} and spawns \code{n} R instances
@@ -57,40 +69,65 @@
 #' of bootstrap replicates. Also note that with each additional core and hence
 #' R instance and depending on the number of bootstrap replicates the memory
 #' usage can significantly increase. Make sure that memory is always available,
-#' otherwise there will be a massive perfomance hit.
+#' otherwise there will be a massive performance hit. \cr\cr
+#'
+#' \bold{Likelihood profiles}
+#'
+#' The likelihood profiles are generated and plotted by the \code{bbmle} package.
+#' The profile likelihood plots look different from ordinary profile likelihood plots, as \cr\cr
+#' "[...] the plot method for likelihood profiles displays the square root of
+#' the deviance difference (twice the difference in negative log-likelihood from
+#' the best fit), so it will be V-shaped for cases where the quadratic approximation
+#' works well [...]." (Bolker 2016). \cr\cr
+#' For more details on the profile likelihood
+#' calculations and plots please see the vignettes of the \code{bbmle} package
+#' (also available here: \url{https://CRAN.R-project.org/package=bbmle}).
 #'
 #' @param data \code{\linkS4class{RLum.Results}} or \link{data.frame}
 #' (\bold{required}): for \code{data.frame}: two columns with De \code{(data[
-#' ,1])} and De error \code{(values[ ,2])}
-#' @param sigmab \code{\link{numeric}} (\bold{required}): spread in De values
-#' given as a fraction (e.g. 0.2). This value represents the expected
-#' overdispersion in the data should the sample be well-bleached (Cunningham &
-#' Walling 2012, p. 100).
+#' ,1])} and De error \code{(data[ ,2])}.
+#'
+#' @param sigmab \code{\link{numeric}} (\bold{required}): additional spread in De values.
+#' This value represents the expected overdispersion in the data should the sample be
+#' well-bleached (Cunningham & Wallinga 2012, p. 100).
+#' \bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+#' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+#' sigmab must be provided in the same absolute units of the De values (seconds or Gray).
+#' See details.
+#'
 #' @param log \code{\link{logical}} (with default): fit the (un-)logged minimum
-#' dose model to De data
+#' dose model to De data.
+#'
 #' @param par \code{\link{numeric}} (with default): apply the 3- or
 #' 4-parametric minimum age model (\code{par=3} or \code{par=4}). The MAM-3 is
 #' used by default.
+#'
 #' @param bootstrap \code{\link{logical}} (with default): apply the recycled
 #' bootstrap approach of Cunningham & Wallinga (2012).
+#'
 #' @param init.values \code{\link{numeric}} (optional): a named list with
+#' starting values for gamma, sigma, p0 and mu (e.g. \code{list(gamma=100,
+#' sigma=1.5, p0=0.1, mu=100)}). If no values are provided, the function
+#' attempts to estimate reasonable starting values from the data.
+#'
 #' @param level \code{\link{logical}} (with default): the confidence level
 #' required (defaults to 0.95).
+#'
 #' @param plot \code{\link{logical}} (with default): plot output
 #' (\code{TRUE}/\code{FALSE})
+#'
 #' @param multicore \code{\link{logical}} (with default): enable parallel
 #' computation of the bootstrap by creating a multicore SNOW cluster. Depending
-#' on the number of available logical CPU cores this will drastically reduce
-#' the computation time. Note that this option is highly experimental and not
-#' work for all machines. (\code{TRUE}/\code{FALSE})
+#' on the number of available logical CPU cores this may drastically reduce
+#' the computation time. Note that this option is highly experimental and may not
+#' work on all machines. (\code{TRUE}/\code{FALSE})
+#'
 #' @param \dots (optional) further arguments for bootstrapping (\code{bs.M,
-#' bs.N, bs.h, sigmab.sd}).  See details for their usage. Further arguments are
+#' bs.N, bs.h, sigmab.sd}). See details for their usage. Further arguments are
 #' \code{verbose} to de-/activate console output (logical), \code{debug} for
 #' extended console output (logical) and \code{cores} (integer) to manually
 #' specify the number of cores to be used when \code{multicore=TRUE}.
+#'
 #' @return Returns a plot (optional) and terminal output. In addition an
 #' \code{\linkS4class{RLum.Results}} object is returned containing the
 #' following elements:
@@ -104,8 +141,8 @@
 #' \item{profile}{\link{profile.mle2} the log likelihood profiles}
 #' \item{bootstrap}{\link{list} bootstrap results}
 #'
-#' The output should be accessed using the function
-#' \code{\link{get_RLum}}
+#' The output should be accessed using the function \code{\link{get_RLum}}
+#'
 #' @note The default starting values for \emph{gamma}, \emph{mu}, \emph{sigma}
 #' and \emph{p0} may only be appropriate for some De data sets and may need to
 #' be changed for other data. This is especially true when the un-logged
@@ -113,43 +150,67 @@
 #' when running this function. If the results seem odd consider re-running the
 #' model with \code{debug=TRUE} which provides extended console output and
 #' forwards all internal warning messages.
-#' @section Function version: 0.4.3
+#'
+#' @section Function version: 0.4.4
+#'
 #' @author Christoph Burow, University of Cologne (Germany) \cr Based on a
 #' rewritten S script of Rex Galbraith, 2010 \cr The bootstrap approach is
 #' based on a rewritten MATLAB script of Alastair Cunningham. \cr Alastair
 #' Cunningham is thanked for his help in implementing and cross-checking the
 #' code.
+#'
 #' @seealso \code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
 #' \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
 #' \code{\link{calc_MaxDose}}
+#'
 #' @references Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
 #' 2009. A revised burial dose estimation procedure for optical dating of young
 #' and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+#'
 #' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
 #' track ages. Nuclear Tracks Radiation Measurements 4, 459-470. \cr\cr
+#'
 #' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
 #' 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
 #' northern Australia. Part I: experimental design and statistical models.
-#' Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
-#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
-#' R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+#' Archaeometry 41, 339-364. \cr\cr
+#'
+#' Galbraith, R.F., 2005. Statistics for
+#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr
+#'
+#' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
 #' calculation and display in OSL dating: An overview and some recommendations.
-#' Quaternary Geochronology 11, 1-27. \cr\cr \bold{Further reading} \cr\cr
-#' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain
-#' equivalent dose (De) distributions: Implications for OSL dating of sediment
-#' mixtures. Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold,
-#' L.J., 2006. Statistical modelling of single grain quartz De distributions
-#' and an assessment of procedures for estimating burial dose. Quaternary
-#' Science Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
-#' Realizing the potential of fluvial archives using robust OSL chronologies.
-#' Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
-#' Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+#' Quaternary Geochronology 11, 1-27. \cr\cr
+#'
+#' Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
+#' associated with human burials at Lake Mungo, Australia. Quaternary Science
+#' Reviews 25, 2469-2474.\cr\cr
+#'
+#' \bold{Further reading} \cr\cr
+#'
+#' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+#' (De) distributions: Implications for OSL dating of sediment mixtures.
+#' Quaternary Geochronology 4, 204-230. \cr\cr
+#' 
+#' Bolker, B., 2016. Maximum likelihood estimation analysis with the bbmle package.
+#' In: Bolker, B., R Development Core Team, 2016. bbmle: Tools for General Maximum Likelihood Estimation.
+#' R package version 1.0.18. https://CRAN.R-project.org/package=bbmle \cr\cr
+#' 
+#' Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an
+#' assessment of procedures for estimating burial dose. Quaternary Science
+#' Reviews 25, 2475-2502. \cr\cr
+#'
+#' Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies.
+#' Quaternary Geochronology 12, 98-106. \cr\cr
+#'
+#' Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
 #' of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
 #' \cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
 #' obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
-#' @examples
 #'
 #'
+#' @examples
+#'
 #' ## Load example data
 #' data(ExampleData.DeValues, envir = environment())
 #'
@@ -157,6 +218,7 @@
 #' # By default, this will apply the un-logged 3-parametric MAM.
 #' calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1)
 #'
+#' \dontrun{
 #' # (2) Re-run the model, but save results to a variable and turn
 #' # plotting of the log-likelihood profiles off.
 #' mam <- calc_MinDose(data = ExampleData.DeValues$CA1,
@@ -197,7 +259,7 @@
 #'                                   rho == .(round(res$p0, 2))))
 #'
 #'
-#' \dontrun{
+#'
 #' # (3) Run the minimum age model with bootstrap
 #' # NOTE: Bootstrapping is computationally intensive
 #' # (3.1) run the minimum age model with default values for bootstrapping
@@ -266,6 +328,11 @@ calc_MinDose <- function(
     }
   }
 
+  if (any(!complete.cases(data))) {
+    message(paste("\n[calc_MinDose] Warning:\nInput data contained NA/NaN values,",
+                  "which were removed prior to calculations!"))
+    data <- data[complete.cases(data), ]
+  }
 
   ##============================================================================##
   ## ... ARGUMENTS
@@ -344,11 +411,11 @@ calc_MinDose <- function(
   ##============================================================================##
 
   if (missing(init.values)) {
-    start <- list(gamma = ifelse(log, log(quantile(data[ ,1], probs = 0.25)),
-                                 quantile(data[ ,1], probs = 0.25)),
+    start <- list(gamma = ifelse(log, log(quantile(data[ ,1], probs = 0.25, na.rm = TRUE)),
+                                 quantile(data[ ,1], probs = 0.25, na.rm = TRUE)),
                   sigma = 1.2,
                   p0 = 0.01,
-                  mu = ifelse(log, log(quantile(data[ ,1], probs = 0.25)),
+                  mu = ifelse(log, log(quantile(data[ ,1], probs = 0.25, na.rm = TRUE)),
                               mean(data[ ,1])))
   } else {
     start <- list(gamma = init.values$gamma,
@@ -697,7 +764,7 @@ calc_MinDose <- function(
         kd1 <- dnorm(kdthis)
 
         kd2 <- kd1*prodterm[[i]]
-        kd <- sum(kd2)
+        kd <- sum(kd2, na.rm = TRUE)
         likelihood <- (1/(N*h))*kd
         pairs[i, ] <- c(theta[i], likelihood)
       }
@@ -793,8 +860,8 @@ calc_MinDose <- function(
 
       cat("\n------ De (asymmetric error) -----\n")
       print(round(data.frame(De=pal,
-                             "lower"=ifelse(log, ifelse(!invert, exp(conf["gamma",1]), exp((conf["gamma",2]-x.offset)*-1)), conf["gamma",1]),
-                             "upper"=ifelse(log, ifelse(!invert, exp(conf["gamma",2]), exp((conf["gamma",1]-x.offset)*-1)), conf["gamma",2]),
+                             "lower"=ifelse(log, exp(conf["gamma",1]), conf["gamma",1]),
+                             "upper"=ifelse(log, exp(conf["gamma",2]), conf["gamma",2]),
                              row.names=""), 2))
 
       cat("\n------ De (symmetric error) -----\n")
@@ -811,11 +878,15 @@ calc_MinDose <- function(
   ## RETURN VALUES
   ##============================================================================##
 
+  if (invert)
+    prof@profile$gamma$par.vals[ ,"gamma"] <- rev((prof@profile$gamma$par.vals[ ,"gamma"] - x.offset)*-1)
+
   if (!bootstrap)
     pairs <- poly.three <- poly.four <- poly.five <- poly.six <- loess <- NULL
 
   newRLumResults.calc_MinDose <- set_RLum(
     class = "RLum.Results",
+    originator = "calc_MinDose",
     data = list(summary = summary,
                 data = data,
                 args = args,
diff --git a/R/calc_OSLLxTxRatio.R b/R/calc_OSLLxTxRatio.R
index 495e9b1..709a94b 100644
--- a/R/calc_OSLLxTxRatio.R
+++ b/R/calc_OSLLxTxRatio.R
@@ -72,6 +72,10 @@
 #' the count distribution assumed for the error calculation. Possible arguments
 #' \code{poisson} or \code{non-poisson}. See details for further information
 #'
+#' @param use_previousBG \code{\link{logical}} (with default): If set to \code{TRUE} the background
+#' of the Lx-signal is also subtracted from the Tx-signal. Please note that in this case separate
+#' signal integral limits for the Tx signal are not allowed and will be reset.
+#'
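A minimal sketch of a call using this option (illustrative only, not part of the patch; Lx.data and Tx.data stand for hypothetical two-column data.frames of time and counts, the integral limits are arbitrary):

    LxTx <- calc_OSLLxTxRatio(Lx.data = Lx.data, Tx.data = Tx.data,
                              signal.integral = 1:2,
                              background.integral = 85:100,
                              use_previousBG = TRUE)
    get_RLum(LxTx, "LxTx.table")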
 #' @param sigmab \code{\link{numeric}} (optional): option to set a manual value for
 #' the overdispersion (for LnTx and TnTx), used for the Lx/Tx error
 #' calculation. The value should be provided as absolute squared count values,
@@ -87,6 +91,8 @@
 #' @return Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
 #'
 #' Slot \code{data} contains a \code{\link{list}} with the following structure:\cr
+#'
+#' \bold{@data}\cr
 #' $LxTx.table (data.frame) \cr
 #' .. $ LnLx \cr
 #' .. $ LnLx.BG \cr
@@ -101,6 +107,8 @@
 #' .. $ sigmab.LnTx\cr
 #' .. $ sigmab.TnTx\cr
 #' .. $ k \cr
+#'
+#' \bold{@info}\cr
 #' $ call (original function call)\cr
 #'
 #' @note The results of this function have been cross-checked with the Analyst
@@ -110,7 +118,7 @@
 #' own \code{sigmab} value or use \code{background.count.distribution = "poisson"}.
 #'
 #'
-#' @section Function version: 0.6.3
+#' @section Function version: 0.7.0
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -145,12 +153,13 @@
 #' @export
 calc_OSLLxTxRatio <- function(
   Lx.data,
-  Tx.data,
+  Tx.data = NULL,
   signal.integral,
   signal.integral.Tx = NULL,
   background.integral,
   background.integral.Tx = NULL,
   background.count.distribution = "non-poisson",
+  use_previousBG = FALSE,
   sigmab = NULL,
   sig0 = 0,
   digits = NULL
@@ -160,7 +169,7 @@ calc_OSLLxTxRatio <- function(
   ##(1) - integrity checks
 
 
-  if(missing(Tx.data) == FALSE){
+  if(!is.null(Tx.data)){
 
     ##(a) - check data type
     if(is(Lx.data)[1]!=is(Tx.data)[1]){
@@ -193,7 +202,7 @@ calc_OSLLxTxRatio <- function(
 
     ##(d) - check if Lx and Tx curves have the same channel length
     if(length(Lx.data[,2]) != length(Tx.data[,2])){
-      stop("[calc_OSLLxTxRatio()] Channel number of Lx and Tx data differs!")}
+      stop("[calc_OSLLxTxRatio()] Channel numbers of Lx and Tx data differ!")}
 
   }else{
 
@@ -233,6 +242,13 @@ calc_OSLLxTxRatio <- function(
   ##(h) - similar procedure for the Tx limits
   if(all(c(!is.null(signal.integral.Tx),!is.null(background.integral.Tx)))){
 
+    if(use_previousBG){
+      warning("[calc_OSLLxTxRatio()] For option use_previousBG = TRUE independent Lx and Tx integral limits are not allowed. Integral limits of Lx used for Tx.", call. = FALSE)
+      signal.integral.Tx <- signal.integral
+      background.integral.Tx <- background.integral
+
+    }
+
     if(min(signal.integral.Tx) < 1 | max(signal.integral.Tx>length(Tx.data[,2]))){
       stop("[calc_OSLLxTxRatio()] signal.integral.Tx is not valid!")}
 
@@ -247,6 +263,7 @@ calc_OSLLxTxRatio <- function(
     stop("[calc_OSLLxTxRatio()] You have to provide both: signal.integral.Tx and background.integral.Tx!")
 
   }else{
+
     signal.integral.Tx <- signal.integral
     background.integral.Tx <- background.integral
 
@@ -277,9 +294,17 @@ calc_OSLLxTxRatio <- function(
   k <- m/n
 
   n.Tx <- length(signal.integral.Tx)
-  m.Tx <- length(background.integral.Tx)
-  k.Tx <- m.Tx/n.Tx
 
+  ##use previous BG and account for the option to set different integral limits
+  if(use_previousBG){
+    m.Tx <- m
+
+  }else{
+    m.Tx <- length(background.integral.Tx)
+
+  }
+
+  k.Tx <- m.Tx/n.Tx
 
   ##LnLx (comments are corresponding variables to Galbraith, 2002)
   Lx.curve <- Lx.data[,2]
@@ -291,9 +316,19 @@ calc_OSLLxTxRatio <- function(
   ##TnTx
   Tx.curve <- ifelse(is.na(Tx.data[,1])==FALSE, Tx.data[,2], NA)
   Tx.signal <- sum(Tx.curve[signal.integral.Tx])
-  Tx.background <- sum(Tx.curve[background.integral.Tx])*1/k.Tx
+
+  ##use previous BG
+  if(use_previousBG){
+    Tx.background <- Lx.background
+
+  }else{
+    Tx.background <- sum(Tx.curve[background.integral.Tx])*1/k.Tx
+
+  }
+
   TnTx <- (Tx.signal-Tx.background)
 
+
   ##--------------------------------------------------------------------------##
   ##(3)
   ## calculate Lx/Tx Errors according Galbraith (2002) and the personal
@@ -317,11 +352,11 @@ calc_OSLLxTxRatio <- function(
 
     ##(b)(1)(1)
     ## note that m = n*k = multiple of background.integral from signal.integral
-    Y.i <- sapply(0:round(k,digits=0), function(i){
+    Y.i <- vapply(0:round(k,digits=0), function(i){
       sum(Lx.curve[
         (min(background.integral)+length(signal.integral)*i):
           (min(background.integral)+length(signal.integral)+length(signal.integral)*i)])
-    })
+    }, FUN.VALUE = vector(mode = "numeric", length = 1L))
 
     Y.i <- na.exclude(Y.i)
     sigmab.LnLx <- abs(var(Y.i) - mean(Y.i))  ##sigmab is denoted as sigma^2 = s.Y^2-Y.mean
@@ -349,13 +384,13 @@ calc_OSLLxTxRatio <- function(
     ##(b)(1)(1)
     ## note that m.Tx = n.Tx*k.Tx = multiple of background.integral.Tx from signal.integral.Tx
     ## also for the TnTx signal
-    Y.i_TnTx <- sapply(0:round(k.Tx, digits = 0), function(i) {
+    Y.i_TnTx <- vapply(0:round(k.Tx, digits = 0), function(i) {
       sum(Tx.curve[(min(background.integral.Tx) + length(signal.integral.Tx) *
                       i):(
                         min(background.integral.Tx) + length(signal.integral.Tx) + length(signal.integral.Tx) *
                           i
                       )])
-    })
+    }, FUN.VALUE = vector(mode = "numeric", length = 1L))
 
     Y.i_TnTx <- na.exclude(Y.i_TnTx)
     sigmab.TnTx <- abs(var(Y.i_TnTx) - mean(Y.i_TnTx))
@@ -363,7 +398,7 @@ calc_OSLLxTxRatio <- function(
   } else{
     ## provide warning if m is < 25, as suggested by Rex Galbraith
     ## low number of degree of freedom
-    if (m.Tx < 25) {
+    if (m.Tx < 25 && use_previousBG == FALSE) {
       warning("[calc_OSLLxTxRatio()] Number of background channels for Tx < 25; error estimation might be not reliable!", call. = FALSE)
 
     }
@@ -482,10 +517,11 @@ calc_OSLLxTxRatio <- function(
       class = "RLum.Results",
       data = list(
         LxTx.table = temp,
-        calc.parameters = calc.parameters,
-        call = sys.call())
+        calc.parameters = calc.parameters),
+      info = list(call = sys.call())
     )
 
   invisible(temp.return)
 
 }
+
diff --git a/R/calc_Statistics.R b/R/calc_Statistics.R
index c7823fe..704e1a7 100644
--- a/R/calc_Statistics.R
+++ b/R/calc_Statistics.R
@@ -1,9 +1,9 @@
 #' Function to calculate statistic measures
 #'
-#' This function calculates a number of descriptive statistics for De-data,
-#' most fundamentally using error-weighted approaches.
+#' This function calculates a number of descriptive statistics for estimates
+#' with a given standard error (SE), most fundamentally using error-weighted approaches.
 #'
-#' The option to use Monte Carlo Methods (\code{n.MCM > 0}) allows calculating
+#' The option to use Monte Carlo Methods (\code{n.MCM}) allows calculating
 #' all descriptive statistics based on random values. The distribution of these
 #' random values is based on the Normal distribution with \code{De} values as
 #' means and \code{De_error} values as one standard deviation. Increasing the
@@ -26,14 +26,14 @@
 #' specified digits. If digits is set to \code{NULL} nothing is rounded.
 #'
 #' @param n.MCM \code{\link{numeric}} (with default): number of samples drawn
-#' for Monte Carlo-based statistics. Set to zero to disable this option.
+#' for Monte Carlo-based statistics. \code{NULL} (the default) disables MC runs.
 #'
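A minimal usage sketch (illustrative only, not part of the patch; ExampleData.DeValues ships with the package, the MC sample size is arbitrary):

    data(ExampleData.DeValues, envir = environment())
    calc_Statistics(ExampleData.DeValues$BT998)                # weighted/unweighted measures only
    calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000)  # adds Monte Carlo-based estimates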
 #' @param na.rm \code{\link{logical}} (with default): indicating whether NA
 #' values should be stripped before the computation proceeds.
 #'
 #' @return Returns a list with weighted and unweighted statistic measures.
 #'
-#' @section Function version: 0.1.6
+#' @section Function version: 0.1.7
 #'
 #' @keywords datagen
 #'
@@ -64,17 +64,17 @@ calc_Statistics <- function(
   data,
   weight.calc = "square",
   digits = NULL,
-  n.MCM = 1000,
+  n.MCM = NULL,
   na.rm = TRUE
 ) {
 
   ## Check input data
   if(is(data, "RLum.Results") == FALSE &
        is(data, "data.frame") == FALSE) {
-    stop(paste("[calc_Statistics()] Input data format is neither",
-               "'data.frame' nor 'RLum.Results'"))
+    stop("[calc_Statistics()] Input data is neither of type 'data.frame' nor 'RLum.Results'", call. = FALSE)
+
   } else {
-    if(is(data, "RLum.Results") == TRUE) {
+    if(is(data, "RLum.Results")) {
       data <- get_RLum(data, "data")[,1:2]
     }
   }
@@ -93,7 +93,7 @@ calc_Statistics <- function(
   data[is.na(data[,2]),2] <- 0
 
   if(sum(data[,2]) == 0) {
-    warning("All errors are NA or zero! Automatically set to 10^-9!")
+    warning("[calc_Statistics()] All errors are NA or zero! Automatically set to 10^-9!", call. = FALSE)
     data[,2] <- rep(x = 10^-9, length(data[,2]))
   }
 
@@ -102,13 +102,13 @@ calc_Statistics <- function(
   } else if(weight.calc == "square") {
     S.weights <- 1 / data[,2]^2
   } else {
-    stop ("[calc_Statistics()] Weight calculation type not supported!")
+    stop ("[calc_Statistics()] Weight calculation type not supported!", call. = FALSE)
   }
 
   S.weights <- S.weights / sum(S.weights)
 
   ## create MCM data
-  if (n.MCM == 0) {
+  if (is.null(n.MCM)) {
     data.MCM <- cbind(data[, 1])
   } else {
     data.MCM <-
@@ -220,13 +220,13 @@ calc_Statistics <- function(
                        kurtosis = S.kurtosis)
 
   if(!is.null(digits)){
-
     S.unweighted  <- sapply(names(S.unweighted),
                             simplify = FALSE,
                             USE.NAMES = TRUE,
                             function(x) {
                               round(S.unweighted [[x]],
                                     digits = digits)})
+
   }
 
   S.MCM <- list(n = S.n,
diff --git a/R/calc_TLLxTxRatio.R b/R/calc_TLLxTxRatio.R
index ca0f067..d361617 100644
--- a/R/calc_TLLxTxRatio.R
+++ b/R/calc_TLLxTxRatio.R
@@ -2,7 +2,15 @@
 #'
 #' Calculate Lx/Tx ratio for a given set of TL curves.
 #'
-#' -
+#' \bold{Uncertainty estimation}\cr
+#'
+#' The standard errors are calculated using the following generalised equation:
+#'
+#' \deqn{SE_{signal} <- abs(Signal_{net} * BG_f /BG_{signal}}
+#'
+#' where \eqn{BG_f} is a term estimated by calculating the standard deviation of the sum of
+#' the \eqn{L_x} background counts and the sum of the \eqn{T_x} background counts. However,
+#' if both background sums are identical, this error term becomes zero.
 #'
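A small numeric sketch of these error terms, mirroring the implementation further below in this patch (illustrative only; the count values are arbitrary):

    LnLx <- 3000; TnTx <- 1500           # summed signal counts
    LnLx.BG <- 110; TnTx.BG <- 90        # summed background counts
    BG.Error <- sd(c(LnLx.BG, TnTx.BG))  # the BG_f term
    net_LnLx <- LnLx - LnLx.BG
    net_TnTx <- TnTx - TnTx.BG
    abs(net_LnLx * BG.Error / LnLx.BG)   # SE of the net Lx signal
    abs(net_TnTx * BG.Error / TnTx.BG)   # SE of the net Tx signal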
 #' @param Lx.data.signal \code{\linkS4class{RLum.Data.Curve}} or
 #' \code{\link{data.frame}} (\bold{required}): TL data (x =
@@ -33,9 +41,10 @@
 #' $ LxTx.table \cr .. $ LnLx \cr .. $ LnLx.BG \cr .. $ TnTx \cr .. $ TnTx.BG
 #' \cr .. $ Net_LnLx \cr .. $ Net_LnLx.Error\cr
 #'
-#' @note \bold{This function has still BETA status!}
+#' @note \bold{This function still has BETA status!} Please further note that an identical
+#' background for both curves results in a zero background error, which is therefore set to \code{NA}.
 #'
-#' @section Function version: 0.3.0
+#' @section Function version: 0.3.2
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France), Christoph Schmidt, University of Bayreuth (Germany)
@@ -72,9 +81,9 @@
 #' @export
 calc_TLLxTxRatio <- function(
   Lx.data.signal,
-  Lx.data.background,
+  Lx.data.background = NULL,
   Tx.data.signal,
-  Tx.data.background,
+  Tx.data.background = NULL,
   signal.integral.min,
   signal.integral.max
 ){
@@ -84,8 +93,8 @@ calc_TLLxTxRatio <- function(
   ##(1) - a few integrity check
 
      ##check for MISSING objects
-     if(missing(Lx.data.signal) == TRUE | missing(Tx.data.signal) == TRUE |
-        missing(signal.integral.min) == TRUE |  missing(signal.integral.max) == TRUE){
+     if(missing(Lx.data.signal) | missing(Tx.data.signal) |
+        missing(signal.integral.min) |  missing(signal.integral.max)){
 
        temp.missing <- paste(
                        c(if(missing(Lx.data.signal)){"Lx.data.signal"},
@@ -94,7 +103,7 @@ calc_TLLxTxRatio <- function(
                          if(missing(signal.integral.max)){"signal.integral.max"}),
                        collapse = ", ")
 
-          stop(paste("[calc_TLLxTxRatio()] Arguments are missing: ",temp.missing, ".", sep=""))
+          stop(paste("[calc_TLLxTxRatio()] Arguments are missing: ",temp.missing, ".", sep=""), call. = FALSE)
 
      }
 
@@ -135,7 +144,6 @@ calc_TLLxTxRatio <- function(
 
   ##(d) - check if Lx and Tx curves have the same channel length
      if(length(Lx.data.signal[,2])!=length(Tx.data.signal[,2])){
-
        stop("[calc_TLLxTxRatio()] Channel number of Lx and Tx data differs!")}
 
 
@@ -148,25 +156,20 @@ calc_TLLxTxRatio <- function(
 
 #  Background Consideration --------------------------------------------------
 
-
    ##Lx.data
-   if(missing(Lx.data.background)==FALSE){
-
+   if(!is.null(Lx.data.background)){
      LnLx.BG <- sum(Lx.data.background[signal.integral.min:signal.integral.max,2])
 
     }else{
-
      LnLx.BG <- NA
 
     }
 
    ##Tx.data
-      if(missing(Tx.data.background)==FALSE){
-
+      if(!is.null(Tx.data.background)){
         TnTx.BG <- sum(Tx.data.background[signal.integral.min:signal.integral.max,2])
 
       }else{
-
         TnTx.BG <- NA
 
       }
@@ -176,21 +179,27 @@ calc_TLLxTxRatio <- function(
     LnLx <- sum(Lx.data.signal[signal.integral.min:signal.integral.max,2])
     TnTx <- sum(Tx.data.signal[signal.integral.min:signal.integral.max,2])
 
-
      ##calculate variance of background
      if(is.na(LnLx.BG) == FALSE & is.na(TnTx.BG) == FALSE){
-
        BG.Error <- sd(c(LnLx.BG, TnTx.BG))
+
+       if(BG.Error == 0) {
+         warning(
+           "[calc_TLLxTxRatio()] The background signals for Lx and Tx appear to be similar, no background error was calculated.",
+           call. = FALSE
+         )
+         BG.Error <- NA
+
+       }
+
      }
 
 
     if(is.na(LnLx.BG) == FALSE){
-
       net_LnLx <-  LnLx - LnLx.BG
       net_LnLx.Error <- abs(net_LnLx * BG.Error/LnLx.BG)
 
     }else{
-
       net_LnLx <- NA
       net_LnLx.Error <- NA
 
@@ -209,8 +218,7 @@ calc_TLLxTxRatio <- function(
     }
 
 
-    if(is.na(net_TnTx) == TRUE){
-
+    if(is.na(net_TnTx)){
       LxTx <- LnLx/TnTx
       LxTx.Error <- NA
 
@@ -238,9 +246,11 @@ calc_TLLxTxRatio <- function(
 
 # Return values -----------------------------------------------------------
 
-   newRLumResults.calc_TLLxTxRatio <- set_RLum(
-     class = "RLum.Results",
-     data=list(LxTx.table = temp.results))
+    newRLumResults.calc_TLLxTxRatio <- set_RLum(
+      class = "RLum.Results",
+      data = list(LxTx.table = temp.results),
+      info = list(call = sys.call())
+    )
 
    return(newRLumResults.calc_TLLxTxRatio)
 
diff --git a/R/calc_gSGC.R b/R/calc_gSGC.R
index 1a7f10a..70c0744 100644
--- a/R/calc_gSGC.R
+++ b/R/calc_gSGC.R
@@ -147,7 +147,7 @@ calc_gSGC<- function(
     }
 
     ##Define size of output objects
-    output.data <- data.table(
+    output.data <- data.table::data.table(
       DE = numeric(length = nrow(data)),
       DE.ERROR =  numeric(length = nrow(data)),
       ETA =  numeric(length = nrow(data))
diff --git a/R/convert_BIN2CSV.R b/R/convert_BIN2CSV.R
new file mode 100644
index 0000000..682148c
--- /dev/null
+++ b/R/convert_BIN2CSV.R
@@ -0,0 +1,115 @@
+#' Export Risoe BIN-file(s) to CSV-files
+#'
+#' This function is a wrapper function around the functions \code{\link{read_BIN2R}} and
+#' \code{\link{write_RLum2CSV}} and it imports a Risoe BIN-file and directly exports its content to CSV-files.
+#' If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}}) the input folder will
+#' become the output folder.
+#'
+#' @param file \code{\link{character}} (\bold{required}): name of the BIN-file to be converted to CSV-files
+#'
+#' @param \dots further arguments that will be passed to the function \code{\link{read_BIN2R}} and \code{\link{write_RLum2CSV}}
+#'
+#' @return The function returns either a CSV-file (or many of them) or, for the option \code{export = FALSE},
+#' a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_BIN2R}}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' ##transform Risoe.BINfileData values to a list
+#' data(ExampleData.BINfileData, envir = environment())
+#' convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1), export = FALSE)
+#'
+#' \dontrun{
+#' ##select your BIN-file
+#' file <- file.choose()
+#'
+#' ##convert
+#' convert_BIN2CSV(file)
+#'
+#' }
+#'
+#' @export
+convert_BIN2CSV <- function(
+  file,
+  ...
+
+){
+
+  # General tests -------------------------------------------------------------------------------
+
+  ##file is missing?
+  if(missing(file)){
+    stop("[convert_BIN2CSV()] file is missing!", call. = FALSE)
+
+  }
+
+
+  ##set input arguments
+  convert_BIN2CSV_settings.default <- list(
+    path = if(!is(file, "Risoe.BINfileData")){dirname(file)}else{NULL},
+    show.raw.values = FALSE,
+    position = NULL,
+    n.records = NULL,
+    zero_data.rm = TRUE,
+    duplicated.rm = FALSE,
+    show.record.number = FALSE,
+    txtProgressBar = TRUE,
+    forced.VersionNumber = NULL,
+    ignore.RECTYPE = FALSE,
+    pattern = NULL,
+    verbose = TRUE,
+    export = TRUE
+
+  )
+
+  ##modify list on demand
+  convert_BIN2CSV_settings <- modifyList(x = convert_BIN2CSV_settings.default, val = list(...))
+
+  # Import file ---------------------------------------------------------------------------------
+  if(!is(file, "Risoe.BINfileData")){
+    object <- read_BIN2R(
+      file = file,
+      show.raw.values = convert_BIN2CSV_settings$show.raw.values,
+      position = convert_BIN2CSV_settings$position,
+      n.records =  convert_BIN2CSV_settings$n.records,
+      zero_data.rm = convert_BIN2CSV_settings$zero_data.rm,
+      duplicated.rm = convert_BIN2CSV_settings$duplicated.rm,
+      fastForward = TRUE,
+      show.record.number = convert_BIN2CSV_settings$show.record.number,
+      txtProgressBar = convert_BIN2CSV_settings$txtProgressBar,
+      forced.VersionNumber = convert_BIN2CSV_settings$forced.VersionNumber,
+      ignore.RECTYPE = convert_BIN2CSV_settings$ignore.RECTYPE,
+      pattern = convert_BIN2CSV_settings$pattern,
+      verbose = convert_BIN2CSV_settings$verbose
+   )
+
+  }else{
+   object <- Risoe.BINfileData2RLum.Analysis(file)
+
+
+  }
+
+  # Export to CSV -------------------------------------------------------------------------------
+
+  ##get all arguments we want to pass and remove the doubled one
+  arguments <- c(list(object = object, export = convert_BIN2CSV_settings$export), list(...))
+  arguments[duplicated(names(arguments))] <- NULL
+
+  ##this if-condition prevents NULL in the terminal
+  if(convert_BIN2CSV_settings$export == TRUE){
+    invisible(do.call("write_RLum2CSV", arguments))
+
+  }else{
+    do.call("write_RLum2CSV", arguments)
+
+  }
+
+}
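A minimal sketch of the argument pass-through described above (illustrative only, not part of the patch; the file name is hypothetical): 'position' is forwarded to read_BIN2R(), 'path' to write_RLum2CSV().

    convert_BIN2CSV(file = "my_measurement.binx", position = 1, path = tempdir())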
diff --git a/R/convert_Daybreak2CSV.R b/R/convert_Daybreak2CSV.R
new file mode 100644
index 0000000..1410e2f
--- /dev/null
+++ b/R/convert_Daybreak2CSV.R
@@ -0,0 +1,91 @@
+#' Export measurement data produced by a Daybreak luminescence reader to CSV-files
+#'
+#' This function is a wrapper function around the functions \code{\link{read_Daybreak2R}} and
+#' \code{\link{write_RLum2CSV}} and it imports a Daybreak-file (TXT-file, DAT-file)
+#' and directly exports its content to CSV-files. If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}})
+#' the input folder will become the output folder.
+#'
+#' @param file \code{\link{character}} (\bold{required}): name of the Daybreak-file (TXT-file, DAT-file) to be converted to CSV-files
+#'
+#' @param \dots further arguments that will be passed to the function \code{\link{read_Daybreak2R}} and \code{\link{write_RLum2CSV}}
+#'
+#' @return The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+#' a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_Daybreak2R}}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' \dontrun{
+#' ##select your Daybreak-file
+#' file <- file.choose()
+#'
+#' ##convert
+#' convert_Daybreak2CSV(file)
+#'
+#' }
+#'
+#' @export
+convert_Daybreak2CSV <- function(
+  file,
+  ...
+
+){
+
+  # General tests -------------------------------------------------------------------------------
+
+  ##file is missing?
+  if(missing(file)){
+    stop("[convert_Daybreak2R()] file is missing!", call. = FALSE)
+
+  }
+
+
+  ##set input arguments
+  convert_Daybreak2R_settings.default <- list(
+    raw = FALSE,
+    verbose = TRUE,
+    txtProgressBar = TRUE,
+    export = TRUE
+  )
+
+  ##modify list on demand
+  convert_Daybreak2R_settings <- modifyList(x = convert_Daybreak2R_settings.default, val = list(...))
+
+  # Import file ---------------------------------------------------------------------------------
+  if(!inherits(file, "RLum")){
+    object <- read_Daybreak2R(
+      file = file,
+      raw = convert_Daybreak2R_settings$raw,
+      verbose = convert_Daybreak2R_settings$verbose,
+      txtProgressBar = convert_Daybreak2R_settings$txtProgressBar
+
+   )
+  }else{
+    object <- file
+
+  }
+
+  # Export to CSV -------------------------------------------------------------------------------
+
+  ##get all arguments we want to pass and remove the doubled one
+  arguments <- c(list(object = object, export = convert_Daybreak2R_settings$export), list(...))
+  arguments[duplicated(names(arguments))] <- NULL
+
+  ##this if-condition prevents NULL in the terminal
+  if(convert_Daybreak2R_settings$export == TRUE){
+    invisible(do.call("write_RLum2CSV", arguments))
+
+  }else{
+    do.call("write_RLum2CSV", arguments)
+
+  }
+
+}
diff --git a/R/convert_PSL2CSV.R b/R/convert_PSL2CSV.R
new file mode 100644
index 0000000..cd54cf0
--- /dev/null
+++ b/R/convert_PSL2CSV.R
@@ -0,0 +1,94 @@
+#' Export PSL-file(s) to CSV-files
+#'
+#' This function is a wrapper function around the functions \code{\link{read_PSL2R}} and
+#' \code{\link{write_RLum2CSV}} and it imports a PSL-file (SUERC portable OSL reader file format)
+#' and directly exports its content to CSV-files.
+#' If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}}) the input folder will
+#' become the output folder.
+#'
+#' @param file \code{\link{character}} (\bold{required}): name of the PSL-file to be converted to CSV-files
+#'
+#' @param \dots further arguments that will be passed to the function \code{\link{read_PSL2R}} and \code{\link{write_RLum2CSV}}
+#'
+#' @return The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+#' a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_PSL2R}}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' \dontrun{
+#' ##select your PSL-file
+#' file <- file.choose()
+#'
+#' ##convert
+#' convert_PSL2CSV(file)
+#'
+#' }
+#'
+#' @export
+convert_PSL2CSV <- function(
+  file,
+  ...
+
+){
+
+  # General tests -------------------------------------------------------------------------------
+
+  ##file is missing?
+  if(missing(file)){
+    stop("[convert_PSL2R()] file is missing!", call. = FALSE)
+
+  }
+
+
+  ##set input arguments
+  convert_PSL2R_settings.default <- list(
+    drop_bg = FALSE,
+    as_decay_curve = TRUE,
+    smooth = FALSE,
+    merge = FALSE,
+    export = TRUE
+  )
+
+  ##modify list on demand
+  convert_PSL2R_settings <- modifyList(x = convert_PSL2R_settings.default, val = list(...))
+
+  # Import file ---------------------------------------------------------------------------------
+  if(!inherits(file, "RLum")){
+    object <- read_PSL2R(
+      file = file,
+      drop_bg = convert_PSL2R_settings$drop_bg,
+      as_decay_curve = convert_PSL2R_settings$as_decay_curve,
+      smooth = convert_PSL2R_settings$smooth,
+      merge = convert_PSL2R_settings$merge
+
+   )
+  }else{
+    object <- file
+
+  }
+
+  # Export to CSV -------------------------------------------------------------------------------
+
+  ##get all arguments we want to pass and remove the doubled one
+  arguments <- c(list(object = object, export = convert_PSL2R_settings$export), list(...))
+  arguments[duplicated(names(arguments))] <- NULL
+
+  ##this if-condition prevents NULL in the terminal
+  if(convert_PSL2R_settings$export == TRUE){
+    invisible(do.call("write_RLum2CSV", arguments))
+
+  }else{
+    do.call("write_RLum2CSV", arguments)
+
+  }
+
+}
diff --git a/R/convert_XSYG2CSV.R b/R/convert_XSYG2CSV.R
new file mode 100644
index 0000000..f37f81b
--- /dev/null
+++ b/R/convert_XSYG2CSV.R
@@ -0,0 +1,97 @@
+#' Export XSYG-file(s) to CSV-files
+#'
+#' This function is a wrapper function around the functions \code{\link{read_XSYG2R}} and
+#' \code{\link{write_RLum2CSV}} and it imports an XSYG-file and directly exports its content to CSV-files.
+#' If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}}) the input folder will
+#' become the output folder.
+#'
+#' @param file \code{\link{character}} (\bold{required}): name of the XSYG-file to be converted to CSV-files
+#'
+#' @param \dots further arguments that will be passed to the function \code{\link{read_XSYG2R}} and \code{\link{write_RLum2CSV}}
+#'
+#' @return The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+#' a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_XSYG2R}}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' ##transform XSYG-file values to a list
+#' data(ExampleData.XSYG, envir = environment())
+#' convert_XSYG2CSV(OSL.SARMeasurement$Sequence.Object[1:10], export = FALSE)
+#'
+#' \dontrun{
+#' ##select your XSYG-file
+#' file <- file.choose()
+#'
+#' ##convert
+#' convert_XSYG2CSV(file)
+#'
+#' }
+#'
+#' @export
+convert_XSYG2CSV <- function(
+  file,
+  ...
+
+){
+
+  # General tests -------------------------------------------------------------------------------
+
+  ##file is missing?
+  if(missing(file)){
+    stop("[convert_XSYG2R()] file is missing!", call. = FALSE)
+
+  }
+
+
+  ##set input arguments
+  convert_XSYG2R_settings.default <- list(
+    recalculate.TL.curves = TRUE,
+    pattern = ".xsyg",
+    txtProgressBar = TRUE,
+    export = TRUE
+
+  )
+
+  ##modify list on demand
+  convert_XSYG2R_settings <- modifyList(x = convert_XSYG2R_settings.default, val = list(...))
+
+  # Import file ---------------------------------------------------------------------------------
+  if(!inherits(file, "RLum")){
+    object <- read_XSYG2R(
+      file = file,
+      fastForward = TRUE,
+      recalculate.TL.curves = convert_XSYG2R_settings$recalculate.TL.curves,
+      pattern = convert_XSYG2R_settings$pattern,
+      txtProgressBar = convert_XSYG2R_settings$txtProgressBar
+
+   )
+  }else{
+    object <- file
+
+  }
+
+  # Export to CSV -------------------------------------------------------------------------------
+
+  ##get all arguments we want to pass and remove the doubled one
+  arguments <- c(list(object = object, export = convert_XSYG2R_settings$export), list(...))
+  arguments[duplicated(names(arguments))] <- NULL
+
+  ##this if-condition prevents NULL in the terminal
+  if(convert_XSYG2R_settings$export == TRUE){
+    invisible(do.call("write_RLum2CSV", arguments))
+
+  }else{
+    do.call("write_RLum2CSV", arguments)
+
+  }
+
+}
diff --git a/R/extract_IrradiationTimes.R b/R/extract_IrradiationTimes.R
index 0230c7e..9b5fdf2 100644
--- a/R/extract_IrradiationTimes.R
+++ b/R/extract_IrradiationTimes.R
@@ -1,14 +1,14 @@
-#' Extract irradiation times from an XSYG file
+#' Extract Irradiation Times from an XSYG-file
 #'
 #' Extracts irradiation times, dose and times since last irradiation, from a
 #' Freiberg Instruments XSYG-file. This information can be further used to
-#' update an existing BINX-file
+#' update an existing BINX-file.
 #'
 #' The function was written to compensate missing information in the BINX-file
 #' output of Freiberg Instruments lexsyg readers. As all information is
 #' available within the XSYG-file anyway, this information can be extracted
 #' and used for further analysis and/or stored in a new BINX-file, which can
-#' be further used by other software, e.g. Analyst (Geoff Duller). \cr
+#' be further used by other software, e.g., Analyst (Geoff Duller). \cr
 #'
 #' Typical application example: g-value estimation from fading measurements
 #' using the Analyst or any other self written script.\cr
@@ -55,8 +55,8 @@
 #' If a BINX-file path and name is set, the output will be additionally
 #' transferred into a new BINX-file with the function name as suffix. For the
 #' output the path of the input BINX-file itself is used. Note that this will
-#' not work if the input object is a file path to an XSYG-file. In this case
-#' the argument input is ignored.\cr
+#' not work if the input object is a file path to an XSYG-file rather than a
+#' link to a single file. In this case the argument \code{file.BINX} is ignored.\cr
 #'
 #' In the self call mode (input is a \code{list} of \code{\linkS4class{RLum.Analysis}} objects)
 #' a list of \code{\linkS4class{RLum.Results}} is returned.
@@ -66,21 +66,24 @@
 #' are removed as the BINX-file format description does not allow irradiations
 #' as separate sequence steps.\cr
 #'
-#' Know issue: The 'fading correction' menu in the Analyst will not work appear
-#' with the produced BIN/BINX-file due to hidden bits, which are not reproduced
-#' by the function \code{write_R2BIN()} or if it appears it stops with a
-#' floating point error. \cr
+#' BINX-file 'Time Since Irradiation' value differs from the table output?\cr
 #'
-#' Negative values for \code{TIMESINCELAS.STEP}? Yes, this is possible and no
-#' bug, as in the XSYG file multiple curves are stored for one step. Example: A
-#' TL step may comprise three curves: (a) counts vs. time, (b) measured
+#' The way the value 'Time Since Irradiation' is defined differs. In the BINX-file the
+#' 'Time Since Irradiation' is the time since the end of the irradiation plus the 'Irradiation
+#' Time'. The table output returns only the real 'Time Since Irradiation', i.e. the time between the
+#' end of the irradiation and the next step.
+#'
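A small numeric sketch of this redefinition, matching the METADATA update further below in this patch (illustrative only; the values are arbitrary):

    IRR_TIME     <- 100       # irradiation time [s]
    TIMESINCEIRR <- 350       # time between end of irradiation and next step [s]
    IRR_TIME + TIMESINCEIRR   # 450 s, the value written to the exported BINX-file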
+#' Negative values for \code{TIMESINCELAS.STEP}? \cr
+#'
+#' Yes, this is possible and no bug, as in the XSYG-file multiple curves are stored for one step.
+#' Example: A TL step may comprise three curves: (a) counts vs. time, (b) measured
 #' temperature vs. time and (c) predefined temperature vs. time. Three curves,
 #' but they are all belonging to one TL measurement step, but with regard to
 #' the time stamps this could produce negative values as the important function
 #' (\code{\link{read_XSYG2R}}) do not change the order of entries for one step
 #' towards a correct time order.
 #'
-#' @section Function version: 0.3.0
+#' @section Function version: 0.3.1
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -89,7 +92,8 @@
 #' \code{\linkS4class{RLum.Results}}, \code{\linkS4class{Risoe.BINfileData}},
 #' \code{\link{read_XSYG2R}}, \code{\link{read_BIN2R}}, \code{\link{write_R2BIN}}
 #'
-#' @references Duller, G., 2007. Analyst.
+#' @references Duller, G.A.T., 2015. The Analyst software package for luminescence data: overview and
+#' recent improvements. Ancient TL 33, 35-42.
 #'
 #' @keywords IO manip
 #'
@@ -200,8 +204,7 @@ extract_IrradiationTimes <- function(
 
       ##check if file exists
       if(file.exists(file.BINX) == FALSE){
-
-        stop("[extract_IrradiationTimes()] Wrong BINX file name or file does not exsits!")
+        stop("[extract_IrradiationTimes()] Wrong BINX file name or file does not exist!", call. = FALSE)
 
       }
 
@@ -209,7 +212,7 @@ extract_IrradiationTimes <- function(
       if(tail(unlist(strsplit(file.BINX, split = "\\.")), 1) != "binx" &
            tail(unlist(strsplit(file.BINX, split = "\\.")), 1) != "BINX" ){
 
-        stop("[extract_IrradiationTimes()] File is not of type 'BINX'!")
+        stop("[extract_IrradiationTimes()] File is not of type 'BINX'!", call. = FALSE)
 
       }
 
@@ -414,8 +417,12 @@ extract_IrradiationTimes <- function(
     ##(1) remove all irradiation steps as there is no record in the BINX file and update information
     results.BINX <- results[-which(results[,"STEP"] == "irradiation (NA)"),]
 
-    ##(1a)  update information
-    temp.BINX@METADATA[,c("IRR_TIME", "TIMESINCEIRR")] <- results.BINX[,c("IRR_TIME","TIMESINCEIRR")]
+    ##(1a)  update information on the irradiation time
+    temp.BINX at METADATA[["IRR_TIME"]] <- results.BINX[["IRR_TIME"]]
+
+    ##(1b) update information on the time since irradiation by using the Risoe definition of this
+    ##parameter, to make the file compatible with the Analyst
+    temp.BINX@METADATA[["TIMESINCEIRR"]] <- results.BINX[["IRR_TIME"]] + results.BINX[["TIMESINCEIRR"]]
 
     ##(2) compare entries in the BINX-file with the entries in the table to make sure
     ## that both have the same length
@@ -423,16 +430,21 @@ extract_IrradiationTimes <- function(
       if(nrow(results.BINX) == nrow(temp.BINX at METADATA)){
 
         ##update BINX-file
-        write_R2BIN(temp.BINX, version = "06",
+        try <- write_R2BIN(temp.BINX, version = "06",
                    file = paste0(file.BINX,"_extract_IrradiationTimes.BINX"),
                    compatibility.mode =  compatibility.mode,
                    txtProgressBar = txtProgressBar)
 
+        ##set message on the format definition
+        if(!inherits(x = try, 'try-error')){
+          message("[extract_IrradiationTimes()] 'Time Since Irradiation' was redefined in the exported BINX-file to: 'Time Since Irradiation' plus the 'Irradiation Time' to be compatible with the Analyst.")
+        }
+
 
       }
     }else{
-
-      warning("XSYG and BINX-file do not contain similar entries. BINX-file update skipped!")
+      try(
+        stop("[extract_IrradiationTimes()] XSYG-file and BINX-file did not contain similar entries. BINX-file update skipped!",call. = FALSE))
 
     }
   }
@@ -441,3 +453,4 @@ extract_IrradiationTimes <- function(
   # Output --------------------------------------------------------------------------------------
   return(set_RLum(class = "RLum.Results", data = list(irr.times = results)))
 }
+
diff --git a/R/fit_CWCurve.R b/R/fit_CWCurve.R
index 17808c7..95ff641 100644
--- a/R/fit_CWCurve.R
+++ b/R/fit_CWCurve.R
@@ -8,9 +8,9 @@
 #' \bold{Fitting function}\cr\cr The function for the CW-OSL fitting has the
 #' general form: \deqn{y = I0_{1}*\lambda_{1}*exp(-\lambda_1*x) + ,\ldots, +
 #' I0_{i}*\lambda_{i}*exp(-\lambda_i*x) } where \eqn{0 < i < 8}\cr\cr and
-#' \eqn{\lambda} is the decay constant and \eqn{N0} the intial number of
+#' \eqn{\lambda} is the decay constant and \eqn{I0} the initial number of
 #' trapped electrons.\cr (for the used equation cf. Boetter-Jensen et al.,
-#' 2003)\cr\cr \bold{Start values}\cr
+#' 2003, Eq. 2.31)\cr\cr \bold{Start values}\cr
 #'
 #' Start values are estimated automatically by fitting a linear function to the
 #' logarithmized input data set. Currently, there is no option to manually
@@ -104,9 +104,12 @@
 #' is currently not considered.\cr\cr The function \bold{does not} ensure that
 #' the fitting procedure has reached a global minimum rather than a local
 #' minimum!
-#' @section Function version: 0.5.1
+#'
+#' @section Function version: 0.5.2
+#'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
+#'
 #' @seealso \code{\link{fit_LMCurve}}, \code{\link{plot}},\code{\link{nls}},
 #' \code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Results}},
 #' \code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}
@@ -156,7 +159,7 @@ fit_CWCurve<- function(
 
   ##INPUT OBJECTS
   if(is(values, "RLum.Data.Curve") == FALSE & is(values, "data.frame") == FALSE){
-    stop("[fit_CWCurve()] Input object is not of type 'RLum.Data.Curve' or 'data.frame'!")
+    stop("[fit_CWCurve()] Input object is not of type 'RLum.Data.Curve' or 'data.frame'!", call. = FALSE)
   }
 
 
@@ -317,7 +320,7 @@ fit_CWCurve<- function(
 
     }else{
 
-      stop("[fit_CWCurve()] fit.method unknown.")
+      stop("[fit_CWCurve()] fit.method unknown.", call. = FALSE)
 
     }
 
@@ -345,15 +348,15 @@ fit_CWCurve<- function(
                                               maxiter = 500
                                             )),
                                       silent = TRUE))
-        
-        ## HACK: 
+
+        ## HACK:
         # minpack.lm::nlsLM() stores the 'lower' argument as class "call" rather
         # than "numeric" as nls() does. Before running confint() on this object
-        # we overwrite the "lower" slot with the numeric values again. 
+        # we overwrite the "lower" slot with the numeric values again.
         if (!inherits(fit.try, "try-error")) {
           fit.try$call$lower <- rep(0,n.components * 2)
         }
-        
+
       }else{
 
 
@@ -375,7 +378,7 @@ fit_CWCurve<- function(
 
       }#fit.method
     }
-    
+
     ##count failed attempts for fitting
     if(inherits(fit.try,"try-error")==FALSE){
 
@@ -803,9 +806,12 @@ fit_CWCurve<- function(
   newRLumResults.fit_CWCurve <- set_RLum(
     class = "RLum.Results",
     data = list(
+      data = output.table,
       fit = fit,
-      output.table = output.table,
-      component.contribution.matrix = list(component.contribution.matrix)))
+      component.contribution.matrix = list(component.contribution.matrix)
+    ),
+    info = list(call = sys.call())
+  )
 
   rm(fit)
   rm(output.table)
diff --git a/R/fit_LMCurve.R b/R/fit_LMCurve.R
index c286b4d..79b13df 100644
--- a/R/fit_LMCurve.R
+++ b/R/fit_LMCurve.R
@@ -122,10 +122,12 @@
 #' Furthermore an \code{RLum.Results} object is returned with the following structure:\cr
 #'
 #' data:\cr
+#' .. $data : \code{data.frame} with fitting results\cr
 #' .. $fit : \code{nls} (nls object)\cr
-#' .. $output.table : \code{data.frame} with fitting results\cr
 #' .. $component.contribution.matrix : \code{list} component distribution matrix\cr
-#' .. $call : \code{call} the original function call
+#'
+#' info:\cr
+#' .. $call : \code{call} the original function call\cr
 #'
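A minimal sketch of accessing the restructured output (illustrative only, not part of the patch; 'values.curve' stands for a hypothetical LM-OSL curve given as a two-column data.frame of time and counts):

    fit <- fit_LMCurve(values = values.curve, n.components = 3)
    get_RLum(fit, "data")                           # fitting results table
    get_RLum(fit, "component.contribution.matrix")  # component distribution matrix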
 #' Matrix structure for the distribution matrix:\cr
 #'
@@ -143,7 +145,7 @@
 #' global minimum rather than a local minimum! In any case of doubt, the use of
 #' manual start values is highly recommended.
 #'
-#' @section Function version: 0.3.1
+#' @section Function version: 0.3.2
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -998,15 +1000,14 @@ fit_LMCurve<- function(
   ##============================================================================#
   ## Return Values
   ##============================================================================#
-
   newRLumResults.fit_LMCurve <- set_RLum(
     class = "RLum.Results",
     data = list(
+      data = output.table,
       fit = fit,
-      output.table = output.table,
-      component.contribution.matrix = list(component.contribution.matrix),
-      call = sys.call()
-    )
+      component.contribution.matrix = list(component.contribution.matrix)
+    ),
+    info = list(call = sys.call())
   )
 
   invisible(newRLumResults.fit_LMCurve)
diff --git a/R/get_Layout.R b/R/get_Layout.R
index 128143c..00d5e19 100644
--- a/R/get_Layout.R
+++ b/R/get_Layout.R
@@ -217,7 +217,8 @@ get_Layout <- function(
             value.dot       = numeric(1),  # De value dot colour
             value.bar       = numeric(1),  # De value error bar colour
             value.rug       = numeric(1),  # De value rug colour
-            boxplot         = numeric(1),  # boxplot colour
+            boxplot.line    = numeric(1), # boxplot line colour
+            boxplot.fill    = numeric(1), # boxplot fill colour
             mean.line       = numeric(1),  # mean line colour
             sd.bar          = numeric(1),  # sd-line colour
             background      = numeric(1)), # background colour
@@ -403,13 +404,14 @@ get_Layout <- function(
             ytck2  = 1, # secondary y-axis tick colour
             box    = 1, # plot frame box line colour
             mtext  = 2, # subheader text colour
-            stats  = "#2062B3", # statistic summary colour
-            kde.line        = "#2062B3", # KDE line colour
+            stats  = 1, # statistic summary colour
+            kde.line        = 1, # KDE line colour
             kde.fill        = NULL, # KDE fill colour
             value.dot       = 1, # De value dot colour
             value.bar       = 1, # De value error bar colour
             value.rug       = 1, # De value rug colour
-            boxplot         = 1, # boxplot colour
+            boxplot.line    = 1, # boxplot line colour
+            boxplot.fill    = NULL, # boxplot fill colour
             mean.point       = 1, # mean line colour
             sd.line          = 1, # sd bar colour
             background      = NULL), # background colour
@@ -602,7 +604,8 @@ get_Layout <- function(
             value.dot       = 1, # De value dot colour
             value.bar       = 1, # De value error bar colour
             value.rug       = 1, # De value rug colour
-            boxplot         = 1, # boxplot colour
+            boxplot.line    = 1, # boxplot line colour
+            boxplot.fill    = NULL, # boxplot fill colour
             mean.line       = adjustcolor(col = 1, 
                                           alpha.f = 0.4), # mean line colour
             sd.bar          = adjustcolor(col = 1, 
@@ -641,3 +644,4 @@ get_Layout <- function(
   ## return layout parameters
   return(layout)
 }
+
diff --git a/R/get_Quote.R b/R/get_Quote.R
index 78c7a6b..e836bfc 100644
--- a/R/get_Quote.R
+++ b/R/get_Quote.R
@@ -60,7 +60,9 @@ get_Quote <- function(
     c("An unbiased reviewer", "The data is too poor to be published in QG, try a higher ranked journal."),
     c("R Team member, asked about statistical details", "No idea, I'm just here for visualisation."),
     c("An arbitrary unexperienced RLum-user", "Little by little, the bird builds its nest."),
-    c("The answer to life, the universe and everything", "get_rightAnswer()")
+    c("The answer to life, the universe and everything", "get_rightAnswer()"),
+    c("Der Tatortreiniger", "Dreck ist nur Materie am falschen Ort."),
+    c("Die Ex vom Tatortreiniger", "Das Ziel ist im Weg.")
 
     )
 
diff --git a/R/get_RLum.R b/R/get_RLum.R
index f4a6879..f7a2e28 100644
--- a/R/get_RLum.R
+++ b/R/get_RLum.R
@@ -61,57 +61,54 @@ setGeneric("get_RLum", function (object, ...) {standardGeneric("get_RLum") })
 setMethod("get_RLum",
           signature = "list",
           function(object, null.rm = FALSE, ...){
-
-
+            
+            
             selection <- lapply(1:length(object), function(x){
-
+              
               ##get rid of all objects that are not of type RLum, this is better than leaving that
               ##to the user
               if(inherits(object[[x]], what = "RLum")){
-
+                
                 ##it might be the case the object already comes with empty objects, this would
                 ##cause a crash
-                if(is(object[[x]], "RLum.Analysis") && length(object[[x]]@records) != 0){
-                  get_RLum(object[[x]],...)
-
-                }else{
+                if(is(object[[x]], "RLum.Analysis") && length(object[[x]]@records) == 0)
                   return(NULL)
-
-                }
-
-              }else{
-
-               warning(paste0("[get_RLum()] object #",x," in the list was not of type 'RLum' and has been removed!"),
-                       call. = FALSE)
+                
+                get_RLum(object[[x]], ...)
+                  
+                
+              } else {
+                
+                warning(paste0("[get_RLum()] object #",x," in the list was not of type 'RLum' and has been removed!"),
+                        call. = FALSE)
                 return(NULL)
-
+                
               }
-
+              
             })
-
-
+            
             ##remove empty or NULL objects after the selection ... if wanted
             if(null.rm){
-
-
-                ##first set all empty objects to NULL ... for RLum.Analysis objects
-                selection <- lapply(1:length(selection), function(x){
-                  if(is(selection[[x]], "RLum.Analysis") && length(selection[[x]]@records) == 0){
-                    return(NULL)
-
-                  }else{
-                    return(selection[[x]])
-
-                  }
-
-                })
-
-                ##get rid of all NULL objects
-                selection <- selection[!sapply(selection, is.null)]
-
-
+              
+              
+              ##first set all empty objects to NULL ... for RLum.Analysis objects
+              selection <- lapply(1:length(selection), function(x){
+                if(is(selection[[x]], "RLum.Analysis") && length(selection[[x]]@records) == 0){
+                  return(NULL)
+                  
+                }else{
+                  return(selection[[x]])
+                  
+                }
+                
+              })
+              
+              ##get rid of all NULL objects
+              selection <- selection[!sapply(selection, is.null)]
+              
+              
             }
-
+            
             return(selection)
-
+            
           })
diff --git a/R/github.R b/R/github.R
new file mode 100644
index 0000000..09145fb
--- /dev/null
+++ b/R/github.R
@@ -0,0 +1,217 @@
+#  ------------------------------------------------------------------------
+# Author: Christoph Burow <christoph.burow at uni-koeln.de>
+# Affiliation: University of Cologne
+# Date: 10/01/2017
+# API version: v3
+# Reference: https://developer.github.com/v3/
+#  ------------------------------------------------------------------------
+
+#' GitHub API
+#' 
+#' R Interface to the GitHub API v3.
+#' 
+#' These functions can be used to query a specific repository hosted on GitHub. \cr
+#' 
+#' 
+#' @param user \code{\link{character}}: 
+#' GitHub user name (defaults to 'r-lum').
+#' 
+#' @param repo \code{\link{character}}: 
+#' name of a GitHub repository (defaults to 'luminescence').
+#' 
+#' @param branch \code{\link{character}}: 
+#' branch of a GitHub repository (defaults to 'master').
+#' 
+#' @param n \code{\link{integer}}:
+#' number of commits returned (defaults to 5).
+#' 
+#' @param verbose \code{\link{logical}}: 
+#' print the output to the console (defaults to \code{TRUE}).
+#' 
+#' @author Christoph Burow, University of Cologne (Germany)
+#' 
+#' @section Function version: 0.1.0
+#' 
+#' @references 
+#' 
+#' GitHub Developer API v3. \url{https://developer.github.com/v3/}, last accessed: 10/01/2017.
+#' 
+#' @examples
+#' 
+#' \dontrun{
+#' github_branches(user = "r-lum", repo = "luminescence")
+#' github_issues(user = "r-lum", repo = "luminescence")
+#' github_commits(user = "r-lum", repo = "luminescence", branch = "master", n = 10)
+#' }
+#' 
+#' @name GitHub-API
+NULL
+
+# COMMITS -----------------------------------------------------------------
+#' @rdname GitHub-API
+#' 
+#' @details 
+#' \code{github_commits} lists the most recent \code{n} commits of a specific
+#' branch of a repository.
+#' 
+#' @return 
+#' \code{github_commits}: \code{\link{data.frame}} with columns:
+#' \tabular{ll}{
+#'  [ ,1] \tab SHA \cr
+#'  [ ,2] \tab AUTHOR \cr
+#'  [ ,3] \tab DATE \cr
+#'  [ ,4] \tab MESSAGE \cr
+#' }
+#' 
+#' @export
+github_commits <- function(user = "r-lum", repo = "luminescence", 
+                           branch = "master", n = 5) {
+  
+  # fetch available branches and check if provided branch exists
+  branches <- github_branches(user, repo)
+  if (!any(grepl(branch, branches$BRANCH)))
+    stop("Branch ", branch, " does not exist.", call. = FALSE)
+  
+  # build URL and retrieve content
+  sha <- branches$SHA[grep(paste0("^", branch, "$"), branches$BRANCH)]
+  url <- paste0("https://api.github.com/repos/", user, "/", repo, "/commits?",
+                "per_page=", n, "&sha=", sha)
+  content <- github_getContent(url)
+  
+  # format output as data.frame
+  output <- do.call(rbind, lapply(content, function(x) {
+    data.frame(SHA = x$sha, 
+               AUTHOR = x$commit$author$name, 
+               DATE = x$commit$author$date, 
+               MESSAGE = x$commit$message,
+               stringsAsFactors = FALSE)
+  }))
+  
+  return(output)
+}
+
+
+# BRANCHES ----------------------------------------------------------------
+#' @rdname GitHub-API
+#' 
+#' @details 
+#' \code{github_branches} can be used to list all current branches of a
+#' repository and returns the corresponding SHA hash as well as an installation
+#' command to install the branch in R via the 'devtools' package.
+#' 
+#' @return 
+#' \code{github_branches}: \code{\link{data.frame}} with columns:
+#' \tabular{ll}{
+#'  [ ,1] \tab BRANCH \cr
+#'  [ ,2] \tab SHA \cr
+#'  [ ,3] \tab INSTALL \cr
+#' }
+#' 
+#' @export
+github_branches <- function(user = "r-lum", repo = "luminescence") {
+  
+  # build URL and retrieve content
+  url <- paste0("https://api.github.com/repos/", user, "/", repo, "/branches")
+  content <- github_getContent(url)
+  
+  # extract relevant information from server response
+  branches <- sapply(content, function(x) x$name)
+  sha <- sapply(content, function(x) x$commit$sha)
+  
+  # format output as data.frame
+  output <- data.frame(
+    BRANCH = branches,
+    SHA = sha,
+    INSTALL = paste0("devtools::install_github('r-lum/luminescence@", branches, "')"),
+    stringsAsFactors = FALSE
+  )
+  
+  return(output)
+}
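+
+# Editor's sketch (not part of the upstream code): the INSTALL column already
+# holds a ready-to-use 'devtools' call, e.g.
+#
+#   branches <- github_branches(user = "r-lum", repo = "luminescence")
+#   branches$INSTALL[branches$BRANCH == "master"]
+#   #> "devtools::install_github('r-lum/luminescence@master')"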
+
+
+# ISSUES ------------------------------------------------------------------
+#' @rdname GitHub-API
+#' 
+#' @details 
+#' \code{github_issues} lists all open issues of a repository and, if
+#' \code{verbose = TRUE}, prints them to the console as valid YAML.
+#' 
+#' @return 
+#' \code{github_issues}: Nested \code{\link{list}} with one element per open
+#' issue. Each issue is a list with elements:
+#' \tabular{ll}{
+#'  [[1]] \tab NUMBER \cr
+#'  [[2]] \tab TITLE \cr
+#'  [[3]] \tab BODY \cr
+#'  [[4]] \tab CREATED \cr
+#'  [[5]] \tab UPDATED \cr
+#'  [[6]] \tab CREATOR \cr
+#'  [[7]] \tab URL \cr
+#'  [[8]] \tab STATUS \cr
+#'  [[9]] \tab MILESTONE \cr
+#' }
+#' 
+#' @export
+github_issues <- function(user = "r-lum", repo = "luminescence", verbose = TRUE) {
+  
+  # build URL and retrieve content
+  url <- paste0("https://api.github.com/repos/", user,"/", repo, "/issues")
+  content <- github_getContent(url)
+  
+  # format output as nested list
+  issues <- lapply(content, function(x) {
+    list(
+      NUMBER = x$number,
+      TITLE = x$title,
+      BODY = gsub("\n", "", x$body),
+      CREATED = x$created_at,
+      UPDATED = x$updated_at,
+      CREATOR = x$user$login,
+      URL = x$url,
+      STATUS = x$state,
+      MILESTONE = x$milestone$title)
+  })
+  
+  # custom printing of the issues list, as print.list produces unreadable
+  # console output
+  if (verbose) {
+    tmp <- lapply(issues, function(x) {
+      
+      # limit width of description text
+      DESCRIPTION <- ""
+      for (i in seq_len(ceiling(nchar(x$BODY) / 100))) 
+        DESCRIPTION <- paste(DESCRIPTION, "  ", 
+                             substr(x$BODY, i*100-99, i*100), "\n")
+      
+      # print to console in valid YAML
+      cat(paste0("---\n",
+                 'title: "', x$TITLE, '"', "\n",
+                 "number: ", x$NUMBER, "\n",
+                 'url: "', x$URL, '"', "\n",
+                 "created: ", x$CREATED, "\n",
+                 "updated: ", x$UPDATED, "\n",
+                 "creator: ", x$CREATOR, "\n",
+                 "status: ", x$STATUS, "\n",
+                 'milestone: "', x$MILESTONE, '"', "\n",
+                 "description: >\n", DESCRIPTION, 
+                 "\n\n\n"))
+      
+    })
+  }
+  # return invisible as we explicitly print the output
+  invisible(issues)
+}
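+
+# Editor's sketch (not part of the upstream code): the issue list is returned
+# invisibly, so assign it to keep the parsed data, e.g.
+#
+#   issues <- github_issues(user = "r-lum", repo = "luminescence", verbose = FALSE)
+#   length(issues)      # number of open issues
+#   issues[[1]]$TITLE   # title of the first issue returned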
+
+
+
+# HELPER ------------------------------------------------------------------
+
+# This function queries the URL, checks the server response and returns
+# the content.
+github_getContent <- function(url) {
+  response <- GET(url, accept_json())
+  if (status_code(response) != 200)
+    stop("Contacting ", url, " had status code ", status_code(response), 
+         call. = FALSE)
+  content <- content(response)
+  return(content)
+}
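+
+# Editor's sketch (not part of the upstream code): GET(), accept_json(),
+# status_code() and content() are presumably imported from the 'httr' package;
+# a stand-alone equivalent of one query would be
+#
+#   response <- httr::GET("https://api.github.com/repos/r-lum/luminescence/branches",
+#                         httr::accept_json())
+#   httr::status_code(response)    # 200 on success
+#   str(httr::content(response))   # parsed JSON as a nested list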
diff --git a/R/install_DevelopmentVersion.R b/R/install_DevelopmentVersion.R
new file mode 100644
index 0000000..01f3f66
--- /dev/null
+++ b/R/install_DevelopmentVersion.R
@@ -0,0 +1,114 @@
+#' Attempts to install the development version of the 'Luminescence' package
+#' 
+#' This function is a convenient method for installing the development
+#' version of the R package 'Luminescence' directly from GitHub.
+#' 
+#' This function uses \code{\link[Luminescence]{github_branches}} to check
+#' which development branches of the R package 'Luminescence' are currently
+#' available on GitHub. The user is then prompted to choose one of the branches
+#' to be installed. It further checks whether the R package 'devtools' is 
+#' currently installed and available on the system. Finally, it prints the
+#' R code required to install the chosen development version to the console,
+#' ready to be copied and pasted by the user.\cr\cr
+#' 
+#' If \code{force_install=TRUE} the function checks whether 'devtools' is available
+#' and then attempts to install the chosen development branch via
+#' \code{\link[devtools]{install_github}}.
+#'
+#' @param force_install \code{\link{logical}} (optional):
+#' If \code{FALSE} (the default) the function produces and prints the required
+#' code to the console for the user to run manually afterwards. When \code{TRUE}
+#' and all requirements are fulfilled (see details) this function attempts to install
+#' the package itself.
+#'
+#' @return
+#' This function requires user input at the command prompt to choose the 
+#' desired development branch to be installed. The required R code to install
+#' the package is then printed to the console.
+#' 
+#' @examples
+#' 
+#' \dontrun{
+#' install_DevelopmentVersion()
+#' }
+#' 
+#' @export
+install_DevelopmentVersion <- function(force_install = FALSE) {
+  
+  message("\n[install_DevelopmentVersion]\n")
+  
+  # check which branches are currently available
+  # see ?github_branches for GitHub API implementation
+  branches <- github_branches()
+  
+  index <-  NULL
+  
+  # let the user pick which branch to install
+  while(is.null(index)) {
+    message(paste0("Which development branch do you want to install? \n",
+                   paste0(" [", 1:length(branches$BRANCH), "]: ", branches$BRANCH, collapse = "\n")))
+    message("\n [0]: <Exit>")
+    
+    index <- readline()
+    
+    if (index == 0)
+      return(NULL)
+    if (!index %in% seq_len(length(branches$BRANCH)))
+      index <- NULL
+    
+    cat("\n")
+  }
+
+  # select the correct branch
+  branch <- branches$BRANCH[as.numeric(index)]
+  
+  if (!force_install) {
+    
+    message("----\n",
+            "Are all prerequisites installed? Make sure to have read\n", 
+            "https://github.com/R-Lum/Luminescence/blob/master/README.md\n",
+            "----\n")
+    
+    message("Please copy and run the following code in your R command-line:\n")
+    if (!requireNamespace("devtools", quietly = TRUE))
+      message("install.packages('devtools')")
+    
+    message(branches$INSTALL[as.numeric(index)], "\n")
+    
+  } else {
+    
+    reply <- NULL
+    while(is.null(reply)) {
+      message("Are all prerequisites installed?",
+              " (https://github.com/R-Lum/Luminescence/blob/master/README.md)\n",
+              " [n/N]: No\n",
+              " [y/Y]: Yes\n")
+      reply <- readline()
+      
+      if (reply == "n" || reply == "N")
+        return(NULL)
+      if (reply != "y" && reply != "Y")
+        reply <- NULL
+    }
+    
+    # check if 'devtools' is available and install if not
+    if (!requireNamespace("devtools", quietly = TRUE)) {
+      message("Please install the 'devtools' package first by running the following command:\n",
+              "install.packages('devtools')")
+      return(NULL)
+    }
+
+    # detach the 'Luminescence' package
+    try(detach(name = "package:Luminescence", unload = TRUE, force = TRUE), 
+        silent = TRUE)
+    
+    # try to unload the dynamic library
+    dynLibs <- sapply(.dynLibs(), function(x) x[["path"]] )
+    try(dyn.unload(dynLibs[grep("Luminescence", dynLibs)]), silent = TRUE)
+
+    # install the development version
+    devtools::install_github(paste0("r-lum/luminescence@", branch))
+    
+  }
+  
+}
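+
+# Editor's sketch (not part of the upstream code): both calls below prompt for
+# the branch to install; the second additionally attempts the installation
+# itself, provided 'devtools' is available:
+#
+#   install_DevelopmentVersion()                      # only print the install code
+#   install_DevelopmentVersion(force_install = TRUE)  # install the chosen branch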
diff --git a/R/internals_RLum.R b/R/internals_RLum.R
index 1dbe827..a9c8b28 100644
--- a/R/internals_RLum.R
+++ b/R/internals_RLum.R
@@ -43,3 +43,176 @@
 
   return(object)
 }
+
+#+++++++++++++++++++++
+#+ .warningCatcher()        +
+#+++++++++++++++++++++
+
+#' Catches warnings returned by a function and merges them.
+#' The original return value of the function is preserved. This function is
+#' particularly helpful if a function returns many warnings with the same content.
+#'
+#' @param expr \code{\link{expression}} (\bold{required}): the R expression, usually a
+#' function
+#'
+#' @return
+#' Returns the result of the evaluated expression; identical warnings are merged and re-raised
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @examples
+#'
+#' f <- function() {
+#'  warning("warning 1")
+#'  warning("warning 1")
+#'  warning("warnigs 2")
+#'  1:10
+#' }
+#' print(.warningCatcher(f()))
+#'
+#' @noRd
+.warningCatcher <- function(expr) {
+  ##set variables
+  warning_collector <- list()
+  env <-  environment()
+
+  ##run function and catch warnings
+  results <- withCallingHandlers(
+    expr = expr,
+    warning = function(w) {
+      assign(x = "warning_collector",
+             value = c(warning_collector, list(conditionMessage(w))),
+             envir = env)
+      invokeRestart("muffleWarning")
+    }
+  )
+
+  ##set new warning messages with merged results
+  if (length(warning_collector) > 0) {
+    w_table <- table(as.character(unlist(warning_collector)))
+    w_table_names <- names(w_table)
+
+    for (w in 1:length(w_table)) {
+      warning(paste(
+        w_table_names[w],
+        "This warning occurred",
+        w_table[w],
+        "times!"
+      ),
+      call. = FALSE)
+
+    }
+
+  }
+  return(results)
+
+}
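+
+# Editor's sketch (not part of the upstream code): running the roxygen example
+# above returns 1:10 unchanged, while identical warnings are merged into a
+# single warning that reports how often each message occurred, e.g.
+#
+#   res <- .warningCatcher(f())
+#   #> Warning: warning 1 This warning occurred 2 times!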
+
+#+++++++++++++++++++++
+#+ .smoothing()      +
+#+++++++++++++++++++++
+
+#' Allows smoothing of data based on the functions zoo::rollmean() and zoo::rollmedian()
+#'
+#' The function provides direct and meaningful access to the smoothing functionality of
+#' zoo::rollmean() and zoo::rollmedian(). Only part of their arguments is supported.
+#'
+#' @param x \code{\link{numeric}} (\bold{required}): the object for which the smoothing should be
+#' applied.
+#'
+#' @param k \code{\link{integer}} (with default): width of the rolling window; must be odd for the rolling median.
+#' If nothing is set, k is determined automatically from the length of the input
+#'
+#' @param fill \code{\link{numeric}} (with default): value used to pad the margins of the result
+#' where no complete window is available (passed to the 'zoo' functions)
+#'
+#' @param align \code{\link{character}} (with default): specifying whether the index of the result should be
+#' left- or right-aligned or centered (default) compared to the rolling window of observations, allowed
+#' \code{"right"}, \code{"center"} and \code{left}
+#'
+#' @param method \code{\link{character}} (with default): defines which method should be applied for the
+#' smoothing: \code{"mean"} or \code{"median"}
+#'
+#' @return
+#' Returns a \code{\link{numeric}} vector of the same length as the input containing the smoothed values
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @examples
+#'
+#' v <- 1:100
+#' .smoothing(v)
+#'
+#' @noRd
+.smoothing <- function(
+  x,
+  k = NULL,
+  fill = NA,
+  align = "right",
+  method = "mean") {
+
+  ##set k
+  if (is.null(k)) k <- ceiling(length(x) / 100)
+
+  ##zoo::rollmedian() requires an odd window size
+  if (method == "median" && k %% 2 == 0) k <- k + 1
+
+  ##smooth data
+  if(method == "mean"){
+    zoo::rollmean(x, k = k, fill = fill, align = align)
+
+  }else if(method == "median"){
+    zoo::rollmedian(x, k = k, fill = fill, align = align)
+
+  }else{
+    stop("[Luminescence:::.smoothing()] Unvalid input for 'method'!")
+
+  }
+
+}
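+
+# Editor's sketch (not part of the upstream code): by default k is derived from
+# the length of the input; it can also be set explicitly, e.g.
+#
+#   x <- rnorm(1000)
+#   .smoothing(x)                             # rolling mean, automatic window
+#   .smoothing(x, k = 11, method = "median")  # rolling median, odd window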
+
+
+#++++++++++++++++++++++++++++++
+#+ Scientific axis annotation +
+#++++++++++++++++++++++++++++++
+
+#' Bored of the 1e10 notation of large numbers in R? Already tried to force
+#' R to produce more fancy labels? Worry not, fancy_scientific() (written by
+#' Jack Aidley) is here to help!
+#'
+#' Source:
+#' http://stackoverflow.com/questions/11610377/how-do-i-change-the-formatting-of-numbers-on-an-axis-with-ggplot
+#'
+#' @param l \code{\link{numeric}} (\bold{required}): a numeric vector, i.e. the
+#' labels that you want to add to your plot
+#'
+#' @return
+#' Returns an expression
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Jack Aidley
+#'
+#' @examples
+#'
+#' plot(seq(1e10, 1e20, length.out = 10),
+#'      1:10,
+#'      xaxt = "n")
+#'
+#' axis(1, at = axTicks(1),
+#'      labels = fancy_scientific(axTicks(1)))
+#'
+#' @noRd
+fancy_scientific <- function(l) {
+  # turn in to character string in scientific notation
+  l <- format(l, scientific = TRUE)
+  # quote the part before the exponent to keep all the digits
+  l <- gsub("^(.*)e", "'\\1'e", l)
+  # turn the 'e+' into plotmath format
+  l <- gsub("e", "%*%10^", l)
+  # remove plus sign
+  l <- gsub("\\+", "", l)
+  # return this as an expression
+  parse(text=l)
+}
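+
+# Editor's sketch (not part of the upstream code): the function turns
+# scientific-notation labels into plotmath expressions, e.g.
+#
+#   fancy_scientific(1e10)
+#   #> expression('1' %*% 10^10)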
diff --git a/R/merge_RLum.Data.Curve.R b/R/merge_RLum.Data.Curve.R
index b5d1b36..19b77f0 100644
--- a/R/merge_RLum.Data.Curve.R
+++ b/R/merge_RLum.Data.Curve.R
@@ -42,6 +42,12 @@
 #'
 #' The max values from the count values are chosen using the function
 #' \code{\link[matrixStats]{rowMaxs}}.
+#' 
+#' \code{"append"}\cr
+#' 
+#' Appends the count values of all curves to one combined data curve. The channel width
+#' is automatically re-calculated, but this requires a constant channel width in the 
+#' original data.
 #'
 #' \code{"-"}\cr
 #'
@@ -216,6 +222,10 @@ merge_RLum.Data.Curve<- function(
 
     temp.matrix <- matrixStats::rowMins(temp.matrix)
 
+  }else if(merge.method == "append") {
+
+    temp.matrix <- sapply(temp.matrix, c)
+    
   }else if(merge.method == "-"){
 
     if(ncol(temp.matrix) > 2){
@@ -256,7 +266,18 @@ merge_RLum.Data.Curve<- function(
   }
 
   ##add first column
-  temp.matrix <- cbind(object[[1]]@data[1:min(check.length),1], temp.matrix)
+  #If we append the data of the second curve to the first one we have to recalculate
+  #the x-values (probably time/channel). The step width should be constant, so we
+  #just expand the sequence if this is the case. Otherwise we revert to the default
+  #behaviour (i.e., re-use the x values of the first curve)
+  if (merge.method == "append" && length(unique(diff(object[[1]]@data[,1]))) == 1) {
+      step <- unique(diff(object[[1]]@data[,1]))
+      newx <- seq(from = min(object[[1]]@data[,1]), by = step, length.out = sum(check.length))
+      temp.matrix <- cbind(newx, temp.matrix)
+  } else {
+    temp.matrix <- cbind(object[[1]]@data[1:min(check.length),1], temp.matrix)
+  }
+  
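+  #Editor's illustration (not part of the upstream code): appending two curves
+  #measured at x = 0.1, 0.2, ..., 1.0 s (constant step of 0.1 s) yields a
+  #combined axis 0.1, 0.2, ..., 2.0 s; with a non-constant step the x values
+  #of the first curve are re-used as in the default case.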
 
 
   ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/R/merge_Risoe.BINfileData.R b/R/merge_Risoe.BINfileData.R
index d100e45..c7990b2 100644
--- a/R/merge_Risoe.BINfileData.R
+++ b/R/merge_Risoe.BINfileData.R
@@ -26,11 +26,11 @@
 #' \code{position.number.append.gap = 1} it will become:
 #' \code{1,3,5,7,9,11,13,15,17}.
 #'
-#' @param input.objects \code{\link{character}} or
-#' \code{\linkS4class{Risoe.BINfileData}} (\bold{required}): Character vector
+#' @param input.objects \code{\link{character}} or
+#' \code{\linkS4class{Risoe.BINfileData}} objects (\bold{required}): Character vector
 #' with path and file names (e.g. \code{input.objects = c("path/file1.bin",
 #' "path/file2.bin")} or \code{\linkS4class{Risoe.BINfileData}} objects (e.g.
-#' \code{input.objects = c(object1, object2)})
+#' \code{input.objects = c(object1, object2)}). Alternatively a \code{list} is supported.
 #'
 #'
 #' @param output.file \code{\link{character}} (optional): File output path and
@@ -56,7 +56,7 @@
 #' @note The validity of the output objects is not further checked.
 #'
 #'
-#' @section Function version: 0.2.5
+#' @section Function version: 0.2.7
 #'
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
@@ -108,7 +108,7 @@ merge_Risoe.BINfileData <- function(
 
       if(file.exists(input.objects[i])==FALSE){
 
-        stop("[merge_Risoe.BINfileData()] File",input.objects[i],"does not exists!")
+        stop("[merge_Risoe.BINfileData()] File ",input.objects[i]," does not exist!", call. = FALSE)
 
       }
 
@@ -164,11 +164,12 @@ merge_Risoe.BINfileData <- function(
       temp.position.max +
       position.number.append.gap
 
-    temp.position.max <<- max(temp)
+    assign(x = "temp.position.max", value = max(temp), envir = parent.env(environment()))
 
     return(temp)
   }))
 
+
   temp.position.values <- c(temp[[1]]@METADATA[, "POSITION"], temp.position.values)
 
 
diff --git a/R/methods_RLum.R b/R/methods_RLum.R
index 448c00d..182e986 100644
--- a/R/methods_RLum.R
+++ b/R/methods_RLum.R
@@ -43,7 +43,12 @@
 #'
 #' @param z \code{\link{integer}} (optional): the column index of the matrix, data.frame
 #'
-#' @param i \code{\link{character}} (optional): name of the wanted record type or data object
+#' @param i \code{\link{character}} or \code{\link{integer}} (optional): name of the wanted record type or data object, or the row index in the \code{RLum.Data.Curve} object
+#'
+#' @param j \code{\link{integer}} (optional): column of the data matrix in the \code{RLum.Data.Curve} object
+#'
+#' @param value \code{\link{numeric}} \bold{(required)}: numeric value which replaces the selected value(s) in the
+#' \code{RLum.Data.Curve} object
 #'
 #' @param drop \code{\link{logical}} (with default): keep object structure or drop it
 #'
@@ -176,17 +181,19 @@ summary.RLum.Data.Curve <- function(object, ...) summary(object@data, ...)
 #' @export
 subset.Risoe.BINfileData <- function(x, subset, records.rm = TRUE, ...) {
 
-  if(length(list(...))){
+  if(length(list(...)))
     warning(paste("Argument not supported and skipped:", names(list(...))))
 
-  }
 
   ##select relevant rows
-  sel <- eval(
+  sel <- tryCatch(eval(
     expr = substitute(subset),
     envir = x@METADATA,
     enclos = parent.frame()
-  )
+  ),
+  error = function(e) {
+    stop("\n\nInvalid subset options. \nValid terms are: ", paste(names(x at METADATA), collapse = ", "))
+  })
 
   ##probably everything is FALSE now?
   if (records.rm) {
@@ -208,6 +215,13 @@ subset.Risoe.BINfileData <- function(x, subset, records.rm = TRUE, ...) {
 
 }
 
+#' @rdname methods_RLum
+#' @method subset RLum.Analysis
+#' @export
+subset.RLum.Analysis <- function(x, subset, ...) {
+  do.call(get_RLum, list(object = x, drop = FALSE, subset = substitute(subset)))
+}
+
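+#Editor's sketch (not part of the upstream code): the method simply forwards
+#the subset expression to get_RLum(), so that, e.g.,
+#
+#  subset(object, recordType == "OSL")
+#
+#behaves like get_RLum(object, subset = (recordType == "OSL"), drop = FALSE)
+#(here 'recordType' is only an assumed example of a valid subset term).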
+
 ####################################################################################################
 # methods for generic: bin()
 # ##################################################################################################
@@ -460,6 +474,16 @@ unlist.RLum.Analysis <- function(x, recursive = TRUE, ...){
 
 
 ####################################################################################################
+# methods for generic: `[<-`
+####################################################################################################
+#' @rdname methods_RLum
+#' @export
+`[<-.RLum.Data.Curve` <- function(x, i, j, value){
+  x@data[i,j] <- value #this is done without any S4-method, as otherwise the overhead is too high
+  return(x)
+}
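+
+#Editor's sketch (not part of the upstream code): the replacement method writes
+#directly into the data matrix of the curve, e.g.
+#
+#  curve[1, 2] <- 0   # set the first count value to zero (assuming column 2 holds the counts)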
+
+####################################################################################################
 # methods for generic: `[[`
 ####################################################################################################
 #' @rdname methods_RLum
diff --git a/R/model_LuminescenceSignals.R b/R/model_LuminescenceSignals.R
index 7738967..0425b79 100644
--- a/R/model_LuminescenceSignals.R
+++ b/R/model_LuminescenceSignals.R
@@ -10,7 +10,7 @@
 #' Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), \cr
 #'
 #'
-#' @section Function version: 0.1.0
+#' @section Function version: 0.1.3
 #'
 #' @export
 model_LuminescenceSignals <-
@@ -20,14 +20,17 @@ model_LuminescenceSignals <-
            simulate_sample_history = FALSE,
            plot = TRUE,
            verbose = TRUE,
-           show.structure = FALSE,
+           show_structure = FALSE,
+           own_parameters = NULL,
+           own_state_parameters = NULL,
+           own_start_temperature = NULL,
            ...) {
-    
+
     if (!requireNamespace("RLumModel", quietly = TRUE))
       stop("Simulation of luminescence signals requires the 'RLumModel' package.",
-           " To install this package run 'install.packages('RLumModel')' in your R console.", 
+           " To install this package run 'install.packages('RLumModel')' in your R console.",
            call. = FALSE)
-    
+
     RLumModel::model_LuminescenceSignals (
       model = model,
       sequence = sequence,
@@ -35,7 +38,10 @@ model_LuminescenceSignals <-
       simulate_sample_history = simulate_sample_history ,
       plot = plot,
       verbose = verbose,
-      show.structure = show.structure,
+      show_structure = show_structure,
+      own_parameters = NULL,
+      own_state_parameters = NULL,
+      own_start_temperature = NULL,
       ...
     )
   }
diff --git a/R/plot_AbanicoPlot.R b/R/plot_AbanicoPlot.R
index 7a0730d..fdfad54 100644
--- a/R/plot_AbanicoPlot.R
+++ b/R/plot_AbanicoPlot.R
@@ -193,7 +193,7 @@
 #'
 #' @param output \code{\link{logical}}: Optional output of numerical plot
 #' parameters. These can be useful to reproduce similar plots. Default is
-#' \code{FALSE}.
+#' \code{TRUE}.
 #'
 #' @param interactive \code{\link{logical}} (with default): create an interactive
 #' abanico plot (requires the 'plotly' package)
@@ -435,7 +435,7 @@ plot_AbanicoPlot <- function(
   grid.col,
   frame = 1,
   bw = "SJ",
-  output = FALSE,
+  output = TRUE,
   interactive = FALSE,
   ...
 ) {
@@ -3676,9 +3676,10 @@ plot_AbanicoPlot <- function(
 
     )
 
-    # show interactive plot ----
+    # show and return interactive plot ----
     #print(plotly::subplot(IAP, IAP.kde))
     print(IAP)
+    return(IAP)
   }
 
   ## restore initial cex
@@ -3686,6 +3687,6 @@ plot_AbanicoPlot <- function(
 
   ## create and return numeric output
   if(output == TRUE) {
-    return(plot.output)
+    return(invisible(plot.output))
   }
 }
diff --git a/R/plot_DRTResults.R b/R/plot_DRTResults.R
index 658eb43..a8f3bf6 100644
--- a/R/plot_DRTResults.R
+++ b/R/plot_DRTResults.R
@@ -615,13 +615,13 @@ plot_DRTResults <- function(
                adj = summary.adj,
                labels = label.text[[i]],
                cex = 0.8 * cex,
-               col = col[i])
+               col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] })
         } else {
           if(mtext == "") {
             mtext(side = 3,
                   line = - i + 2.5,
                   text = label.text[[i]],
-                  col = col[i],
+                  col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] },
                   cex = cex * 0.8)
           }
         }
@@ -782,13 +782,13 @@ plot_DRTResults <- function(
              adj = summary.adj,
              labels = label.text[[i]],
              cex = 0.8 * cex,
-             col = col[i])
+             col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] })
       } else {
         if(mtext == "") {
           mtext(side = 3,
                 line = - i + 2.5,
                 text = label.text[[i]],
-                col = col[i],
+                col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] },
                 cex = cex * 0.8)
         }
       }
@@ -802,8 +802,8 @@ plot_DRTResults <- function(
            xjust = legend.adj[1],
            yjust = legend.adj[2],
            legend = legend,
-           col = col,
-           pch = pch,
+           col = unique(col),
+           pch = unique(pch),
            lty = 1,
            cex = cex * 0.8)
   }
diff --git a/R/plot_DetPlot.R b/R/plot_DetPlot.R
index 040dece..cc103ff 100644
--- a/R/plot_DetPlot.R
+++ b/R/plot_DetPlot.R
@@ -83,7 +83,7 @@
 #' every sequence should be checked carefully before running long calculations using several
 #' hundred channels.
 #'
-#' @section Function version: 0.1.0
+#' @section Function version: 0.1.1
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 #'
@@ -255,7 +255,7 @@ plot_DetPlot <- function(
     OSL_curve <- OSL_curve[1:signal_integral.seq[n.channels + 1],]
 
     m <-
-      ((min(df$De, na.rm = TRUE) - max(df$De.Error, na.rm = TRUE)) - (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))) / (min(OSL_curve[, 2], na.rm = TRUE) - max(OSL_curve[, 2], na.rm = TRUE))
+      ((min(df$De - df$De.Error, na.rm = TRUE)) - (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))) / (min(OSL_curve[, 2], na.rm = TRUE) - max(OSL_curve[, 2], na.rm = TRUE))
     n <- (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE)) - m * max(OSL_curve[, 2])
 
     OSL_curve[, 2] <- m * OSL_curve[, 2] + n
@@ -263,7 +263,7 @@ plot_DetPlot <- function(
 
     ##set plot settings
     plot.settings <- list(
-      ylim = c((min(df$De, na.rm = TRUE) - max(df$De.Error, na.rm = TRUE)),
+      ylim = c(min(df$De - df$De.Error, na.rm = TRUE),
                (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))),
       xlim = c(min(OSL_curve[, 1]), max(OSL_curve[, 1])),
       ylab = expression(paste(D[e] / s, " and ", L[n]/(a.u.))),
diff --git a/R/plot_FilterCombinations.R b/R/plot_FilterCombinations.R
index 029caba..5449d8b 100644
--- a/R/plot_FilterCombinations.R
+++ b/R/plot_FilterCombinations.R
@@ -4,6 +4,24 @@
 #' wavelengths are automatically interpolated for the given filter data using the function \code{\link{approx}}.
 #' With that a standardised output is reached and a net transmission window can be shown.\cr
 #'
+#' \bold{Calculations}\cr
+#'
+#' \bold{Net transmission window}\cr
+#' The net transmission window of two filters is approximated by
+#'
+#' \deqn{T_{final} = T_{1} * T_{2}}
+#'
+#'
+#' \bold{Optical density}\cr
+#'
+#' \deqn{OD = -log(T)}
+#'
+#' \bold{Total optical density}\cr
+#'
+#' \deqn{OD_{total} = OD_{1} +  OD_{2}}
+#'
+#' Please consider performing your own calculations if more precise values are required.
+#'
 #' \bold{How to provide input data?}\cr
 #'
 #' CASE 1\cr
@@ -44,6 +62,8 @@
 #' \code{legend.pos} \tab \code{character} \tab change legend position (\code{\link[graphics]{legend}}) \cr
 #' \code{legend.text} \tab \code{character} \tab same as the argument \code{legend} in (\code{\link[graphics]{legend}}) \cr
 #' \code{net_transmission.col} \tab \code{col} \tab colour of net transmission window polygon \cr
+#' \code{net_transmission.col_lines} \tab \code{col} \tab colour of net transmission window polygon lines \cr
+#' \code{net_transmission.density} \tab \code{numeric} \tab specify the line density of the net transmission window polygon \cr
 #' \code{grid} \tab \code{list} \tab full list of arguments that can be passed to the function \code{\link[graphics]{grid}}
 #' }
 #'
@@ -61,6 +81,8 @@
 #' @param show_net_transmission \code{\link{logical}} (with default): show net transmission window
 #' as polygon.
 #'
+#' @param interactive \code{\link{logical}} (with default): enable/disable interactive plot
+#'
 #' @param plot \code{\link{logical}} (with default): enables or disables the plot output
 #'
 #' @param \dots further arguments that can be passed to control the plot output. Supported are \code{main},
@@ -73,6 +95,7 @@
 #' \tabular{lll}{
 #' \bold{Object} \tab \bold{Type} \tab \bold{Description} \cr
 #'  net_transmission_window \tab \code{matrix} \tab the resulting net transmission window \cr
+#'  OD_total \tab \code{matrix} \tab the total optical density\cr
 #'  filter_matrix \tab \code{matrix} \tab the filter matrix used for plotting
 #'
 #' }
@@ -84,7 +107,7 @@
 #'
 #' }
 #'
-#' @section Function version: 0.1.0
+#' @section Function version: 0.3.0
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr
 #'
@@ -109,12 +132,23 @@
 #' filters = list(filter_1 = filter1, Rectangle = list(filter2, d = 2, P = 0.6)))
 #' results
 #'
+#' ## Example 3 show optical density
+#' plot(results$OD_total)
+#'
+#' \dontrun{
+#' ##Example 4
+#' ##show the filters using the interactive mode
+#' plot_FilterCombinations(filters = list(filter1, filter2), interactive = TRUE)
+#'
+#' }
+#'
 #'
 #' @export
 plot_FilterCombinations <- function(
   filters,
   wavelength_range = 200:1000,
   show_net_transmission = TRUE,
+  interactive = FALSE,
   plot = TRUE,
   ...) {
   # Integrity tests -----------------------------------------------------------------------------
@@ -200,18 +234,33 @@ plot_FilterCombinations <- function(
   ##calculate transmission window
   filter_matrix <- cbind(filter_matrix)
   net_transmission_window <- matrix(
-    c(wavelength_range, matrixStats::rowMins(filter_matrix)),
+    c(wavelength_range, matrixStats::rowProds(filter_matrix)),
     ncol = 2)
 
+  ##add optical density to filter matrix
+
+  ##calculate OD
+  OD <- -log(filter_matrix)
+
+  ##calculate  total OD
+  OD_total <- cbind(wavelength_range, matrixStats::rowSums2(OD))
+
+  ##add to matrix
+  filter_matrix <- cbind(filter_matrix, OD)
+
   ##set rownames of filter matrix
   rownames(filter_matrix) <- wavelength_range
 
   ##set column names for filter matrix
-  colnames(filter_matrix) <- names(filters)
+  colnames(filter_matrix) <- c(names(filters), paste0(names(filters), "_OD"))
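+
+  ##Editor's worked illustration (not part of the upstream code): for two filters
+  ##that each transmit 50 % at a given wavelength, the net transmission is
+  ##T_final = 0.5 * 0.5 = 0.25, each optical density is OD = -log(0.5) ~ 0.69 and
+  ##the total optical density is OD_total ~ 1.39, which equals -log(0.25).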
 
   # Plotting ------------------------------------------------------------------------------------
 
   if (plot) {
+
+    ##(1) ... select transmission values
+    filter_matrix_transmisison <- filter_matrix[,!grepl(pattern = "OD", x = colnames(filter_matrix))]
+
     ##set plot settings
     plot_settings <- list(
       main = "Filter Combination",
@@ -225,58 +274,123 @@ plot_FilterCombinations <- function(
       col = 1:length(filters),
       grid = expression(nx = 10, ny = 10),
       legend = TRUE,
-      legend.text = colnames(filter_matrix),
-      net_transmission.col = "grey"
+      legend.text = colnames(filter_matrix_transmisison),
+      net_transmission.col = rgb(0,0.7,0,.2),
+      net_transmission.col_lines = "grey",
+      net_transmission.density = 20
 
     )
 
     ##modify settings on request
     plot_settings <- modifyList(plot_settings, list(...))
 
-    ##plot induvidal filters
-    graphics::matplot(
-      x = wavelength_range,
-      y = filter_matrix,
-      type = "l",
-      main = plot_settings$main,
-      xlab = plot_settings$xlab,
-      ylab = plot_settings$ylab,
-      xlim = plot_settings$xlim,
-      ylim = plot_settings$ylim,
-      lty = plot_settings$lty,
-      lwd = plot_settings$lwd,
-      col = plot_settings$col
+    if(interactive){
 
-    )
+      ##check for plotly
+      if (!requireNamespace("plotly", quietly = TRUE)) {
+        stop("[plot_FilterCombinations()] Package 'plotly' needed interactive plot functionality. Please install it.",
+             call. = FALSE)
+      }
 
-    if (!is.null(plot_settings$grid)) {
-      graphics::grid(eval(plot_settings$grid))
+      ##create basic plot
+      p <-
+        plotly::plot_ly(x = wavelength_range,
+                        y = filter_matrix[,1],
+                        type = "scatter",
+                        name = colnames(filter_matrix_transmisison)[1],
+                        mode = "lines")
+
+        ##add further filters
+        if (ncol(filter_matrix_transmisison) > 1) {
+          for (i in 2:ncol(filter_matrix_transmisison)) {
+            p <- plotly::add_trace(p,
+                        y = filter_matrix[, i],
+                        name = colnames(filter_matrix_transmisison)[i],
+                        mode = 'lines')
+          }
+
+        }
+
+
+      ##add polygon
+      p <-  plotly::add_polygons(p,
+                        x = c(wavelength_range, rev(wavelength_range)),
+                        y = c(net_transmission_window[, 2], rep(0, length(wavelength_range))),
+                        name = "net transmission"
+                        )
+
+
+
+      ##change graphical parameters
+      p <-  plotly::layout(
+        p = p,
+        xaxis = list(
+          title = plot_settings$xlab
+        ),
+        yaxis = list(
+          title = plot_settings$ylab
+        ),
+        title = plot_settings$main
+      )
 
-    }
+      print(p)
+      on.exit(return(p))
+
+
+    }else{
+      ##plot individual filters
+      graphics::matplot(
+        x = wavelength_range,
+        y = filter_matrix_transmisison,
+        type = "l",
+        main = plot_settings$main,
+        xlab = plot_settings$xlab,
+        ylab = plot_settings$ylab,
+        xlim = plot_settings$xlim,
+        ylim = plot_settings$ylim,
+        lty = plot_settings$lty,
+        lwd = plot_settings$lwd,
+        col = plot_settings$col
 
-    ##show effective transmission, which is the minimum for each row
-    if (show_net_transmission) {
-      polygon(
-        x = c(wavelength_range, rev(wavelength_range)),
-        y = c(net_transmission_window[, 2],
-              rep(0, length(wavelength_range))),
-        col = plot_settings$net_transmission.col,
-        border = NA
       )
 
-    }
+      if (!is.null(plot_settings$grid)) {
+        graphics::grid(eval(plot_settings$grid))
 
-    #legend
-    if (plot_settings$legend) {
-      legend(
-        plot_settings$legend.pos,
-        legend = plot_settings$legend.text,
-        col = plot_settings$col,
-        lty = plot_settings$lty,
-        bty = "n"
-      )
-    }
+      }
 
+      ##show the net transmission window, i.e. the product of the transmission values in each row
+      if (show_net_transmission) {
+        polygon(
+          x = c(wavelength_range, rev(wavelength_range)),
+          y = c(net_transmission_window[, 2],
+                rep(0, length(wavelength_range))),
+          col = plot_settings$net_transmission.col,
+          border = NA
+        )
+        polygon(
+          x = c(wavelength_range, rev(wavelength_range)),
+          y = c(net_transmission_window[, 2],
+                rep(0, length(wavelength_range))),
+          col = plot_settings$net_transmission.col_lines,
+          border = NA,
+          density = plot_settings$net_transmission.density
+        )
+
+      }
+
+      #legend
+      if (plot_settings$legend) {
+        legend(
+          plot_settings$legend.pos,
+          legend = plot_settings$legend.text,
+          col = plot_settings$col,
+          lty = plot_settings$lty,
+          bty = "n"
+        )
+      }
+
+    }
 
   }
 
@@ -286,6 +400,7 @@ plot_FilterCombinations <- function(
     class = "RLum.Results",
     data = list(
       net_transmission_window = net_transmission_window,
+      OD_total = OD_total,
       filter_matrix = filter_matrix
 
     ),
diff --git a/R/plot_GrowthCurve.R b/R/plot_GrowthCurve.R
index 571ae64..5eee98e 100644
--- a/R/plot_GrowthCurve.R
+++ b/R/plot_GrowthCurve.R
@@ -1,7 +1,8 @@
 #' Fit and plot a growth curve for luminescence data (Lx/Tx against dose)
 #'
 #' A dose response curve is produced for luminescence measurements using a
-#' regenerative protocol.
+#' regenerative or additive protocol. The function supports interpolation and
+#' extrapolation to calculate the equivalent dose.
 #'
 #' \bold{Fitting methods} \cr\cr For all options (except for the \code{LIN}, \code{QDR} and
 #' the \code{EXP OR LIN}), the \code{\link[minpack.lm]{nlsLM}} function with the
@@ -64,6 +65,12 @@
 #' @param na.rm \code{\link{logical}} (with default): excludes \code{NA} values
 #' from the data set prior to any further operations.
 #'
+#' @param mode \code{\link{character}} (with default): selects calculation mode of the function.
+#' (A) \code{"interpolation"} (default) calculates the De by interpolation,
+#' (B) \code{"extrapolation"} calculates the De by extrapolation and
+#' (C) \code{"alternate"} calculates no De and just fits the data points. Please note that
+#' for option \code{"regenrative"} the first point is considered as natural dose
+#'
 #' @param fit.method \code{\link{character}} (with default): function used for
 #' fitting. Possible options are: \code{LIN}, \code{QDR}, \code{EXP}, \code{EXP OR LIN},
 #' \code{EXP+LIN} or \code{EXP+EXP}. See details.
@@ -135,11 +142,12 @@
 #' \code{..$call} : \tab \code{call} \tab The original function call\cr
 #' }
 #'
-#' @section Function version: 1.8.16
+#' @section Function version: 1.9.5
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France), \cr Michael Dietze, GFZ Potsdam (Germany)
 #'
+#'
 #' @seealso \code{\link{nls}}, \code{\linkS4class{RLum.Results}},
 #' \code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}, \code{\link{lm}}, \code{uniroot}
 #'
@@ -171,10 +179,19 @@
 #'  type = "l"
 #' )
 #'
+#' ##(5) plot using the 'extrapolation' mode
+#' LxTxData[1,2:3] <- c(0.5, 0.001)
+#' print(plot_GrowthCurve(LxTxData,mode = "extrapolation"))
+#'
+#' ##(6) plot using the 'alternate' mode
+#' LxTxData[1,2:3] <- c(0.5, 0.001)
+#' print(plot_GrowthCurve(LxTxData,mode = "alternate"))
+#'
 #' @export
 plot_GrowthCurve <- function(
   sample,
   na.rm = TRUE,
+  mode = "interpolation",
   fit.method = "EXP",
   fit.force_through_origin = FALSE,
   fit.weights = TRUE,
@@ -204,8 +221,15 @@ plot_GrowthCurve <- function(
 
   ##2.1 check for inf data in the data.frame
   if(any(is.infinite(unlist(sample)))){
-    warning("[plot_GrowthCurve()] the input data contain at least one Inf value. NULL returned!")
+    warning("[plot_GrowthCurve()] The input data contain at least one Inf value. NULL returned!")
+    return(NULL)
+  }
+
+  ##2.2 check whether all dose values are identical
+  if(sum(abs(diff(sample[[1]]))) == 0){
+    try(stop("[plot_GrowthCurve()] All points have the same dose. NULL returned!", call. = FALSE))
     return(NULL)
+
   }
 
   ## optionally, count and exclude NA values and print result
@@ -255,17 +279,26 @@ plot_GrowthCurve <- function(
     fit.NumberRegPoints<-length(sample[-1,1])
   }
   if(is.null(fit.NumberRegPointsReal)){
-
-    fit.RegPointsReal <- as.integer(
-      rownames(sample[-which(duplicated(sample[,1]) | sample[,1]==0),]))
-
+    fit.RegPointsReal <- which(!duplicated(sample[,1]) & sample[,1] != 0)
     fit.NumberRegPointsReal <- length(fit.RegPointsReal)
 
   }
 
-  #1.1 Produce dataframe from input values
-  xy<-data.frame(x=sample[2:(fit.NumberRegPoints+1),1],y=sample[2:(fit.NumberRegPoints+1),2])
-  y.Error<-sample[2:(fit.NumberRegPoints+1),3]
+  #1.1 Produce dataframe from input values, two options for different modes
+  if(mode == "interpolation"){
+    xy<-data.frame(x=sample[2:(fit.NumberRegPoints+1),1],y=sample[2:(fit.NumberRegPoints+1),2])
+    y.Error<-sample[2:(fit.NumberRegPoints+1),3]
+
+  }else if (mode == "extrapolation" || mode == "alternate") {
+    xy <- data.frame(
+      x = sample[1:(fit.NumberRegPoints+1),1],
+      y = sample[1:(fit.NumberRegPoints+1),2])
+    y.Error <- sample[1:(fit.NumberRegPoints+1),3]
+
+  }else{
+    stop("[plot_GrowthCurve()] Unknown input for argument 'mode'")
+
+  }
 
   ##1.1.1 produce weights for weighted fitting
   if(fit.weights){
@@ -285,54 +318,45 @@ plot_GrowthCurve <- function(
   }
 
 
-  # Deal with extra arguments -----------------------------------------------
-  ##deal with addition arguments
-  extraArgs <- list(...)
-
-  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
-  {"Growth curve"}
-
-  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else
-  {"Dose [s]"}
-
-  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
-  {expression(L[x]/T[x])}
-
-  if("cex" %in% names(extraArgs)) {cex.global <- extraArgs$cex}
-
-  ylim <- if("ylim" %in% names(extraArgs)) {
-    extraArgs$ylim
-  } else {
-    if(fit.force_through_origin){
-      c(0-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2}))
-
-    }else{
-      c(min(xy$y)-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2}))
-    }
-
- }
-
-
-  xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else
-  {c(0,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4}))}
+  #1.2 Prepare data sets regeneration points for MC Simulation
+  if (mode == "interpolation") {
+    data.MC <- t(vapply(
+      X = seq(2, fit.NumberRegPoints + 1, by = 1),
+      FUN = function(x) {
+        sample(rnorm(
+          n = 10000,
+          mean = sample[x, 2],
+          sd = abs(sample[x, 3])
+        ),
+        size = NumberIterations.MC,
+        replace = TRUE)
+      },
+      FUN.VALUE = vector("numeric", length = NumberIterations.MC)
+    ))
 
-  fun   <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE}
+    #1.3 Do the same for the natural signal
+    data.MC.De <- numeric(NumberIterations.MC)
+    data.MC.De <-
+      sample(rnorm(10000, mean = sample[1, 2], sd = abs(sample[1, 3])),
+             NumberIterations.MC,
+             replace = TRUE)
 
-  #1.2 Prepare data sets regeneration points for MC Simulation
-  data.MC<-t(vapply(
-    X = seq(2,fit.NumberRegPoints+1,by=1),
-    FUN = function(x){
-      sample(
-        rnorm(n = 10000,
-              mean = sample[x,2], sd = abs(sample[x,3])),
-              size = NumberIterations.MC, replace=TRUE)
+  }else{
+    data.MC <- t(vapply(
+      X = seq(1, fit.NumberRegPoints + 1, by = 1),
+      FUN = function(x) {
+        sample(rnorm(
+          n = 10000,
+          mean = sample[x, 2],
+          sd = abs(sample[x, 3])
+        ),
+        size = NumberIterations.MC,
+        replace = TRUE)
       },
-    FUN.VALUE = vector("numeric", length = NumberIterations.MC)))
+      FUN.VALUE = vector("numeric", length = NumberIterations.MC)
+    ))
 
-  #1.3 Do the same for the natural signal
-  data.MC.De <- numeric(NumberIterations.MC)
-  data.MC.De <- sample(rnorm(10000,mean=sample[1,2], sd=abs(sample[1,3])),
-                       NumberIterations.MC, replace=TRUE)
+  }
 
   #1.3 set x.natural
   x.natural <- vector("numeric", length = NumberIterations.MC)
@@ -478,19 +502,52 @@ plot_GrowthCurve <- function(
     }
 
     ##solve and get De
-    De.uniroot <- try(
-      uniroot(De.fs, y = sample[1,2], lower = 0, upper = max(sample[,1]) * 1.5), silent = TRUE)
+    if (mode == "interpolation") {
+      De.uniroot <- try(uniroot(De.fs,
+                                y = sample[1, 2],
+                                lower = 0,
+                                upper = max(sample[, 1]) * 1.5), silent = TRUE)
+
+      if (!inherits(De.uniroot, "try-error")) {
+        De <- round(De.uniroot$root, digits = 2)
+        if (verbose) {
+          if (mode != "alternate") {
+            writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method,
+              " (", mode,") ", "| De = ", De))
+
+          }
+        }
+
+      } else{
+        if (verbose)
+          writeLines("[plot_GrowthCurve()] no solution found for QDR fit")
+        De <- NA
 
-    if(!inherits(De.uniroot, "try-error")){
-      De <- round(De.uniroot$root, digits = 2)
-      if(verbose){
-        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De))
       }
+    }else if (mode == "extrapolation"){
+      De.uniroot <- try(uniroot(De.fs,
+                                y = 0,
+                                lower = -1e06,
+                                upper = max(sample[, 1]) * 1.5), silent = TRUE)
+
+      if (!inherits(De.uniroot, "try-error")) {
+        De <- round(abs(De.uniroot$root), digits = 2)
+        if (verbose) {
+          if (mode != "alternate") {
+            writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method,
+                              " (", mode,") ", "| De = ", De))
+
+          }
+        }
+
+      } else{
+        if (verbose)
+          writeLines("[plot_GrowthCurve()] no solution found for QDR fit")
+        De <- NA
 
+      }
     }else{
-      if(verbose) writeLines("[plot_GrowthCurve()] no solution found for QDR fit")
       De <- NA
-
     }
 
 
@@ -535,16 +592,42 @@ plot_GrowthCurve <- function(
 
       }
 
-      ##solve and get De
-      De.uniroot.MC <- try(uniroot(
-        De.fs.MC,
-        y = data.MC.De[i],
-        lower = 0,
-        upper = max(sample[, 1]) * 1.5
-      ), silent = TRUE)
+      if (mode == "interpolation") {
+        ##solve and get De
+        De.uniroot.MC <- try(uniroot(
+          De.fs.MC,
+          y = data.MC.De[i],
+          lower = 0,
+          upper = max(sample[, 1]) * 1.5
+        ),
+        silent = TRUE)
+
+        if (!inherits(De.uniroot.MC, "try-error")) {
+          De.MC <- round(De.uniroot.MC$root, digits = 2)
+
+        } else{
+          De.MC <- NA
+
+        }
+
+      }else if (mode == "extrapolation"){
+        ##solve and get De
+        De.uniroot.MC <- try(uniroot(
+          De.fs.MC,
+          y = 0,
+          lower = -1e6,
+          upper = max(sample[, 1]) * 1.5
+        ),
+        silent = TRUE)
+
+        if (!inherits(De.uniroot.MC, "try-error")) {
+          De.MC <- round(abs(De.uniroot.MC$root), digits = 2)
+
+        } else{
+          De.MC <- NA
+
+        }
 
-      if(!inherits(De.uniroot.MC, "try-error")){
-        De.MC <- round(De.uniroot.MC$root, digits = 2)
 
       }else{
         De.MC <- NA
@@ -568,7 +651,6 @@ plot_GrowthCurve <- function(
   if (fit.method=="EXP" | fit.method=="EXP OR LIN" | fit.method=="LIN"){
 
     if((is.na(a) | is.na(b) | is.na(c)) && fit.method != "LIN"){
-
       warning("[plot_GrowthCurve()] Fit could not applied for this data set. NULL returned!")
       return(NULL)
 
@@ -582,10 +664,11 @@ plot_GrowthCurve <- function(
       ##try to create some start parameters from the input values to make
       ## the fitting more stable
       for(i in 1:50){
+        a <- a.MC[i]
+        b <- b.MC[i]
+        c <- c.MC[i]
 
-        a<-a.MC[i];b<-b.MC[i];c<-c.MC[i]
-
-        fit.initial <- try(nls(
+        fit.initial <- suppressWarnings(try(nls(
           y ~ fit.functionEXP(a, b, c, x),
           data = data,
           start = c(a = a, b = b, c = c),
@@ -599,7 +682,7 @@ plot_GrowthCurve <- function(
           )
         ),
         silent = TRUE
-        )
+        ))
 
         if(class(fit.initial)!="try-error"){
           #get parameters out of it
@@ -638,7 +721,6 @@ plot_GrowthCurve <- function(
       )
 
       if (inherits(fit, "try-error") & inherits(fit.initial, "try-error")){
-
         if(verbose) writeLines("[plot_GrowthCurve()] try-error for EXP fit")
 
       }else{
@@ -659,12 +741,34 @@ plot_GrowthCurve <- function(
 
 
         #calculate De
-        De<-suppressWarnings(round(-c-b*log(1-sample[1,2]/a), digits=2))
+        if(mode == "interpolation"){
+          De <- suppressWarnings(round(-c-b*log(1-sample[1,2]/a), digits=2))
+
+        }else if (mode == "extrapolation"){
+          De <- suppressWarnings(round(abs(-c-b*log(1-0/a)), digits=2))
+
+        }else{
+          De <- NA
+
+        }
+
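+        ##Editor's worked illustration (not part of the upstream code): with
+        ##a = 2, b = 100, c = 0 and a natural Lx/Tx of 1, interpolation gives
+        ##De = -0 - 100 * log(1 - 1/2) = 100 * log(2) ~ 69.3.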
 
         #print D01 value
         D01<-round(b,digits=2)
-        if(verbose){
-          writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method," | De = ", De, " | D01 = ",D01))
+        if (verbose) {
+          if (mode != "alternate") {
+            writeLines(paste0(
+              "[plot_GrowthCurve()] Fit: ",
+              fit.method,
+              " (",
+              mode,
+              ")",
+              " | De = ",
+              De,
+              " | D01 = ",
+              D01
+            ))
+          }
         }
 
 
@@ -706,8 +810,7 @@ plot_GrowthCurve <- function(
 
           #get parameters out of it including error handling
           if (class(fit.MC)=="try-error") {
-
-            x.natural[i]<-NA
+            x.natural[i] <- NA
 
           }else {
 
@@ -718,8 +821,18 @@ plot_GrowthCurve <- function(
             var.c[i]<-as.vector((parameters["c"]))
 
             #calculate x.natural for error calculation
-            x.natural[i]<-suppressWarnings(
-              round(-var.c[i]-var.b[i]*log(1-data.MC.De[i]/var.a[i]), digits=2))
+            if(mode == "interpolation"){
+              x.natural[i]<-suppressWarnings(
+                round(-var.c[i]-var.b[i]*log(1-data.MC.De[i]/var.a[i]), digits=2))
+
+            }else if(mode == "extrapolation"){
+              x.natural[i]<-suppressWarnings(
+                abs(-var.c[i]-var.b[i]*log(1-0/var.a[i])))
+
+            }else{
+              x.natural[i] <- NA
+
+            }
 
           }
 
@@ -747,19 +860,27 @@ plot_GrowthCurve <- function(
 
       ##Do fitting again as just allows fitting through the origin
       if(fit.force_through_origin){
-
         fit.lm<-lm(data$y ~ 0 + data$x, weights = fit.weights)
 
         #calculate De
-        De <- round((sample[1,2]/fit.lm$coefficients[1]), digits=2)
+        if(mode == "interpolation"){
+          De <- round((sample[1,2]/fit.lm$coefficients[1]), digits=2)
+
+        }else{
+          De <- 0
+        }
 
 
       }else{
-
         fit.lm<-lm(data$y ~ data$x, weights = fit.weights)
 
         #calculate De
-        De <- round((sample[1,2]-fit.lm$coefficients[1])/fit.lm$coefficients[2], digits=2)
+        if(mode == "interpolation"){
+          De <- round((sample[1,2]-fit.lm$coefficients[1])/fit.lm$coefficients[2], digits=2)
+        }else if(mode == "extrapolation"){
+          De <- round(abs((0-fit.lm$coefficients[1])/fit.lm$coefficients[2]), digits= 2)
+
+        }
 
       }
 
@@ -767,10 +888,21 @@ plot_GrowthCurve <- function(
       ##remove vector labels
       De <- as.numeric(as.character(De))
 
-      if(verbose){
-        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De))
-      }
+      if (verbose) {
+        if (mode != "alternate") {
+          writeLines(paste0(
+            "[plot_GrowthCurve()] Fit: ",
+            fit.method,
+            " (",
+            mode,
+            ") ",
+            "| De = ",
+            De
+          ))
 
+        }
+
+      }
 
       #start loop for Monte Carlo Error estimation
       for (i in 1:NumberIterations.MC) {
@@ -783,8 +915,13 @@ plot_GrowthCurve <- function(
           fit.lmMC <- lm(data$y ~ 0 + data$x, weights=fit.weights)
 
           #calculate x.natural
-          x.natural[i]<-round((data.MC.De[i]/fit.lmMC$coefficients[1]), digits=2)
+          if(mode == "interpolation"){
+            x.natural[i]<-round((data.MC.De[i]/fit.lmMC$coefficients[1]), digits=2)
 
+          }else if (mode == "extrapolation"){
+            x.natural[i] <- 0
+
+          }
 
         }else{
 
@@ -793,8 +930,15 @@ plot_GrowthCurve <- function(
 
 
           #calculate x.natural
-          x.natural[i]<-round((data.MC.De[i]-fit.lmMC$coefficients[1])/
-                                fit.lmMC$coefficients[2], digits=2)
+          if(mode == "interpolation"){
+            x.natural[i]<-round((data.MC.De[i]-fit.lmMC$coefficients[1])/
+                                  fit.lmMC$coefficients[2], digits=2)
+
+          }else if (mode == "extrapolation"){
+            x.natural[i]<-round(abs((0-fit.lmMC$coefficients[1])/
+                                  fit.lmMC$coefficients[2]), digits=2)
+
+          }
 
         }
 
@@ -910,30 +1054,79 @@ plot_GrowthCurve <- function(
 
       #problem: analytically it is not easy to calculate x,
       #use uniroot to solve that problem ... readjust function first
-      f.unirootEXPLIN <- function(a,b,c,g,x,LnTn){fit.functionEXPLIN(a,b,c,g,x)-LnTn}
+      if (mode == "interpolation") {
+        f.unirootEXPLIN <-
+          function(a, b, c, g, x, LnTn) {
+            fit.functionEXPLIN(a, b, c, g, x) - LnTn
+          }
+
+        temp.De <-  try(uniroot(
+          f = f.unirootEXPLIN,
+          interval = c(0, max(xy$x) * 1.5),
+          tol = 0.001,
+          a = a,
+          b = b,
+          c = c,
+          g = g,
+          LnTn = sample[1, 2],
+          extendInt = "yes",
+          maxiter = 3000
+        ),
+        silent = TRUE)
 
-      temp.De <-  try(uniroot(f = f.unirootEXPLIN,
-                              interval = c(0,max(xy$x)*1.5),
-                              tol = 0.001,
-                              a = a,
-                              b = b,
-                              c = c,
-                              g = g,
-                              LnTn = sample[1,2],
-                              extendInt = "yes",
-                              maxiter = 3000), silent = TRUE)
 
 
+        if (class(temp.De) != "try-error") {
+          De <- round(temp.De$root, digits = 2)
+        } else{
+          De <- NA
+        }
+      }else if(mode == "extrapolation"){
+          f.unirootEXPLIN <-
+            function(a, b, c, g, x, LnTn) {
+              fit.functionEXPLIN(a, b, c, g, x) - LnTn
+            }
+
+          temp.De <-  try(uniroot(
+            f = f.unirootEXPLIN,
+            interval = c(-1e06, max(xy$x) * 1.5),
+            tol = 0.001,
+            a = a,
+            b = b,
+            c = c,
+            g = g,
+            LnTn = 0,
+            extendInt = "yes",
+            maxiter = 3000
+          ),
+          silent = TRUE)
+
+
+          if (class(temp.De) != "try-error") {
+            De <- round(abs(temp.De$root), digits = 2)
+          } else{
+            De <- NA
+          }
 
-      if (class(temp.De) != "try-error") {
-        De <- round(temp.De$root, digits = 2)
       }else{
         De <- NA
+
       }
 
 
-      if(verbose){
-        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De))
+      if (verbose) {
+        if (mode != "alternate") {
+          writeLines(paste0(
+            "[plot_GrowthCurve()] Fit: ",
+            fit.method,
+            " (",
+            mode,
+            ")"
+            ,
+            " | De = ",
+            De
+          ))
+        }
       }
 
 
@@ -953,7 +1146,6 @@ plot_GrowthCurve <- function(
         pb<-txtProgressBar(min=0,max=NumberIterations.MC, char="=", style=3)
       }
 
-
       #start Monto Carlo loops
       for(i in  1:NumberIterations.MC){
 
@@ -991,23 +1183,47 @@ plot_GrowthCurve <- function(
           #problem: analytical it is not easy to calculate x,
           #use uniroot to solve this problem
 
-          temp.De.MC <-  try(uniroot(
-            f = f.unirootEXPLIN,
-            interval = c(0,max(xy$x) * 1.5),
-            tol = 0.001,
-            a = var.a[i],
-            b = var.b[i],
-            c = var.c[i],
-            g = var.g[i],
-            LnTn = data.MC.De[i]
-          ), silent = TRUE)
+          if (mode == "interpolation") {
+            temp.De.MC <-  try(uniroot(
+              f = f.unirootEXPLIN,
+              interval = c(0, max(xy$x) * 1.5),
+              tol = 0.001,
+              a = var.a[i],
+              b = var.b[i],
+              c = var.c[i],
+              g = var.g[i],
+              LnTn = data.MC.De[i]
+            ),
+            silent = TRUE)
+
+            if (class(temp.De.MC) != "try-error") {
+              x.natural[i] <- temp.De.MC$root
+            } else{
+              x.natural[i] <- NA
+            }
+          } else if (mode == "extrapolation"){
+            temp.De.MC <-  try(uniroot(
+              f = f.unirootEXPLIN,
+              interval = c(-1e6, max(xy$x) * 1.5),
+              tol = 0.001,
+              a = var.a[i],
+              b = var.b[i],
+              c = var.c[i],
+              g = var.g[i],
+              LnTn = 0
+            ),
+            silent = TRUE)
+
+            if (class(temp.De.MC) != "try-error") {
+              x.natural[i] <- abs(temp.De.MC$root)
+            } else{
+              x.natural[i] <- NA
+            }
 
-          if (class(temp.De.MC) != "try-error") {
-            x.natural[i] <- temp.De.MC$root
           }else{
             x.natural[i] <- NA
-          }
 
+          }
 
         }
         ##update progress bar
@@ -1024,9 +1240,15 @@ plot_GrowthCurve <- function(
     }else{
 
       #print message
-      if(verbose){
-        writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = NA (fitting FAILED)"))
+      if (verbose) {
+        if (mode != "alternate") {
+          writeLines(paste0(
+            "[plot_GrowthCurve()] Fit: ",
+            fit.method,
+            " | De = NA (fitting FAILED)"
+          ))
 
+        }
       }
 
 
@@ -1113,32 +1335,48 @@ plot_GrowthCurve <- function(
 
 
       #problem: analytically it is not easy to calculate x, use uniroot
-      f.unirootEXPEXP <- function(a1,a2,b1,b2,x,LnTn){fit.functionEXPEXP(a1,a2,b1,b2,x)-LnTn}
-
-      temp.De <-  try(uniroot(f = f.unirootEXPEXP,
-                              interval = c(0,max(xy$x)*1.5),
-                              tol = 0.001,
-                              a1 = a1,
-                              a2 = a2,
-                              b1 = b1,
-                              b2 = b2,
-                              LnTn = sample[1,2],
-                              extendInt = "yes",
-                              maxiter = 3000), silent = TRUE)
-
-
-      if (class(temp.De) != "try-error") {
-        De <- round(temp.De$root, digits = 2)
-      }else{
+      if (mode == "interpolation") {
+        f.unirootEXPEXP <-
+          function(a1, a2, b1, b2, x, LnTn) {
+            fit.functionEXPEXP(a1, a2, b1, b2, x) - LnTn
+          }
+
+        temp.De <-  try(uniroot(
+          f = f.unirootEXPEXP,
+          interval = c(0, max(xy$x) * 1.5),
+          tol = 0.001,
+          a1 = a1,
+          a2 = a2,
+          b1 = b1,
+          b2 = b2,
+          LnTn = sample[1, 2],
+          extendInt = "yes",
+          maxiter = 3000
+        ),
+        silent = TRUE)
+
+
+        if (class(temp.De) != "try-error") {
+          De <- round(temp.De$root, digits = 2)
+        } else{
+          De <- NA
+        }
+
+        ##remove object
+        rm(temp.De)
+      }else if (mode == "extrapolation"){
+        stop("[plot_GrowthCurve()] mode 'extrapolation' for this fitting method currently not supported!")
+
+      } else{
         De <- NA
-      }
 
-      ##remove object
-      rm(temp.De)
+      }
 
       #print D0 and De value values
       if(verbose){
+        if(mode != "alternate"){
         writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De, "| D01 = ",D01, " | D02 = ",D02))
+        }
       }
 
 
@@ -1324,6 +1562,63 @@ plot_GrowthCurve <- function(
   ##5. Plotting if plotOutput==TRUE
   if(output.plot) {
 
+
+    # Deal with extra arguments -----------------------------------------------
+    ##deal with addition arguments
+    extraArgs <- list(...)
+
+    main <- if("main" %in% names(extraArgs)) {extraArgs$main} else
+    {"Growth curve"}
+
+    xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else
+    {"Dose [s]"}
+
+    ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
+    {
+      if(mode == "regenration"){
+        expression(L[x]/T[x])
+
+      }else{
+        "Luminescence [a.u.]"
+      }
+
+    }
+
+    if("cex" %in% names(extraArgs)) {cex.global <- extraArgs$cex}
+
+    ylim <- if("ylim" %in% names(extraArgs)) {
+      extraArgs$ylim
+    } else {
+      if(fit.force_through_origin | mode == "extrapolation"){
+        c(0-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2}))
+
+      }else{
+        c(min(xy$y)-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2}))
+      }
+
+    }
+
+
+    xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else
+    {
+      if(mode != "extrapolation"){
+        c(0,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4}))
+
+      }else{
+        if(!is.na(De)){
+          c(-De * 2,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4}))
+        }else{
+          c(-min(xy$x) * 2,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4}))
+
+        }
+
+      }
+
+    }
+
+    fun   <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE}
+
+
     ##set plot check
     plot_check <- NULL
 
@@ -1376,6 +1671,11 @@ plot_GrowthCurve <- function(
     silent = TRUE)
 
     if (!is(plot_check, "try-error")) {
+      if(mode == "extrapolation"){
+        abline(v = 0, lty = 1, col = "grey")
+
+      }
+
       #ADD HEADER
       title(main = main, line = 3)
 
@@ -1419,9 +1719,15 @@ plot_GrowthCurve <- function(
       ##POINTS	#Plot Reg0 and Repeated Points
 
       #Natural value
-      points(sample[1, 1:2], col = "red")
-      segments(sample[1, 1], sample[1, 2] - sample[1, 3],
-               sample[1, 1], sample[1, 2] + sample[1, 3], col = "red")
+      if(mode == "interpolation"){
+        points(sample[1, 1:2], col = "red")
+        segments(sample[1, 1], sample[1, 2] - sample[1, 3],
+                 sample[1, 1], sample[1, 2] + sample[1, 3], col = "red")
+
+      }else if (mode == "extrapolation"){
+        points(x = -De, y = 0, col = "red")
+
+      }
 
       #Repeated Point
       points(xy[which(duplicated(xy[, 1])), 1], xy[which(duplicated(xy[, 1])), 2],
@@ -1432,45 +1738,60 @@ plot_GrowthCurve <- function(
                cex.global)
 
       ##ARROWS	#y-error Bars
-
       segments(xy$x, xy$y - y.Error, xy$x, xy$y + y.Error)
 
       ##LINES	#Insert Ln/Tn
-      if (is.na(De)) {
-        lines(
-          c(0, max(sample[, 1]) * 2),
-          c(sample[1, 2], sample[1, 2]),
-          col = "red",
-          lty = 2,
-          lwd = 1.25
-        )
+      if (mode == "interpolation") {
+        if (is.na(De)) {
+          lines(
+            c(0, max(sample[, 1]) * 2),
+            c(sample[1, 2], sample[1, 2]),
+            col = "red",
+            lty = 2,
+            lwd = 1.25
+          )
 
-      } else{
-        try(lines(
-          c(0, De),
-          c(sample[1, 2], sample[1, 2]),
-          col = "red",
-          lty = 2,
-          lwd = 1.25
-        ), silent = TRUE)
+        } else{
+          try(lines(
+            c(0, De),
+            c(sample[1, 2], sample[1, 2]),
+            col = "red",
+            lty = 2,
+            lwd = 1.25
+          ), silent = TRUE)
+
+        }
+        try(lines(c(De, De),
+                  c(0, sample[1, 2]),
+                  col = "red",
+                  lty = 2,
+                  lwd = 1.25), silent = TRUE)
+        try(points(De, sample[1, 2], col = "red", pch = 19), silent = TRUE)
+
+      } else if (mode == "extrapolation"){
+
+        if(!is.na(De)){
+          lines(x = c(-De, -De), y = c(0, par()$usr[1]), col = "red", lty = 2)
+          lines(y = c(0,0), x = c(0, -De), col = "red", lty = 2)
+
+
+        }
 
       }
 
-      try(lines(c(De, De),
-                c(0, sample[1, 2]),
-                col = "red",
-                lty = 2,
-                lwd = 1.25), silent = TRUE)
-      try(points(De, sample[1, 2], col = "red", pch = 19), silent = TRUE)
 
       ## check/set mtext
       mtext <- if ("mtext" %in% names(list(...))) {
         list(...)$mtext
       } else {
+        if(mode != "alternate"){
         substitute(D[e] == De,
                    list(De = paste(
                      De, "\u00B1", De.Error, " | fit: ", fit.method
                    )))
+        }else{
+          ""
+        }
       }
 
 
@@ -1494,14 +1815,24 @@ plot_GrowthCurve <- function(
       }, silent = TRUE)
 
       ##LEGEND	#plot legend
+      if (mode == "interpolation") {
+        legend(
+          "topleft",
+          c("REG point", "REG point repeated", "REG point 0"),
+          pch = c(19, 2, 1),
+          cex = 0.8 * cex.global,
+          bty = "n"
+        )
+      }else{
+        legend(
+          "bottomright",
+          c("Dose point", "Dose point rep.", "Dose point 0"),
+          pch = c(19, 2, 1),
+          cex = 0.8 * cex.global,
+          bty = "n"
+        )
 
-      legend(
-        "topleft",
-        c("REG points", "REG point repeated", "REG point 0"),
-        pch = c(19, 2, 1),
-        cex = 0.8 * cex.global,
-        bty = "n"
-      )
+      }
 
       ##plot only if wanted
       if (output.plot == TRUE & output.plotExtended == TRUE) {
@@ -1683,4 +2014,3 @@ plot_GrowthCurve <- function(
   invisible(output.final)
 
 }
-
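For illustration, a minimal usage sketch of the `mode` argument handled in the hunks above (assuming the example data set `ExampleData.LxTxData` shipped with the package, which provides the data frame `LxTxData`):

    library(Luminescence)
    data(ExampleData.LxTxData, envir = environment())

    ## classical SAR evaluation: the De is interpolated at the natural Lx/Tx
    plot_GrowthCurve(LxTxData, mode = "interpolation")

    ## extrapolation: the De is taken where the fitted curve crosses Lx/Tx = 0,
    ## so the dose axis is extended to negative values and |root| is returned
    plot_GrowthCurve(LxTxData, mode = "extrapolation")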
diff --git a/R/plot_KDE.R b/R/plot_KDE.R
index 9d9e9aa..57d1a1c 100644
--- a/R/plot_KDE.R
+++ b/R/plot_KDE.R
@@ -45,7 +45,7 @@
 #' dataset2)}).
 #'
 #' @param na.rm \code{\link{logical}} (with default): exclude NA values
-#' from the data set prior to any further operations.
+#' from the data set prior to any further operation.
 #'
 #' @param values.cumulative \code{\link{logical}} (with default): show
 #' cumulative individual data.
@@ -80,7 +80,7 @@
 #'
 #' @param output \code{\link{logical}}: Optional output of numerical plot
 #' parameters. These can be useful to reproduce similar plots. Default is
-#' \code{FALSE}.
+#' \code{TRUE}.
 #'
 #' @param \dots further arguments and graphical parameters passed to
 #' \code{\link{plot}}.
@@ -88,7 +88,7 @@
 #' @note The plot output is no 'probability density' plot (cf. the discussion
 #' of Berger and Galbraith in Ancient TL; see references)!
 #'
-#' @section Function version: 3.5.3
+#' @section Function version: 3.5.5
 #'
 #' @author Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
 #' IRAMAT-CRP2A, Universite Bordeaux Montaigne
@@ -161,7 +161,7 @@ plot_KDE <- function(
   summary.pos,
   summary.method = "MCM",
   bw = "nrd0",
-  output = FALSE,
+  output = TRUE,
   ...
 ) {
 
@@ -329,7 +329,7 @@ plot_KDE <- function(
     De.error.global <- c(De.error.global, data[[i]][,2])
 
     ## density range
-    if(!is.na(De.density[[i]])){
+    if(!all(is.na(De.density[[i]]))){
       De.density.range[i,1] <- min(De.density[[i]]$x)
       De.density.range[i,2] <- max(De.density[[i]]$x)
       De.density.range[i,3] <- min(De.density[[i]]$y)
@@ -628,6 +628,8 @@ plot_KDE <- function(
     col.value.bar <- list(...)$col
     col.value.rug <- list(...)$col
     col.boxplot <- list(...)$col
+    col.boxplot.line <- list(...)$col
+    col.boxplot.fill <- NA
     col.mean.line <- adjustcolor(col = list(...)$col,
                                  alpha.f = 0.4)
     col.sd.bar <- adjustcolor(col = list(...)$col,
@@ -636,99 +638,105 @@ plot_KDE <- function(
   } else {
 
     if(length(layout$kde$colour$main) == 1) {
-      col.main <- 1:length(data)
+      col.main <- c(layout$kde$colour$main, 2:length(data))
     } else {
       col.main <- layout$kde$colour$main
     }
 
     if(length(layout$kde$colour$xlab) == 1) {
-      col.xlab <- 1:length(data)
+      col.xlab <- c(layout$kde$colour$xlab, 2:length(data))
     } else {
       col.xlab <- layout$kde$colour$xlab
     }
 
     if(length(layout$kde$colour$ylab1) == 1) {
-      col.ylab1 <- 1:length(data)
+      col.ylab1 <- c(layout$kde$colour$ylab1, 2:length(data))
     } else {
       col.ylab1 <- layout$kde$colour$ylab1
     }
 
     if(length(layout$kde$colour$ylab2) == 1) {
-      col.ylab2 <- 1:length(data)
+      col.ylab2 <- c(layout$kde$colour$ylab2, 2:length(data))
     } else {
       col.ylab2 <- layout$kde$colour$ylab2
     }
 
     if(length(layout$kde$colour$xtck) == 1) {
-      col.xtck <- 1:length(data)
+      col.xtck <- c(layout$kde$colour$xtck, 2:length(data))
     } else {
       col.xtck <- layout$kde$colour$xtck
     }
 
     if(length(layout$kde$colour$ytck1) == 1) {
-      col.ytck1 <- 1:length(data)
+      col.ytck1 <- c(layout$kde$colour$ytck1, 2:length(data))
     } else {
       col.ytck1 <- layout$kde$colour$ytck1
     }
 
     if(length(layout$kde$colour$ytck2) == 1) {
-      col.ytck2 <- 1:length(data)
+      col.ytck2 <- c(layout$kde$colour$ytck2, 2:length(data))
     } else {
       col.ytck2 <- layout$kde$colour$ytck2
     }
 
     if(length(layout$kde$colour$box) == 1) {
-      col.box <- 1:length(data)
+      col.box <- c(layout$kde$colour$box, 2:length(data))
     } else {
       col.box <- layout$kde$colour$box
     }
 
     if(length(layout$kde$colour$mtext) == 1) {
-      col.mtext <- 1:length(data)
+      col.mtext <- c(layout$kde$colour$mtext, 2:length(data))
     } else {
       col.mtext <- layout$kde$colour$mtext
     }
 
     if(length(layout$kde$colour$stats) == 1) {
-      col.stats <- 1:length(data)
+      col.stats <- c(layout$kde$colour$stats, 2:length(data))
     } else {
       col.stats <- layout$kde$colour$stats
     }
 
     if(length(layout$kde$colour$kde.line) == 1) {
-      col.kde.line <- 1:length(data)
+      col.kde.line <- c(layout$kde$colour$kde.line, 2:length(data))
     } else {
       col.kde.line <- layout$kde$colour$kde.line
     }
 
     if(length(layout$kde$colour$kde.fill) == 1) {
-      col.kde.fill <- 1:length(data)
+      col.kde.fill <- c(layout$kde$colour$kde.fill, 2:length(data))
     } else {
       col.kde.fill <- layout$kde$colour$kde.fill
     }
 
     if(length(layout$kde$colour$value.dot) == 1) {
-      col.value.dot <- 1:length(data)
+      col.value.dot <- c(layout$kde$colour$value.dot, 2:length(data))
     } else {
       col.value.dot <- layout$kde$colour$value.dot
     }
 
     if(length(layout$kde$colour$value.bar) == 1) {
-      col.value.bar <- 1:length(data)
+      col.value.bar <- c(layout$kde$colour$value.bar, 2:length(data))
     } else {
       col.value.bar <- layout$kde$colour$value.bar
     }
 
     if(length(layout$kde$colour$value.rug) == 1) {
-      col.value.rug <- 1:length(data)
+      col.value.rug <- c(layout$kde$colour$value.rug, 2:length(data))
     } else {
       col.value.rug <- layout$kde$colour$value.rug
     }
 
-    if(length(layout$kde$colour$boxplot) == 1) {
-      col.boxplot <- 1:length(data)
+    if(length(layout$kde$colour$boxplot.line) == 1) {
+      col.boxplot.line <- c(layout$kde$colour$boxplot.line, 2:length(data))
     } else {
-      col.boxplot <- layout$kde$colour$boxplot
+      col.boxplot.line <- layout$kde$colour$boxplot.line
+    }
+
+    if(length(layout$kde$colour$boxplot.fill) == 1) {
+      col.boxplot.fill <- c(layout$kde$colour$boxplot.fill, 2:length(data))
+    } else {
+      col.boxplot.fill <- layout$kde$colour$boxplot.fill
     }
 
     if(length(layout$kde$colour$mean.line) == 1) {
@@ -739,13 +747,13 @@ plot_KDE <- function(
     }
 
     if(length(layout$kde$colour$sd.bar) == 1) {
-      col.sd.bar <- 1:length(data)
+      col.sd.bar <- c(layout$kde$colour$sd.bar, 2:length(data))
     } else {
       col.sd.bar <- layout$kde$colour$sd.line
     }
 
     if(length(layout$kde$colour$background) == 1) {
-      col.background <- 1:length(data)
+      col.background <- c(layout$kde$colour$background, 2:length(data))
     } else {
       col.background <- layout$kde$colour$background
     }
@@ -931,11 +939,11 @@ plot_KDE <- function(
         cex = cex * layout$kde$font.size$ylab1/12)
 
   for(i in 1:length(data)) {
-    if(!is.na(De.density[[i]])){
+    if(!all(is.na(De.density[[i]]))){
       polygon(x = c(par()$usr[1], De.density[[i]]$x, par()$usr[2]),
               y = c(min(De.density[[i]]$y),De.density[[i]]$y, min(De.density[[i]]$y)),
               border = col.kde.line[i],
-              col = col.kde.fill,
+              col = col.kde.fill[i],
               lty = lty[i],
               lwd = lwd[i])
 
@@ -1071,7 +1079,7 @@ plot_KDE <- function(
               y = c(-11/8 * l_height,
                     -7/8 * l_height),
               lwd = 2,
-              col = col.boxplot[i])
+              col = col.boxplot.line[i])
 
         ## draw q25-q75-polygon
         polygon(x = c(boxplot.data[[i]]$stats[2,1],
@@ -1082,38 +1090,39 @@ plot_KDE <- function(
                       -7/8 * l_height,
                       -7/8 * l_height,
                       -11/8 * l_height),
-                border = col.boxplot[i])
+                col = col.boxplot.fill[i],
+                border = col.boxplot.line[i])
 
         ## draw whiskers
         lines(x = c(boxplot.data[[i]]$stats[2,1],
                     boxplot.data[[i]]$stats[1,1]),
               y = c(-9/8 * l_height,
                     -9/8 * l_height),
-              col = col.boxplot[i])
+              col = col.boxplot.line[i])
 
         lines(x = c(boxplot.data[[i]]$stats[1,1],
                     boxplot.data[[i]]$stats[1,1]),
               y = c(-10/8 * l_height,
                     -8/8 * l_height),
-              col = col.boxplot[i])
+              col = col.boxplot.line[i])
 
         lines(x = c(boxplot.data[[i]]$stats[4,1],
                     boxplot.data[[i]]$stats[5,1]),
               y = c(-9/8 * l_height,
                     -9/8 * l_height),
-              col = col.boxplot[i])
+              col = col.boxplot.line[i])
 
         lines(x = c(boxplot.data[[i]]$stats[5,1],
                     boxplot.data[[i]]$stats[5,1]),
               y = c(-10/8 * l_height,
                     -8/8 * l_height),
-              col = col.boxplot[i])
+              col = col.boxplot.line[i])
 
         ## draw outliers
         points(x = boxplot.data[[i]]$out,
                y = rep(-9/8 * l_height,
                        length(boxplot.data[[i]]$out)),
-               col = col.boxplot[i],
+               col = col.boxplot.line[i],
                cex = cex * 0.8)
       }
 
@@ -1205,9 +1214,10 @@ plot_KDE <- function(
   if(fun==TRUE){sTeve()}
 
   if(output == TRUE) {
-    return(list(De.stats = De.stats,
-                summary.pos = summary.pos,
-                De.density = De.density))
+    return(invisible(list(De.stats = De.stats,
+                          summary.pos = summary.pos,
+                          De.density = De.density)))
   }
 
 }
+
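Because `output = TRUE` is now the default and the result list is returned invisibly, the numerical output has to be captured explicitly; a short sketch (the data frame `df` of De values and errors is hypothetical):

    df <- data.frame(De = c(10.2, 11.4, 9.8, 12.1),
                     De.error = c(0.8, 0.9, 0.7, 1.1))
    kde <- plot_KDE(df)   # plot is drawn, the result list is returned invisibly
    str(kde$De.stats)     # ...but stays available for further processing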
diff --git a/R/plot_RLum.Analysis.R b/R/plot_RLum.Analysis.R
index e3c8dd3..ddc5d40 100644
--- a/R/plot_RLum.Analysis.R
+++ b/R/plot_RLum.Analysis.R
@@ -63,7 +63,7 @@
 #' Only plotting of \code{RLum.Data.Curve} and \code{RLum.Data.Spectrum}
 #' objects are currently supported.\cr
 #'
-#' @section Function version: 0.3.6
+#' @section Function version: 0.3.8
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -83,7 +83,7 @@
 #'##convert values for position 1
 #'temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
 #'
-#'##plot (combine) TL curves in one plot
+#'##(1) plot (combine) TL curves in one plot
 #'plot_RLum.Analysis(
 #' temp,
 #' subset = list(recordType = "TL"),
@@ -92,10 +92,21 @@
 #' abline = list(v = c(110))
 #' )
 #'
+#'##(2) same as example (1) but using
+#'## the argument smooth = TRUE
+#'plot_RLum.Analysis(
+#' temp,
+#' subset = list(recordType = "TL"),
+#' combine = TRUE,
+#' norm = TRUE,
+#' smooth = TRUE,
+#' abline = list(v = c(110))
+#' )
+#'
 #' @export
 plot_RLum.Analysis <- function(
   object,
-  subset,
+  subset = NULL,
   nrows,
   ncols,
   abline = NULL,
@@ -115,7 +126,7 @@ plot_RLum.Analysis <- function(
 
   # Make selection if wanted  -------------------------------------------------------------------
 
-  if(!missing(subset)){
+  if(!is.null(subset)){
 
     ##check whether the user set the drop option and remove it, as we cannot work with it
     subset <- subset[!sapply(names(subset), function(x){"drop" %in% x})]
@@ -373,27 +384,40 @@ plot_RLum.Analysis <- function(
         ##PLOT
         ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
         ##plot RLum.Data.Curve curve
-        plot_RLum.Data.Curve(
-          temp[[i]],
-          col = col,
-          mtext = if(!is.null(plot.settings$mtext[[i]])){
-            plot.settings$mtext[[i]]
-          }else{
-            paste("#", i, sep = "")
-          },
-          par.local = FALSE,
-          main = main,
-          log = plot.settings$log[[i]],
-          lwd = plot.settings$lwd[[i]],
-          type = plot.settings$type[[i]],
-          lty = plot.settings$lty[[i]],
-          xlim = xlim.set,
-          ylim = ylim.set,
-          pch = plot.settings$pch[[i]],
-          cex = plot.settings$cex[[i]],
-          smooth = plot.settings$smooth[[i]],
-          ...
-        )
+
+          ##we have to do it this way, otherwise we run into a duplicated-arguments
+          ##problem
+          ##check and remove duplicated arguments
+          arguments <- c(
+            list(
+              object = temp[[i]],
+              col = col,
+              mtext = if (!is.null(plot.settings$mtext[[i]])) {
+                plot.settings$mtext[[i]]
+              } else{
+                paste("#", i, sep = "")
+              },
+              par.local = FALSE,
+              main = main,
+              log = plot.settings$log[[i]],
+              lwd = plot.settings$lwd[[i]],
+              type = plot.settings$type[[i]],
+              lty = plot.settings$lty[[i]],
+              xlim = xlim.set,
+              ylim = ylim.set,
+              pch = plot.settings$pch[[i]],
+              cex = plot.settings$cex[[i]],
+              smooth = plot.settings$smooth[[i]]
+            ),
+            list(...)
+          )
+
+          arguments[duplicated(names(arguments))] <- NULL
+
+        ##call the function plot_RLum.Data.Curve
+        do.call(what = "plot_RLum.Data.Curve", args = arguments)
+        rm(arguments)
+
 
         ##add abline
         if(!is.null(abline[[i]])){
@@ -437,7 +461,6 @@ plot_RLum.Analysis <- function(
 
     })
 
-
     ##account for different curve types, combine similar
     temp.object.structure  <- structure_RLum(object)
     temp.recordType <- as.character(unique(temp.object.structure$recordType))
@@ -582,6 +605,8 @@ plot_RLum.Analysis <- function(
       } else {
         c(min(object.structure$x.min), max(object.structure$x.max))
       }
+      if (grepl("x", plot.settings$log[[k]], ignore.case = TRUE))
+        xlim[which(xlim == 0)] <- 1
 
       ##ylim
       ylim <- if (!is.null(plot.settings$ylim[[k]]) & length(plot.settings$ylim[[k]]) > 1) {
@@ -592,6 +617,8 @@ plot_RLum.Analysis <- function(
         })))
 
       }
+      if (grepl("y", plot.settings$log[[k]], ignore.case = TRUE))
+        ylim[which(ylim == 0)] <- 1
 
       ##col (again)
       col <- if(length(plot.settings$col[[k]]) > 1 || plot.settings$col[[k]][1] != "black"){
@@ -616,6 +643,15 @@ plot_RLum.Analysis <- function(
 
       }
 
+      ##pch
+      if (length(plot.settings$pch[[k]]) < length(object.list)) {
+        pch <- rep(plot.settings$pch[[k]], times = length(object.list))
+
+      }else{
+        pch <- plot.settings$pch[[k]]
+
+      }
+
       ##legend.text
       legend.text <- if(!is.null(plot.settings$legend.text[[k]])){
         plot.settings$legend.text[[k]]
@@ -645,10 +681,9 @@ plot_RLum.Analysis <- function(
 
       if (legend.pos == "outside") {
         par.default.outside <- par()[c("mar", "xpd")]
-        par(mar = c(5.1, 4.1, 4.1, 8.1), xpd = TRUE)
+        par(mar = c(5.1, 4.1, 4.1, 8.1))
       }
 
-
       ##open plot area
       plot(
         NA,NA,
@@ -676,11 +711,35 @@ plot_RLum.Analysis <- function(
                                             k = k_factor, fill = NA)
         }
 
+        ##remove 0 values if plotted on a log-scale
+        # y-Axis
+        if (grepl("y", plot.settings$log[[k]], ignore.case = TRUE))
+          temp.data.list[[n]] <- temp.data.list[[n]][which(temp.data.list[[n]]$y > 0), ]
+        # x-Axis
+        if (grepl("x", plot.settings$log[[k]], ignore.case = TRUE))
+          temp.data.list[[n]] <- temp.data.list[[n]][which(temp.data.list[[n]]$x > 0), ]
+
         ##print lines
-        lines(temp.data.list[[n]],
-              col = col[n],
-              lty = lty[n],
-              lwd = plot.settings$lwd[[k]])
+        if (plot.settings$type[[k]] == "l" | plot.settings$type[[k]] == "b" ) {
+          lines(
+            temp.data.list[[n]],
+            col = col[n],
+            lty = lty[n],
+            lwd = plot.settings$lwd[[k]]
+          )
+
+        }
+
+        ##add points if requested
+        if (plot.settings$type[[k]] == "p" | plot.settings$type[[k]] == "b" ) {
+          points(
+            temp.data.list[[n]],
+            col = col[n],
+            pch = pch[n]
+
+          )
+
+        }
 
       }
 
@@ -693,11 +752,29 @@ plot_RLum.Analysis <- function(
       ##mtext
       mtext(plot.settings$mtext[[k]], side = 3, cex = .8 * plot.settings$cex[[k]])
 
+      ##if legend is outside of the plotting area we need to allow overplotting
+      ##AFTER all lines have been drawn
+      if (legend.pos == "outside") {
+        par(xpd = TRUE)
+
+        # determine legend position on log(y) scale
+        if (grepl("y", plot.settings$log[[k]], ignore.case = TRUE))
+          ypos <- 10^par()$usr[4]
+        else
+          ypos <- par()$usr[4]
+
+        # determine position on log(x) scale
+        if (grepl("x", plot.settings$log[[k]], ignore.case = TRUE))
+          xpos <- 10^par()$usr[2]
+        else
+          xpos <- par()$usr[2]
+      }
+
       ##legend
       if (plot.settings$legend[[k]]) {
         legend(
-          x = ifelse(legend.pos == "outside", par()$usr[2], legend.pos),
-          y = ifelse(legend.pos == "outside", par()$usr[4], NULL),
+          x = ifelse(legend.pos == "outside", xpos, legend.pos),
+          y = ifelse(legend.pos == "outside", ypos, NULL),
           legend = legend.text,
           lwd = plot.settings$lwd[[k]],
           lty = plot.settings$lty[[k]],
@@ -710,6 +787,9 @@ plot_RLum.Analysis <- function(
           cex = 0.8 * plot.settings$cex[[k]]
         )
 
+        # revert the overplotting
+        if (legend.pos == "outside")
+          par(xpd = FALSE)
       }
 
     }
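A brief sketch of the extended combine-plot handling above, building on the `temp` object from the roxygen example; the `type`, `log` and `legend.pos` settings are assumed to be passed through the usual plot settings:

    plot_RLum.Analysis(
      temp,
      subset = list(recordType = "TL"),
      combine = TRUE,
      type = "b",              # lines and points are both drawn now
      log = "y",               # zero counts are removed before log-scaling
      legend.pos = "outside"   # legend position accounts for log-scaled axes
    )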
diff --git a/R/plot_RLum.Data.Curve.R b/R/plot_RLum.Data.Curve.R
index 0f8343a..337a552 100644
--- a/R/plot_RLum.Data.Curve.R
+++ b/R/plot_RLum.Data.Curve.R
@@ -26,7 +26,7 @@
 #'
 #' @note Not all arguments of \code{\link{plot}} will be passed!
 #'
-#' @section Function version: 0.2.0
+#' @section Function version: 0.2.3
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -65,43 +65,52 @@ plot_RLum.Data.Curve<- function(
 
   ##check if object is of class RLum.Data.Curve
   if(class(object) != "RLum.Data.Curve"){
-
     stop("[plot_RLum.Data.Curve()] Input object is not of type RLum.Data.Curve")
 
   }
 
   ##stop for NA values
-  if (!anyNA(object@data)) {
+  if (!all(is.na(object@data))) {
+
     ##set labeling unit
-    lab.unit <- if (object@recordType == "OSL" |
-                    object@recordType == "IRSL" |
-                    object@recordType == "RL" |
-                    object@recordType == "RF" |
-                    object@recordType == "LM-OSL" |
-                    object@recordType == "RBR") {
-      "s"
-    }
-    else if (object@recordType == "TL") {
-      "\u00B0C"
-    }
-    else {
-      "Unknown"
+    if(!is.na(object@recordType)){
+      lab.unit <- if (object@recordType == "OSL" |
+                      object@recordType == "IRSL" |
+                      object@recordType == "RL" |
+                      object@recordType == "RF" |
+                      object@recordType == "LM-OSL" |
+                      object@recordType == "RBR") {
+        "s"
+      } else if (object@recordType == "TL") {
+        "\u00B0C"
+      }
+      else {
+        "Unknown"
+      }
+    }else{
+      lab.unit <- "Unknown"
+
     }
 
-    lab.xlab <- if (object@recordType == "OSL" |
-                    object@recordType == "IRSL" |
-                    object@recordType == "RL" |
-                    object@recordType == "RF" |
-                    object@recordType == "RBR" |
-                    object@recordType == "LM-OSL"){
+    if(!is.na(object@recordType)){
+      lab.xlab <- if (object@recordType == "OSL" |
+                      object@recordType == "IRSL" |
+                      object@recordType == "RL" |
+                      object@recordType == "RF" |
+                      object@recordType == "RBR" |
+                      object@recordType == "LM-OSL"){
+
+        "Stimulation time"
+      }
+      else if (object@recordType == "TL") {
+        "Temperature"
+      }
+      else {
+        "Independent"
+      }
+    }else{
+      lab.xlab <- "Independent"
 
-      "Stimulation time"
-    }
-    else if (object@recordType == "TL") {
-      "Temperature"
-    }
-    else {
-      "Independent"
     }
 
     ##XSYG
@@ -228,7 +237,7 @@ plot_RLum.Data.Curve<- function(
       extraArgs$ylim
     } else
     {
-      c(min(object@data[,2]),max(object@data[,2]))
+      c(min(object@data[,2], na.rm = TRUE),max(object@data[,2], na.rm = TRUE))
     }
 
     xlim <- if ("xlim" %in% names(extraArgs)) {
@@ -307,8 +316,7 @@ plot_RLum.Data.Curve<- function(
     }
 
   }else{
-
-    warning("[plot_RLum.Data.Curve()] Curve contains NA-values, nothing plotted.")
+    warning("[plot_RLum.Data.Curve()] Curve contains only NA-values, nothing plotted.", call. = FALSE)
 
   }
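With the relaxed NA handling above, only an all-NA curve is skipped; a minimal sketch constructing a curve with a few missing counts (hypothetical object, built via `set_RLum()`):

    curve <- set_RLum(
      class = "RLum.Data.Curve",
      recordType = "OSL",
      data = matrix(c(1:10, c(100, NA, 80, 70, NA, 50, 45, 40, 38, 35)), ncol = 2)
    )
    plot_RLum.Data.Curve(curve)   # plotted; ylim is computed with na.rm = TRUE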
 
diff --git a/R/plot_RLum.Data.Spectrum.R b/R/plot_RLum.Data.Spectrum.R
index 94ac93a..f1408d5 100644
--- a/R/plot_RLum.Data.Spectrum.R
+++ b/R/plot_RLum.Data.Spectrum.R
@@ -78,8 +78,9 @@
 #' \bold{Further arguments that will be passed (depending on the plot type)}
 #'
 #' \code{xlab}, \code{ylab}, \code{zlab}, \code{xlim}, \code{ylim},
-#' \code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type}, \code{col},
-#' \code{border}, \code{box} \code{lwd}, \code{bty} \cr
+#' \code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type} ("single", "multiple.lines",
+#' "interactive"), \code{col},
+#' \code{border}, \code{box}, \code{lwd}, \code{bty}, \code{showscale} ("interactive") \cr
 #'
 #' @param object \code{\linkS4class{RLum.Data.Spectrum}} or \code{\link{matrix}} (\bold{required}): S4
 #' object of class \code{RLum.Data.Spectrum} or a \code{matrix} containing count values of the spectrum.\cr
@@ -135,7 +136,7 @@
 #'
 #' @note Not all additional arguments (\code{...}) will be passed similarly!
 #'
-#' @section Function version: 0.5.0
+#' @section Function version: 0.5.3
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -180,12 +181,26 @@
 #'                         bin.cols = 1)
 #'
 #' \dontrun{
-#'  ##(4) interactive plot using the package plotly
+#'  ##(4) interactive plot using the package plotly ("surface")
 #'  plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
 #'  xlim = c(310,750), ylim = c(0,300), bin.rows=10,
 #'  bin.cols = 1)
 #'
-#'  ##(5) alternative using the package fields
+#'  ##(5) interactive plot using the package plotly ("contour")
+#'  plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+#'  xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+#'  bin.cols = 1,
+#'  type = "contour",
+#'  showscale = TRUE)
+#'
+#'  ##(6) interactive plot using the package plotly ("heatmap")
+#'  plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+#'  xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+#'  bin.cols = 1,
+#'  type = "heatmap",
+#'  showscale = TRUE)
+#'
+#'  ##(7) alternative using the package fields
 #'  fields::image.plot(get_RLum(TL.Spectrum))
 #'  contour(get_RLum(TL.Spectrum), add = TRUE)
 #'
@@ -337,7 +352,15 @@ plot_RLum.Data.Spectrum <- function(
   {""}
 
   type<- if("type" %in% names(extraArgs)) {extraArgs$type} else
-  {"l"}
+  {
+    if (plot.type == "interactive") {
+      "surface"
+
+    } else{
+      "l"
+
+    }
+  }
 
   pch<- if("pch" %in% names(extraArgs)) {extraArgs$pch} else
   {1}
@@ -351,6 +374,11 @@ plot_RLum.Data.Spectrum <- function(
   sub<- if("sub" %in% names(extraArgs)) {extraArgs$sub} else
   {""}
 
+  #for plotly::plot_ly
+  showscale<- if("showscale" %in% names(extraArgs)) {extraArgs$showscale} else
+  {FALSE}
+
+
 
   # prepare values for plot ---------------------------------------------------
   temp.xyz <- get_RLum(object)
@@ -410,7 +438,13 @@ plot_RLum.Data.Spectrum <- function(
 
   # Channel binning ---------------------------------------------------------
 
-  if(missing(bin.rows) == FALSE){
+  ##fatal checks
+  if(bin.cols < 1 | bin.rows < 1){
+    stop("[plot_RLum.Data.Spectrum()] 'bin.cols' and 'bin.rows' have to be > 1!", call. = FALSE)
+
+  }
+
+  if(bin.rows > 1){
 
     ##calculate n.rows
     n.rows <- nrow(temp.xyz)
@@ -427,13 +461,12 @@ plot_RLum.Data.Spectrum <- function(
     ##sum up rows
     temp.xyz <- rowsum(temp.xyz, bin.group)
 
-    ##correct labeling
+    ##correct labelling
     x <- x[seq(1, n.rows, bin.rows)]
 
     ## to avoid odd plots remove last group if bin.rows is not a multiple
     ## of the row number
     if(bin.group.rest != 0){
-
       temp.xyz <- temp.xyz[-nrow(temp.xyz),]
       x <- x[-length(x)]
 
@@ -441,13 +474,15 @@ plot_RLum.Data.Spectrum <- function(
 
     }
 
+    ##replace rownames
+    rownames(temp.xyz) <- as.character(x)
 
     rm(bin.group.rest)
 
   }
 
 
-  if(missing(bin.cols) == FALSE){
+  if(bin.cols > 1){
 
     ##calculate n.cols
     n.cols <- ncol(temp.xyz)
@@ -456,7 +491,6 @@ plot_RLum.Data.Spectrum <- function(
     if(bin.cols > n.cols){
 
       bin.cols <- n.cols
-
       warning("bin.cols > the number of columns. Value reduced to number of cols.")
 
     }
@@ -488,6 +522,9 @@ plot_RLum.Data.Spectrum <- function(
 
     }
 
+    ##replace colnames
+    colnames(temp.xyz) <- as.character(y)
+
   }
 
   ##limit z-values if requested, this idea was taken from the Diss. by Thomas Schilles, 2002
@@ -695,6 +732,7 @@ plot_RLum.Data.Spectrum <- function(
     ##interactive plot and former persp3d
     ## ==========================================================================#
 
+    ## Plot: interactive ----
     ##http://r-pkgs.had.co.nz/description.html
     if (!requireNamespace("plotly", quietly = TRUE)) {
       stop("[plot_RLum.Data.Spectrum()] Package 'plotly' needed for this plot type. Please install it.",
@@ -702,21 +740,27 @@ plot_RLum.Data.Spectrum <- function(
     }
 
        ##set up plot
-       p <- plotly::plot_ly(
-         x = y,
-         y = x,
-         z = temp.xyz,
-         type = "surface",
-         showscale = FALSE
-         #colors = col[1:(length(col)-1)],
-         )
+        p <- plotly::plot_ly(
+          z = temp.xyz,
+          x = as.numeric(colnames(temp.xyz)),
+          y = as.numeric(rownames(temp.xyz)),
+          type = type,
+          showscale = showscale
+          #colors = col[1:(length(col)-1)],
+        )
+
 
        ##change graphical parameters
        p <-  plotly::layout(
          p = p,
          scene = list(
-           xaxis = list(title = ylab),
-           yaxis = list(title = xlab),
+           xaxis = list(
+             title = ylab
+
+           ),
+           yaxis = list(
+             title = xlab
+           ),
            zaxis = list(title = zlab)
 
          ),
@@ -724,6 +768,7 @@ plot_RLum.Data.Spectrum <- function(
        )
 
        print(p)
+       on.exit(return(p))
 
 
   }else if(plot.type == "contour" && ncol(temp.xyz) > 1) {
diff --git a/R/plot_RLum.R b/R/plot_RLum.R
index 60b8c0b..e0eb124 100644
--- a/R/plot_RLum.R
+++ b/R/plot_RLum.R
@@ -33,7 +33,7 @@
 #'
 #' @note The provided plot output depends on the input object.
 #'
-#' @section Function version: 0.4.2
+#' @section Function version: 0.4.3
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -84,7 +84,17 @@ plot_RLum<- function(
         RLum.Data.Curve = plot_RLum.Data.Curve(object = object, ...),
         RLum.Data.Spectrum = plot_RLum.Data.Spectrum(object = object, ...),
         RLum.Data.Image = plot_RLum.Data.Image(object = object, ...),
-        RLum.Analysis = plot_RLum.Analysis(object = object, ...),
+
+        ##we have to do this to prevent 'sub' from being partially matched to 'subset'
+        RLum.Analysis =
+          if(!grepl(pattern = "subset", x = paste(deparse(match.call()), collapse = " "), fixed = TRUE)){
+          plot_RLum.Analysis(object = object, subset = NULL, ...)
+
+        }else{
+          plot_RLum.Analysis(object = object, ...)
+
+        },
+
         RLum.Results = plot_RLum.Results(object = object, ...)
 
       )
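The dispatch guard above matters once `sub` is supplied as an ordinary plot argument; a sketch reusing a hypothetical `RLum.Analysis` object `temp`:

    ## 'sub' is no longer partially matched to the 'subset' argument of
    ## plot_RLum.Analysis(), so a subtitle can be passed safely
    plot_RLum(temp, sub = "position 1")

    ## explicit subsetting still works as before
    plot_RLum(temp, subset = list(recordType = "TL"))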
diff --git a/R/plot_RLum.Results.R b/R/plot_RLum.Results.R
index 354feca..5df3d45 100644
--- a/R/plot_RLum.Results.R
+++ b/R/plot_RLum.Results.R
@@ -914,12 +914,12 @@ plot_RLum.Results<- function(
   #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
   ## CASE 5: Aliquot Size
   if(object@originator=="calc_AliquotSize") {
-    if(object@data$args$MC == TRUE) {
+    if(!is.null(object@data$MC$estimates)) {
 
       extraArgs <- list(...)
 
-      main<- if("main" %in% names(extraArgs)) { extraArgs$main } else { "Monte Carlo Simulation"  }
-      xlab<- if("xlab" %in% names(extraArgs)) { extraArgs$xlab } else { "Amount of grains on aliquot" }
+      main <- if("main" %in% names(extraArgs)) { extraArgs$main } else { "Monte Carlo Simulation"  }
+      xlab <- if("xlab" %in% names(extraArgs)) { extraArgs$xlab } else { "Amount of grains on aliquot" }
 
       # extract relevant data
       MC.n<- object@data$MC$estimates
diff --git a/R/plot_RadialPlot.R b/R/plot_RadialPlot.R
index 21671b5..ef6fabf 100644
--- a/R/plot_RadialPlot.R
+++ b/R/plot_RadialPlot.R
@@ -38,9 +38,6 @@
 #' data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.
 #' @param na.rm \code{\link{logical}} (with default): excludes \code{NA}
 #' values from the data set prior to any further operations.
-#' @param negatives \code{\link{character}} (with default): rule for negative
-#' values. Default is \code{"remove"} (i.e. negative values are removed from
-#' the data set).
 #' @param log.z \code{\link{logical}} (with default): Option to display the
 #' z-axis in logarithmic scale. Default is \code{TRUE}.
 #' @param central.value \code{\link{numeric}}: User-defined central value,
@@ -94,7 +91,7 @@
 #' @param \dots Further plot arguments to pass. \code{xlab} must be a vector of
 #' length 2, specifying the upper and lower x-axes labels.
 #' @return Returns a plot object.
-#' @section Function version: 0.5.3
+#' @section Function version: 0.5.4
 #' @author Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
 #' IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Based on a rewritten
 #' S script of Rex Galbraith, 2010
@@ -219,7 +216,6 @@
 plot_RadialPlot <- function(
   data,
   na.rm = TRUE,
-  negatives = "remove",
   log.z = TRUE,
   central.value,
   centrality = "mean.weighted",
@@ -265,11 +261,25 @@ plot_RadialPlot <- function(
   if(missing(summary.pos) == TRUE) {
     summary.pos <- "sub"
   }
-  if(missing(bar.col) == TRUE) {bar.col <- rep("grey80", length(data))}
-  if(missing(grid.col) == TRUE) {grid.col <- rep("grey70", length(data))}
-  if(missing(summary) == TRUE) {summary <- NULL}
-  if(missing(summary.pos) == TRUE) {summary.pos <- "topleft"}
-  if(missing(mtext) == TRUE) {mtext <- ""}
+  if(missing(bar.col) == TRUE) {
+    bar.col <- rep("grey80", length(data))
+  }
+
+  if(missing(grid.col) == TRUE) {
+    grid.col <- rep("grey70", length(data))
+  }
+
+  if(missing(summary) == TRUE) {
+    summary <- NULL
+  }
+
+  if(missing(summary.pos) == TRUE) {
+    summary.pos <- "topleft"
+  }
+
+  if(missing(mtext) == TRUE) {
+    mtext <- ""
+  }
 
 
   ## check z-axis log-option for grouped data sets
@@ -300,40 +310,45 @@ plot_RadialPlot <- function(
   } else {
     z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100)
     z.span <- ifelse(z.span > 1, 0.9, z.span)
-    limits.z <- c((ifelse(min(De.global) <= 0, 1.1, 0.9) - z.span) * min(De.global),
+    limits.z <- c((ifelse(test = min(De.global) <= 0,
+                          yes = 1.1,
+                          no =  0.9) - z.span) * min(De.global),
                   (1.1 + z.span) * max(De.global))
   }
 
   ticks <- round(pretty(limits.z, n = 5), 3)
   De.delta <- ticks[2] - ticks[1]
 
-#   ## calculate correction dose to shift negative values
-#   if(min(De.global) <= 0) {
-#     De.add <- abs(ticks[length(ticks) - sum(ticks > limits.z[1])])
-#   } else {De.add <- 0}
+  ## calculate correction dose to shift negative values
+  if(min(De.global) <= 0) {
 
-  ## optionally, reassign De.add to remove negative values
-  if(negatives == "remove") {
-    De.add <- 0
+    if("zlim" %in% names(extraArgs)) {
 
-    for(i in 1:length(data)) {
-      data.test <- data[[i]][,1] <= 0
-      data[[i]] <- data[[i]][!data.test,]
-      data.negative <- paste(seq(1, length(data.test))[data.test == TRUE],
-                             collapse = ", ")
-      if(sum(data.test) > 0) {
-        warning(paste("The following lines contain zero or negative values: ",
-                      data.negative,
-                      ".",
-                      sep = ""))
+      De.add <- abs(extraArgs$zlim[1])
+    } else {
+
+      ## estimate delta De to add to all data
+      De.add <-  min(10^ceiling(log10(abs(De.global))) * 10)
+
+      ## optionally readjust delta De for extreme values
+      if(De.add <= abs(min(De.global))) {
+
+        De.add <- De.add * 10
       }
     }
+  } else {
+    De.add <- 0
   }
 
   ## optionally add correction dose to data set and adjust error
-  for(i in 1:length(data)) {
-    data[[i]][,1] <- data[[i]][,1] + De.add
-    data[[i]][,2] <- data[[i]][,2] * data[[i]][,1] / abs(data[[i]][,1] - De.add)
+  if(log.z == TRUE) {
+
+    for(i in 1:length(data)) {
+      data[[i]][,1] <- data[[i]][,1] + De.add
+    }
+
+    De.global <- De.global + De.add
+
   }
 
   ## calculate major preliminary tick values and tick difference
@@ -349,36 +364,29 @@ plot_RadialPlot <- function(
   ticks <- round(pretty(limits.z, n = 5), 3)
   De.delta <- ticks[2] - ticks[1]
 
-  ## calculate correction dose to shift negative values
-  if(min(De.global) <= 0) {
-    De.add <- abs(ticks[length(ticks) - sum(ticks > limits.z[1])])
-  } else {De.add <- 0}
-
-  if(negatives == "remove") {
-    De.add <- 0
-  }
-  ## optionally add correction dose to data set and adjust error
-  for(i in 1:length(data)) {
-    data[[i]][,1] <- data[[i]][,1] + De.add
-    data[[i]][,2] <- data[[i]][,2] * data[[i]][,1] / abs(data[[i]][,1] - De.add)
-  }
-
-  ## adjust limits.z
-  limits.z <- limits.z + 2 * De.add
-
   ## calculate and append statistical measures --------------------------------
 
   ## z-values based on log-option
-  z <- sapply(1:length(data), function(x){
+  z <- lapply(1:length(data), function(x){
     if(log.z == TRUE) {log(data[[x]][,1])} else {data[[x]][,1]}})
+
   if(is(z, "list") == FALSE) {z <- list(z)}
   data <- lapply(1:length(data), function(x) {
      cbind(data[[x]], z[[x]])})
   rm(z)
 
   ## calculate se-values based on log-option
-  se <- sapply(1:length(data), function(x){
-    if(log.z == TRUE) {data[[x]][,2] / data[[x]][,1]} else {data[[x]][,2]}})
+  se <- lapply(1:length(data), function(x, De.add){
+    if(log.z == TRUE) {
+
+      if(De.add != 0) {
+        data[[x]][,2] <- data[[x]][,2] / (data[[x]][,1] + De.add)
+      } else {
+        data[[x]][,2] / data[[x]][,1]
+      }
+    } else {
+      data[[x]][,2]
+    }}, De.add = De.add)
   if(is(se, "list") == FALSE) {se <- list(se)}
   data <- lapply(1:length(data), function(x) {
     cbind(data[[x]], se[[x]])})
@@ -420,9 +428,9 @@ plot_RadialPlot <- function(
   } else if(is.numeric(centrality) == TRUE &
               length(centrality) == length(data)) {
     z.central.raw <- if(log.z == TRUE) {
-      log(centrality)
+      log(centrality + De.add)
     } else {
-      centrality
+      centrality + De.add
     }
     z.central <- lapply(1:length(data), function(x){
       rep(z.central.raw[x], length(data[[x]][,3]))})
@@ -439,7 +447,7 @@ plot_RadialPlot <- function(
   rm(z.central)
 
   ## calculate precision
-  precision <- sapply(1:length(data), function(x){
+  precision <- lapply(1:length(data), function(x){
     1 / data[[x]][,4]})
   if(is(precision, "list") == FALSE) {precision <- list(precision)}
   data <- lapply(1:length(data), function(x) {
@@ -447,7 +455,7 @@ plot_RadialPlot <- function(
   rm(precision)
 
   ## calculate standard estimate
-  std.estimate <- sapply(1:length(data), function(x){
+  std.estimate <- lapply(1:length(data), function(x){
     (data[[x]][,3] - data[[x]][,5]) / data[[x]][,4]})
   if(is(std.estimate, "list") == FALSE) {std.estimate <- list(std.estimate)}
   data <- lapply(1:length(data), function(x) {
@@ -522,11 +530,12 @@ if(centrality[1] == "mean") {
   ## optionally adjust zentral value by user-defined value
   if(missing(central.value) == FALSE) {
 
-    ## adjust central value for De.add
-    central.value <- central.value + 2 * De.add
+    # ## adjust central value for De.add
+    central.value <- central.value + De.add
 
     z.central.global <- ifelse(log.z == TRUE,
-                               log(central.value), central.value)
+                               log(central.value),
+                               central.value)
   }
 
   ## create column names
@@ -602,9 +611,6 @@ if(centrality[1] == "mean") {
     limits.z <- c((0.9 - z.span) * min(data.global[[1]]),
                   (1.1 + z.span) * max(data.global[[1]]))
   }
-  if(limits.z[1] <= 0) {
-    limits.z <- limits.z + 2 * De.add
-  }
 
   if("xlim" %in% names(extraArgs)) {
     limits.x <- extraArgs$xlim
@@ -718,7 +724,7 @@ if(centrality[1] == "mean") {
   }
 
   ## calculate z-axis radius
-  r.x <- limits.x[2] / max(data.global[,6]) + 0.03
+  r.x <- limits.x[2] / max(data.global[,6]) + 0.05
   r <- max(sqrt((data.global[,6])^2+(data.global[,7] * f)^2)) * r.x
 
   ## calculate major z-tick coordinates
@@ -759,7 +765,7 @@ if(centrality[1] == "mean") {
 
   ## subtract De.add from label values
   if(De.add != 0) {
-    label.z.text <- label.z.text - 2 * De.add
+    label.z.text <- label.z.text - De.add
   }
 
   labels <- cbind(label.x, label.y, label.z.text)
@@ -779,6 +785,7 @@ if(centrality[1] == "mean") {
                          (data[[i]][1,5] - z.central.global) *
                            polygons[i,4] - 2)
   }
+
   ## calculate node coordinates for semi-circle
   user.limits <- if(log.z == TRUE) {
     log(limits.z)
@@ -798,10 +805,24 @@ if(centrality[1] == "mean") {
   ellipse <- cbind(ellipse.x, ellipse.y)
   ellipse.lims <- rbind(range(ellipse[,1]), range(ellipse[,2]))
 
+  ## check if z-axis overlaps with 2s-polygon
+  polygon_y_max <- max(polygons[,7])
+  polygon_y_min <- min(polygons[,7])
+
+  z_2s_upper <- ellipse.x[abs(ellipse.y - polygon_y_max) ==
+                            min(abs(ellipse.y - polygon_y_max))]
+
+  z_2s_lower <- ellipse.x[abs(ellipse.y - polygon_y_min) ==
+                            min(abs(ellipse.y - polygon_y_min))]
+
+  if(max(polygons[,3]) >= z_2s_upper | max(polygons[,3]) >= z_2s_lower) {
+    print("[plot_RadialPlot] Warning: z-scale touches 2s-polygon. Decrease plot ratio.")
+  }
+
   ## calculate statistical labels
   if(length(stats == 1)) {stats <- rep(stats, 2)}
   stats.data <- matrix(nrow = 3, ncol = 3)
-  data.stats <- as.numeric(data.global[,1] - 2 * De.add)
+  data.stats <- as.numeric(data.global[,1])
 
   if("min" %in% stats == TRUE) {
     stats.data[1, 3] <- data.stats[data.stats == min(data.stats)][1]
@@ -831,7 +852,6 @@ if(centrality[1] == "mean") {
     if(limits.z.y[2] > 0.77 * limits.y[2]) {
       limits.y[2] <- 1.3 * limits.z.y[2]
     }
-#    limits.y <- c(-max(abs(limits.y)), max(abs(limits.y)))
   }
   if(!("xlim" %in% names(extraArgs))) {
     if(limits.z.x[2] > 1.1 * limits.x[2]) {
@@ -861,7 +881,9 @@ if(centrality[1] == "mean") {
                           "se.rel.weighted")
 
   for(i in 1:length(data)) {
-    statistics <- calc_Statistics(data[[i]])
+    data_to_stats <- data[[i]]
+    data_to_stats$De <- data_to_stats$De - De.add
+    statistics <- calc_Statistics(data = data_to_stats)
     De.stats[i,1] <- statistics$weighted$n
     De.stats[i,2] <- statistics$unweighted$mean
     De.stats[i,3] <- statistics$weighted$mean
@@ -880,13 +902,20 @@ if(centrality[1] == "mean") {
     De.stats[i,17] <- statistics$weighted$se.abs
     De.stats[i,18] <- statistics$weighted$se.rel
 
-    ##kdemax - here a little doubled as it appears below again
-    De.density <-density(x = data[[i]][,1],
-                         kernel = "gaussian",
-                         from = limits.z[1],
-                         to = limits.z[2])
+    ## kdemax - here a little doubled as it appears below again
+    De.density <- try(density(x = data[[i]][,1],
+                              kernel = "gaussian",
+                              from = limits.z[1],
+                              to = limits.z[2]),
+                      silent = TRUE)
+
+    if(class(De.density) == "try-error") {
+
+      De.stats[i,6] <- NA
+    } else {
 
-    De.stats[i,6] <- De.density$x[which.max(De.density$y)]
+      De.stats[i,6] <- De.density$x[which.max(De.density$y)]
+    }
   }
 
   label.text = list(NA)
@@ -1223,6 +1252,9 @@ label.text[[1]] <- NULL
 
   ## calculate line coordinates and further parameters
   if(missing(line) == FALSE) {
+
+    line = line + De.add
+
     if(log.z == TRUE) {line <- log(line)}
 
     line.coords <- list(NA)
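With the `negatives` argument gone, negative De values are no longer removed but shifted internally by an offset (`De.add`) before the log-transformation and shifted back for the axis labels; a sketch with a hypothetical data set:

    df <- data.frame(De = c(-2.1, 5.3, 7.8, 10.2),
                     De.error = c(1.0, 0.8, 0.9, 1.1))

    plot_RadialPlot(df, log.z = TRUE)    # values shifted internally, labels corrected back
    plot_RadialPlot(df, log.z = FALSE)   # alternatively, keep a linear z-scale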
diff --git a/R/plot_ViolinPlot.R b/R/plot_ViolinPlot.R
index c9531d4..705df57 100644
--- a/R/plot_ViolinPlot.R
+++ b/R/plot_ViolinPlot.R
@@ -46,7 +46,7 @@
 #' two other R packages exist providing a possibility to produces this kind of plot, namely:
 #' 'vioplot' and 'violinmplot' (see References for details).
 #'
-#' @section Function version: 0.1.2
+#' @section Function version: 0.1.3
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 #'
@@ -112,8 +112,9 @@ plot_ViolinPlot <- function(
     if(na.rm){
       data <- na.exclude(data)
 
-      warning(paste("[plot_ViolinPlot()]",
-        length(attr(x = na.exclude(c(NA,1,2, NA)), which = "na.action", exact = TRUE))), " NA values removed!", call. = FALSE)
+      if(length(attr(data, "na.action")) > 0){
+        warning(paste("[plot_ViolinPlot()]", length(attr(data, "na.action")), "NA values removed!"), call. = FALSE)
+      }
 
     }
 
diff --git a/R/read_BIN2R.R b/R/read_BIN2R.R
index 52beeb9..ec05e16 100644
--- a/R/read_BIN2R.R
+++ b/R/read_BIN2R.R
@@ -9,9 +9,8 @@
 #' \code{http://www.nutech.dtu.dk/}
 #'
 #' @param file \code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
-#' BIN/BINX file. If input is a \code{list} it should comprise only \code{character}s representing
-#' each valid path and BIN/BINX-file names.
-#' Alternatively the input character can be just a directory (path), in this case the
+#' BIN/BINX file (URLs are supported). If input is a \code{list} it should comprise only \code{character}s representing
+#' each valid path and BIN/BINX-file names. Alternatively the input character can be just a directory (path), in this case the
 #' the function tries to detect and import all BIN/BINX files found in the directory.
 #'
 #' @param show.raw.values \link{logical} (with default): shows raw values from
@@ -49,6 +48,10 @@
 #' BIN-file version is not supported. Can be provided as \code{list} if \code{file} is a \code{list}.\cr
 #' Note: The usage is at own risk, only supported BIN-file versions have been tested.
 #'
+#' @param ignore.RECTYPE \code{\link{logical}} (with default): this argument allows ignoring the
+#' value of the byte 'RECTYPE' (BIN-file version 08), in case it is not documented or set incorrectly.
+#' If set, all records are treated like records of 'RECTYPE' 0 or 1.
+#'
 #' @param pattern \code{\link{character}} (optional): argument that is used if only a path is provided.
 #' The argument will than be passed to the function \code{\link{list.files}} used internally to
 #' construct a \code{list} of wanted files
@@ -76,7 +79,7 @@
 #' import.}
 #'
 #'
-#' @section Function version: 0.15.0
+#' @section Function version: 0.15.6
 #'
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
@@ -90,7 +93,7 @@
 #'
 #' @references
 #' DTU Nutech, 2016. The Squence Editor, Users Manual, February, 2016.
-#' \url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+#' \url{http://www.nutech.dtu.dk/english/products-and-services/radiation-instruments/tl_osl_reader/manuals}
 #'
 #'
 #' @keywords IO
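A usage sketch of the two additions documented above, URL import and the `ignore.RECTYPE` switch (the URL is a placeholder):

    bin <- read_BIN2R(
      file = "http://example.org/measurements.binx",  # downloaded to a temporary file first
      ignore.RECTYPE = TRUE   # skip records with undocumented RECTYPE bytes instead of stopping
    )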
@@ -117,6 +120,7 @@ read_BIN2R <- function(
   show.record.number = FALSE,
   txtProgressBar = TRUE,
   forced.VersionNumber = NULL,
+  ignore.RECTYPE = FALSE,
   pattern = NULL,
   verbose = TRUE,
   ...
@@ -260,21 +264,75 @@ read_BIN2R <- function(
   }
 
 
+  # Config --------------------------------------------------------------------------------------
+  ##set file_link for internet downloads
+  file_link <- NULL
+  on_exit <- function(){
+
+    ##unlink internet connection
+    if(!is.null(file_link)){
+      unlink(file_link)
+    }
+
+    ##close connection
+    if(!is.null(con)){
+      close(con)
+
+    }
+
+  }
+  on.exit(expr = on_exit())
+
 
   # Integrity checks ------------------------------------------------------
 
   ##check if file exists
   if(!file.exists(file)){
 
-    stop("[read_BIN2R()] File does not exists!")
+    ##check whether the file is a URL
+    if(grepl(pattern = "http", x = file, fixed = TRUE)){
+      if(verbose){
+        cat("[read_BIN2R()] URL detected, checking connection ... ")
+      }
+
+      ##check URL
+      if(!httr::http_error(file)){
+        if(verbose) cat("OK")
+
+        ##download file
+        file_link <- tempfile("read_BIN2R_FILE")
+        download.file(file, destfile = file_link, quiet = ifelse(verbose, FALSE, TRUE), mode = "wb")
+
+      }else{
+        cat("FAILED")
+        con <- NULL
+        stop("[read_BIN2R()] File does not exist!", call. = FALSE)
+
+      }
+
+    }else{
+      con <- NULL
+      stop("[read_BIN2R()] File does not exist!", call. = FALSE)
+
+    }
 
   }
 
   ##check if file is a BIN or BINX file
-  if(!(TRUE%in%(c("BIN", "BINX", "bin", "binx")%in%tail(
-    unlist(strsplit(file, split = "\\.")), n = 1)))){
+  if(!(TRUE%in%(c("BIN", "BINX", "bin", "binx")%in%sub(pattern = "%20", replacement = "", x = tail(
+    unlist(strsplit(file, split = "\\.")), n = 1), fixed = TRUE)))){
+
+    try(
+      stop(
+        paste0("[read_BIN2R()] '", file,"' is not a file or not of type 'BIN' or 'BINX'! Skipped!"),
+        call. = FALSE))
+    return(NULL)
+
+  }
 
-    stop("[read_BIN2R()] Input is not a file or not of type 'BIN' or 'BINX'!")
+  ##set correct file name if file_link was set
+  if(!is.null(file_link)){
+    file <- file_link
 
   }
 
@@ -299,7 +357,7 @@ read_BIN2R <- function(
 
 
   ##start for BIN-file check up
-  while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="litte"))>0) {
+  while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="little"))>0) {
 
      ##force version number
     if(!is.null(forced.VersionNumber)){
@@ -312,23 +370,20 @@ read_BIN2R <- function(
       if(temp.ID > 0){
 
         if(is.null(n.records)){
-          warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. Import limited to the first ", temp.ID-1," records."))
+          warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. Import limited to the first ", temp.ID," record(s)."))
 
         }else{
-          warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. 'n.records' reset to ", temp.ID-1,"."))
+          warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. 'n.records' reset to ", temp.ID,"."))
 
         }
 
         ##set or reset n.records
-        n.records <- temp.ID-1
+        n.records <- temp.ID
         break()
 
       }else{
         ##show error message
-        error.text <- paste("[read_BIN2R()] BIN-format version (",temp.VERSION,") of this file is currently not supported! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="")
-
-        ##close connection
-        close(con)
+        error.text <- paste("[read_BIN2R()] BIN-format version (",temp.VERSION,") of this file seems to be not supported or the BIN-file is broken.! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="")
 
         ##show error
         stop(error.text)
@@ -338,20 +393,19 @@ read_BIN2R <- function(
     }
 
     #empty byte position
-    EMPTY<-readBin(con, what="raw", 1, size=1, endian="litte")
+    EMPTY<-readBin(con, what="raw", 1, size=1, endian="little")
 
     if(temp.VERSION == 06 | temp.VERSION == 07 | temp.VERSION == 08){
 
       ##GET record LENGTH
       temp.LENGTH  <- readBin(con, what="int", 1, size=4, endian="little")
-
-      STEPPING <- readBin(con, what="raw", temp.LENGTH-6, size=1, endian="litte")
+      STEPPING <- readBin(con, what="raw", temp.LENGTH-6, size=1, endian="little")
 
     }else{
 
       ##GET record LENGTH
       temp.LENGTH  <- readBin(con, what="int", 1, size=2, endian="little")
-      STEPPING <- readBin(con, what="raw", temp.LENGTH-4, size=1, endian="litte")
+      STEPPING <- readBin(con, what="raw", temp.LENGTH-4, size=1, endian="little")
 
     }
 
@@ -364,9 +418,6 @@ read_BIN2R <- function(
 
   }
 
-  ##close con
-  close(con)
-
   ##set n.records
   if(is.null(n.records)){
     n.records <- temp.ID
@@ -472,6 +523,7 @@ read_BIN2R <- function(
   ##initialise data.frame
   results.METADATA <- data.table::data.table(
 
+    ##1 to 7
     ID = integer(length = n.length),
     SEL = logical(length = n.length),
     VERSION = numeric(length = n.length),
@@ -480,6 +532,7 @@ read_BIN2R <- function(
     NPOINTS = integer(length = n.length),
     RECTYPE = integer(length = n.length),
 
+    #8 to 17
     RUN = integer(length = n.length),
     SET = integer(length = n.length),
     POSITION = integer(length = n.length),
@@ -491,12 +544,14 @@ read_BIN2R <- function(
     SAMPLE = character(length = n.length),
     COMMENT = character(length = n.length),
 
+    #18 to 22
     SYSTEMID = integer(length = n.length),
     FNAME = character(length = n.length),
     USER = character(length = n.length),
     TIME = character(length = n.length),
     DATE = character(length = n.length),
 
+    ##23 to 31
     DTYPE = character(length = n.length),
     BL_TIME = numeric(length = n.length),
     BL_UNIT = integer(length = n.length),
@@ -507,6 +562,7 @@ read_BIN2R <- function(
     SHIFT = integer(length = n.length),
     TAG = integer(length = n.length),
 
+    ##32 to 67
     LTYPE = character(length = n.length),
     LIGHTSOURCE = character(length = n.length),
     LPOWER = numeric(length = n.length),
@@ -544,6 +600,7 @@ read_BIN2R <- function(
     XRF_CURR = numeric(length = n.length),
     XRF_DEADTIMEF = numeric(length = n.length),
 
+    #68 to 79
     DETECTOR_ID = integer(length = n.length),
     LOWERFILTER_ID = integer(length = n.length),
     UPPERFILTER_ID = integer(length = n.length),
@@ -557,6 +614,7 @@ read_BIN2R <- function(
     EXTR_START = numeric(length = n.length),
     EXTR_END = numeric(length = n.length),
 
+    ##80
     SEQUENCE = character(length = n.length)
 
   ) #end set data table
@@ -577,7 +635,7 @@ read_BIN2R <- function(
   }
 
   #open connection
-  con<-file(file, "rb")
+  con <- file(file, "rb")
 
   ##get information about file size
   file.size<-file.info(file)
@@ -599,7 +657,7 @@ read_BIN2R <- function(
   # LOOP --------------------------------------------------------------------
 
   ##start loop for import BIN data
-  while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="litte"))>0) {
+  while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="little"))>0) {
 
     ##force version number
     if(!is.null(forced.VersionNumber)){
@@ -609,9 +667,6 @@ read_BIN2R <- function(
     ##stop input if wrong VERSION
     if((temp.VERSION%in%VERSION.supported) == FALSE){
 
-      ##close connection
-      close(con)
-
       ##show error message
       error.text <- paste("[read_BIN2R()] BIN-format version (",temp.VERSION,") of this file is currently not supported! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="")
 
@@ -634,7 +689,7 @@ read_BIN2R <- function(
 
 
     #empty byte position
-    EMPTY<-readBin(con, what="raw", 1, size=1, endian="litte")
+    EMPTY<-readBin(con, what="raw", 1, size=1, endian="little")
 
     # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     # BINX FORMAT SUPPORT -----------------------------------------------------
@@ -648,20 +703,32 @@ read_BIN2R <- function(
       temp.PREVIOUS <- temp[2]
       temp.NPOINTS <- temp[3]
 
-      ##for temp.VERSION == 08
-      ##RECTYPE
+      #for temp.VERSION == 08
+      #RECTYPE
       if(temp.VERSION == 08){
         temp.RECTYPE <- readBin(con, what="int", 1, size=1, endian="little", signed = FALSE)
-        if(temp.RECTYPE == 128){
-          STEPPING<-readBin(con, what="raw", temp.LENGTH)
+        if(temp.RECTYPE != 0 & temp.RECTYPE != 1){
+          ##jump to the next record by stepping the record length minus the already read bytes
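+          ##(temp.LENGTH - 15: presumably 15 bytes of this record have already been consumed,
+          ##i.e. VERSION (1) + EMPTY (1) + LENGTH, PREVIOUS, NPOINTS (3 x 4) + RECTYPE (1))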
+          STEPPING <- readBin(con, what = "raw", size = 1, n = temp.LENGTH - 15)
+
+          if(temp.RECTYPE == 128){
+            warning(paste0("[read_BIN2R()] ROI definition in data set #",temp.ID+1, " detected, but currently not supported, record skipped!"), call. = FALSE)
+
+          }else{
+            if(!ignore.RECTYPE){
+              stop(paste0("[read_BIN2R()] Byte RECTYPE = ",temp.RECTYPE," is not supported in record #",temp.ID+1,"! Check your BIN-file!"), call. = FALSE)
+
+            }else{
+              if(verbose) cat(paste0("\n[read_BIN2R()] Byte RECTYPE = ",temp.RECTYPE," is not supported in record #",temp.ID+1,", record skipped!"))
+              temp.ID <- temp.ID + 1
+            }
 
-          warning("[read_BIN2R()] ROI definition in data set detected, but currently not supported, skipped!", call. = FALSE)
+          }
 
-          next()
+          next
         }
       }
 
-
       ##(2) Sample characteristics
       ##RUN, SET, POSITION, GRAINNUMBER, CURVENO, XCOORD, YCOORD
       temp <- readBin(con, what="int", 7, size=2, endian="little")
@@ -1276,9 +1343,6 @@ read_BIN2R <- function(
 
   }#endwhile::end lopp
 
-  ##close con
-  close(con)
-
   ##close
   if(txtProgressBar & verbose){close(pb)}
 
@@ -1328,9 +1392,9 @@ read_BIN2R <- function(
 
       warning(
         paste0(
-          "[read_BIN2R()] zero data records detected and removed: ",
+          "\n[read_BIN2R()] ", length(zero_data.check), " zero data records detected and removed: ",
           paste(zero_data.check, collapse = ", "),
-          ". Record index re-calculated."
+          ". \n\n >> Record index re-calculated."
         )
       )
 
@@ -1465,8 +1529,6 @@ read_BIN2R <- function(
 
   }
 
-
    return(object)
 
-
 }
diff --git a/R/read_Daybreak2R.R b/R/read_Daybreak2R.R
index 229dac3..ab9b58e 100644
--- a/R/read_Daybreak2R.R
+++ b/R/read_Daybreak2R.R
@@ -1,12 +1,18 @@
-#' Import Daybreak ASCII dato into R
+#' Import measurement data produced by a Daybreak TL/OSL reader into R
 #'
-#' Import a *.txt (ASCII) file produced by a Daybreak reader into R.
+#' Import a TXT-file (ASCII file) or a DAT-file (binary file) produced by a Daybreak reader into R.
+#' The import of the DAT-files is limited to the file format described for the software TLAPLLIC v.3.2
+#' used with a Daybreak reader, model 1100.
 #'
 #' @param file \code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
 #' file to be imported. Alternatively a list of file names can be provided or just the path a folder
 #' containing measurement data. Please note that the specific, common, file extension (txt) is likely
 #' leading to function failures during import when just a path is provided.
 #'
+#' @param raw \code{\link{logical}} (with default): if the input is a DAT-file (binary) a
+#' \code{\link[data.table]{data.table}} instead of the \code{\linkS4class{RLum.Analysis}} object
+#' can be returned for debugging purposes.
+#'
 #' @param verbose \code{\link{logical}} (with default): enables or disables terminal feedback
 #'
 #' @param txtProgressBar \code{\link{logical}} (with default): enables or disables
@@ -14,14 +20,17 @@
 #'
 #' @return  A list of \code{\linkS4class{RLum.Analysis}} objects (each per position) is provided.
 #'
-#' @note \bold{[BETA VERSION]} This function version still needs to be properly tested.
+#' @note \bold{[BETA VERSION]} This function still needs to be tested properly. In particular
+#' the function has undergone only very rough tests using just a few files.
 #'
-#' @section Function version: 0.2.1
+#' @section Function version: 0.3.0
 #'
-#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-#' (France)\cr Based on a suggestion by Willian Amidon and Andrew Louis Gorin.
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), \cr
+#' Antoine Zink, C2RMF, Palais du Louvre, Paris (France)\cr
+#' \cr The ASCII-file import is based on a suggestion by Willian Amidon and Andrew Louis Gorin
 #'
-#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}},
+#' \code{\link[data.table]{data.table}}
 #'
 #' @references -
 #'
@@ -29,11 +38,16 @@
 #'
 #' @examples
 #'
-#' ## This function has no example yet.
+#' \dontrun{
+#' file <- file.choose()
+#' temp <- read_Daybreak2R(file)
+#'
+#' }
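+#'
+#' ##additional sketch: return the raw data.table of a binary DAT-file for
+#' ##debugging purposes (see the 'raw' argument)
+#' \dontrun{
+#' temp_raw <- read_Daybreak2R(file, raw = TRUE)
+#' }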
 #'
 #' @export
 read_Daybreak2R <- function(
   file,
+  raw = FALSE,
   verbose = TRUE,
   txtProgressBar = TRUE
 ){
@@ -41,7 +55,7 @@ read_Daybreak2R <- function(
   ##TODO
   ## - run tests
   ## - check where the warning messages are coming from
-  ## - implement further integegrity tests
+  ## - implement further integrity tests (ASCII import)
 
   # Self Call -----------------------------------------------------------------------------------
   # Option (a): Input is a list, every element in the list will be treated as file connection
@@ -81,181 +95,388 @@ read_Daybreak2R <- function(
   }
 
 
-
   # Integrity checks ----------------------------------------------------------------------------
 
   ##check if file exists
   if(!file.exists(file)){
-    stop("[read_Daybreak2R()] file name doesn't seem to exist.")
+    stop("[read_Daybreak2R()] file name does not seem to exist.", call. = FALSE)
 
   }
 
 
-  # Read ASCII file -----------------------------------------------------------------------------
+  ##check for file extension ... distinguish between TXT and DAT
+  if(substr(file, start = nchar(file) - 3, stop = nchar(file)) == ".DAT"){
+
+     # Read DAT-file ------------------------------------------------------------------------------
+
+      ##screen file to get information on the number of stored records
+      con<-file(file,"rb")
+      file.data <- file.info(file)
+      max.pt<-readBin(con,what="int",6,size=2,endian="little")[6]
+      file.size<-file.data$size
+      n.length<-file.size/(190+8*(max.pt+1)) ##190 is the size of the header for each data set
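+      ##(the 190 header bytes correspond to the per-record reads below:
+      ##6 x 2-byte integers + 3 x 2-byte booleans + 9 x 8-byte reals +
+      ##7 x 8-byte strings + 4-byte date + 40-byte remark)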
+      close(con)
+
+      ##import data
+      con <- file(file, "rb")
+
+      ##pre-define data.table
+      results.DATA <-
+        data.table::data.table(
+          ID = integer(length = n.length),
+          MAXPT = integer(length = n.length),
+          SPACING = integer(length = n.length),
+          NDISK = integer(length = n.length),
+          NRUN = integer(length = n.length),
+          D1 = integer(length = n.length),
+          NPT = integer(length = n.length),
+          NATL = logical(length = n.length),
+          TLRUN = logical(length = n.length),
+          BEFORE_IRRAD = logical(length = n.length),
+          SHIFT = double(length = n.length),
+          RAMPRATE = double(length = n.length),
+          GRATE = double(length = n.length),
+          BRATE = double(length = n.length),
+          ARATE = double(length = n.length),
+          GAMMADOSE = double(length = n.length),
+          BETADOSE = double(length = n.length),
+          ALPHADOSE = double(length = n.length),
+          BLEACHINGTIME = double(length = n.length),
+          GRUNIT = character(length = n.length),
+          BRUNIT = character(length = n.length),
+          ARUNIT = character(length = n.length),
+          BFILTER = character(length = n.length),
+          GSOURCE = character(length = n.length),
+          BSOURCE = character(length = n.length),
+          ASOURCE = character(length = n.length),
+          IRRAD_DATE = character(length = n.length),
+          RUNREMARK = character(length = n.length),
+          DATA = list()
+        )
+
+      ##TERMINAL FEEDBACK
+      if(verbose){
+        cat("\n[read_Daybreak2R()]")
+        cat(paste("\n >> Importing:", file[1],"\n"))
+      }
+
+      ##PROGRESS BAR
+      if(txtProgressBar & verbose){
+        pb <- txtProgressBar(min=0,max=n.length, char = "=", style=3)
+      }
+
+      ##LOOP over file
+      i <- 1
+      while (i<n.length){
+
+        #integer
+        ligne1<-readBin(con,what="int",6,size=2,endian="little")
+        i_NPT<-ligne1[1]
+        i_SPACING<-ligne1[2]
+        i_NRUN<-ligne1[3]
+        i_NDISK<-ligne1[4]
+        i_D1<-ligne1[5]
+        i_MAXPT<-ligne1[6]
+
+        #boolean
+        ligne2<-readBin(con,what="logical",3,size=2,endian="little")
+        i_BEFORE_IRRAD<-ligne2[1]
+        i_TLRUN<-ligne2[2]
+        i_NATL<-ligne2[3]
+
+        #double (real)
+        ligne3<-readBin(con,what="double",9,size=8,endian="little")
+        i_BLEACHINGTIME<-ligne3[1]#0
+        i_RAMPRATE<-ligne3[2] #5
+        i_GRATE<-ligne3[3]#0.00000
+        i_BRATE<-ligne3[4]#0.074800
+        i_ARATE<-ligne3[5]#0.114740
+        i_GAMMADOSE<-ligne3[6]#0
+        i_BETADOSE<-ligne3[7]#0
+        i_ALPHADOSE<-ligne3[8]#0
+        i_SHIFT<-ligne3[9] #0
+
+        #string[7]
+        i_GRUNIT<-substr(readChar(con, nchars=8, useBytes = TRUE),2,8)#""
+        i_BRUNIT<-substr(readChar(con, nchars=8, useBytes = FALSE),2,8) #Gy/sec
+        i_ARUNIT<-substr(readChar(con, nchars=8, useBytes = FALSE),2,8)#u-2/sec
+
+        #string[6]
+        i_BFILTER<-substr(readChar(con, nchars=8, useBytes = FALSE),2,7)#none
+        i_GSOURCE<-substr(readChar(con, nchars=8, useBytes = FALSE),2,7)#
+        i_BSOURCE<-substr(readChar(con, nchars=8, useBytes = FALSE),2,7)#Sr-90
+        i_ASOURCE<-substr(readChar(con, nchars=8, useBytes = FALSE),2,7)#Pu-238
+
+        #date record
+        raw_IRRAD_DATE<-readBin(con,what="raw",4,size=1,endian="little")#27-Nov-2006
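+        ##the 4 raw bytes encode a packed date, decoded bitwise below:
+        ##month in bits 1-4, day in bits 5-9, year as offset from 1980 in bits 12-16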
+        bitDATE<-as.integer(rawToBits(raw_IRRAD_DATE))
+        DATE.AAAA<-sum(bitDATE[seq(12,16)]*c(1,2,4,8,16))+1980
+        DATE.MM<-sum(bitDATE[seq(1,4)]*c(1,2,4,8))
+        DATE.JJ<-sum(bitDATE[seq(5,9)]*c(1,2,4,8,16))
+        i_IRRAD_DATE<-paste0(DATE.AAAA,"-",MM=DATE.MM,"-",JJ=DATE.JJ)
+
+        #string[40]
+        i_RUNREMARK<-readChar(con, nchars=40, useBytes = FALSE)
+
+        i_DATA<-readBin(con,what="double",i_MAXPT+1,size=8,endian="little")
+
+        results.DATA[i,':='(ID=i,MAXPT=i_MAXPT,SPACING=i_SPACING,NDISK=i_NDISK,NRUN=i_NRUN,D1=i_D1,NPT=i_NPT,
+                            NATL=i_NATL,TLRUN=i_TLRUN,BEFORE_IRRAD=i_BEFORE_IRRAD,
+                            SHIFT=i_SHIFT,RAMPRATE=i_RAMPRATE,
+                            GRATE=i_GRATE,BRATE=i_BRATE,ARATE=i_ARATE,
+                            GAMMADOSE=i_GAMMADOSE,BETADOSE=i_BETADOSE,ALPHADOSE=i_ALPHADOSE,
+                            BLEACHINGTIME=i_BLEACHINGTIME,
+                            GRUNIT=i_GRUNIT,BRUNIT=i_BRUNIT, ARUNIT=i_ARUNIT,
+                            BFILTER=i_BFILTER,
+                            GSOURCE=i_GSOURCE,BSOURCE=i_BSOURCE,ASOURCE=i_ASOURCE,
+                            IRRAD_DATE=i_IRRAD_DATE,
+                            RUNREMARK=i_RUNREMARK,
+                            DATA=list(i_DATA))]
+
+        ##update
+        i <- i + 1
+
+        ##update progress bar
+        if (txtProgressBar & verbose) {
+          setTxtProgressBar(pb, i)
+        }
 
-  ##read file
-  file2read <- readLines(file)
+      }
 
-  ##(0) get rid off all the empty lines
-  file2read <- file2read[file2read != ""]
+      ##close connection
+      close(con)
 
-  ##(1)
-  ##get all rows with the term "[NewRecord]" - that's what we are interested in and it defines
-  ##the number of elements we need
-  records.row_number <- grep(pattern = "\\[NewRecord\\]", x = file2read)
+      ##close ProgressBar
+      if(txtProgressBar & verbose) close(pb)
 
-  ##(1)
-  ##make a list ... this is not essentially needed but it makes things easier
-  data.list <- lapply(1:length(records.row_number), function(x) {
 
-    ##grep each element
-    if (!is.na(records.row_number[x + 1])) {
-      return(file2read[records.row_number[x]:(records.row_number[x + 1] - 1)])
+      ## Output ... return an RLum.Analysis object or a data.table ... depending on what is wanted
+      if(raw){
+        return(results.DATA)
 
-    }else{
-      return(file2read[records.row_number[x]:length(file2read)])
+      ##Output RLum.Analysis
+      }else{
 
-    }
+       ##remove NULL entries, otherwise we have to deal with them later
+       results.DATA <- results.DATA[!sapply(X = results.DATA[["DATA"]], is.null),]
 
-  })
+       ##we need a double loop, as one aliquot defines one object ...
+       output <- lapply(unique(results.DATA[["NDISK"]]), function(i){
 
-    ##clear memory
-    rm(file2read)
+          ##subset
+          DT <- results.DATA[results.DATA[["NDISK"]] == i,]
 
+          ##create list of records
+          records <- lapply(1:nrow(DT), function(j){
+            set_RLum(
+              class = "RLum.Data.Curve",
+              originator = "read_Daybreak2R",
+              recordType = NA_character_,
+              data = matrix(
+                data = c(
+                  seq(from = 0, by = DT[["SPACING"]][j], length.out = DT[["MAXPT"]][j] + 1),
+                  DT[["DATA"]][j][[1]]),
+                ncol = 2),
+              info = as.list(DT[j,1:(ncol(DT) - 1)])
+            )
+          })
 
-  ##TERMINAL FEEDBACK
-  if(verbose){
-    cat("\n[read_Daybreak2R()]")
-    cat(paste("\n >> Importing:", file[1],"\n"))
-  }
+          ##combine in RLum.Analysis object
+          temp <- set_RLum(
+            class = "RLum.Analysis",
+            originator = "read_Daybreak2R",
+            records =  records
+            )
 
-  ##PROGRESS BAR
-  if(txtProgressBar & verbose){
-    pb <- txtProgressBar(min=0,max=length(data.list), char = "=", style=3)
-  }
+          ##set pid and return
+          return(.set_pid(temp))
 
-  ##(2)
-  ##Loop over the list to create RLum.Data.Curve objects
-  RLum.Data.Curve.list <- lapply(1:length(data.list), function(x){
+          })
 
+      ##return object
+      return(output)
 
-    ##get length of record
-    record.length <- length(data.list[[x]])
+      }
 
-    ##get header length until the argument 'Points'
-    header.length <- grep(pattern = "Points", x = data.list[[x]])
+  }else{
 
-    if(length(header.length)>0){
-      temp.meta_data <- unlist(strsplit(data.list[[x]][2:header.length], split = "=", fixed = TRUE))
+    # Read ASCII file -----------------------------------------------------------------------------
 
-    }else{
-      temp.meta_data <- unlist(strsplit(data.list[[x]][2:length(data.list[[x]])], split = "=", fixed = TRUE))
+    if(verbose){
+      cat("\n[read_Daybreak] >> file extension not of type '.DAT' try to import ASCII-file ...")
 
     }
 
-    ##get list names for the info element list
-    info.names <- temp.meta_data[seq(1,length(temp.meta_data), by = 2)]
+    ##read file
+    file2read <- suppressWarnings(readLines(file))
 
-    ##info elements
-    info <- as.list(temp.meta_data[seq(2,length(temp.meta_data), by = 2)])
-    names(info) <- info.names
+    ##check whether this is a binary file
+    if(all(charToRaw(file2read[1]) != as.raw(127))){
+      stop("[read_Daybreak2R()] The provided file is no ASCII-file and cannot be imported!", call. = FALSE)
 
-    ##add position, which is 'Disk'
-    info <- c(info, position = as.integer(info$Disk))
+    }
 
-    if(length(header.length)>0){
-      ##get measurement data
-      temp.data <- unlist(strsplit(unlist(strsplit(
-        data.list[[x]][12:length(data.list[[x]])], split = "="
-      )), split = ";"))
+    ##(0) get rid off all the empty lines
+    file2read <- file2read[file2read != ""]
 
-      ##grep only data of interest
-      point.x <-
-        suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(2, length(temp.data), by = 4)])))
-      point.y <-
-        suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(3,length(temp.data), by = 4)])))
+    ##(1)
+    ##get all rows with the term "[NewRecord]" - that's what we are interested in and it defines
+    ##the number of elements we need
+    records.row_number <- grep(pattern = "\\[NewRecord\\]", x = file2read)
 
+    ##(1)
+    ##make a list ... this is not essentially needed but it makes things easier
+    data.list <- lapply(1:length(records.row_number), function(x) {
 
-      ##combine it into a matrix
-      data <- matrix(c(point.x,point.y), ncol = 2)
+      ##grep each element
+      if (!is.na(records.row_number[x + 1])) {
+        return(file2read[records.row_number[x]:(records.row_number[x + 1] - 1)])
 
-    }else{
+      }else{
+        return(file2read[records.row_number[x]:length(file2read)])
 
-      ##we presume this should be irradiation ...
-      if ("IrradTime" %in% names(info)) {
+      }
 
-        point.x <- 1:as.numeric(info$IrradTime)
-        point.y <- rep(1, length(point.x))
+    })
 
-        data <- matrix(c(point.x,point.y), ncol = 2)
+      ##clear memory
+      rm(file2read)
 
-      }
 
+    ##TERMINAL FEEDBACK
+    if(verbose){
+      cat("\n[read_Daybreak2R()]")
+      cat(paste("\n >> Importing:", file[1],"\n"))
     }
 
-    ##update progress bar
-    if (txtProgressBar & verbose) {
-      setTxtProgressBar(pb, x)
+    ##PROGRESS BAR
+    if(txtProgressBar & verbose){
+      pb <- txtProgressBar(min=0,max=length(data.list), char = "=", style=3)
     }
 
-    ##return RLum object
-    return(
-      set_RLum(
-        class = "RLum.Data.Curve",
-        originator = "read_Daybreak2R",
-        recordType = sub(" ", replacement = "_", x = info$DataType),
-        curveType = "measured",
-        data = data,
-        info = info
-      )
-    )
+    ##(2)
+    ##Loop over the list to create RLum.Data.Curve objects
+    RLum.Data.Curve.list <- lapply(1:length(data.list), function(x){
 
-  })
 
-  ##close ProgressBar
-  if(txtProgressBar & verbose){close(pb)}
+      ##get length of record
+      record.length <- length(data.list[[x]])
 
-  ##(3)
-  ##Now we have to find out how many aliquots we do have
-  positions.id <-  sapply(RLum.Data.Curve.list, function(x){
+      ##get header length until the argument 'Points'
+      header.length <- grep(pattern = "Points", x = data.list[[x]])
 
-    get_RLum(x, info.object = "position")
+      if(length(header.length)>0){
+        temp.meta_data <- unlist(strsplit(data.list[[x]][2:header.length], split = "=", fixed = TRUE))
 
-  })
+      }else{
+        temp.meta_data <- unlist(strsplit(data.list[[x]][2:length(data.list[[x]])], split = "=", fixed = TRUE))
 
-  ##(4)
-  ##now combine everyting in an RLum.Analysis object in accordance to the position number
-  RLum.Analysis.list <- lapply(unique(positions.id), function(x){
+      }
 
-    ##get list ids for position number
-    n <- which(positions.id == x)
+      ##get list names for the info element list
+      info.names <- temp.meta_data[seq(1,length(temp.meta_data), by = 2)]
 
-    ##make list
-    temp.list <- lapply(n, function(x){
-      RLum.Data.Curve.list[[x]]
+      ##info elements
+      info <- as.list(temp.meta_data[seq(2,length(temp.meta_data), by = 2)])
+      names(info) <- info.names
+
+      ##add position, which is 'Disk'
+      info <- c(info, position = as.integer(info$Disk))
+
+      if(length(header.length)>0){
+        ##get measurement data
+        temp.data <- unlist(strsplit(unlist(strsplit(
+          data.list[[x]][12:length(data.list[[x]])], split = "="
+        )), split = ";"))
+
+        ##grep only data of interest
+        point.x <-
+          suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(2, length(temp.data), by = 4)])))
+        point.y <-
+          suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(3,length(temp.data), by = 4)])))
+
+
+        ##combine it into a matrix
+        data <- matrix(c(point.x,point.y), ncol = 2)
+
+      }else{
+
+        ##we presume this should be irradiation ...
+        if ("IrradTime" %in% names(info)) {
+
+          point.x <- 1:as.numeric(info$IrradTime)
+          point.y <- rep(1, length(point.x))
+
+          data <- matrix(c(point.x,point.y), ncol = 2)
+
+        }
+
+      }
+
+      ##update progress bar
+      if (txtProgressBar & verbose) {
+        setTxtProgressBar(pb, x)
+      }
+
+      ##return RLum object
+      return(
+        set_RLum(
+          class = "RLum.Data.Curve",
+          originator = "read_Daybreak2R",
+          recordType = sub(" ", replacement = "_", x = info$DataType),
+          curveType = "measured",
+          data = data,
+          info = info
+        )
+      )
 
     })
 
-    ##put in RLum.Analysis object
-    object <- set_RLum(
-      class = "RLum.Analysis",
-      originator = "read_Daybreak2R",
-      protocol = "Custom",
-      records = temp.list
-    )
+    ##close ProgressBar
+    if(txtProgressBar & verbose){close(pb)}
 
-    ##set parent id of records
-    object <- .set_pid(object)
+    ##(3)
+    ##Now we have to find out how many aliquots we do have
+    positions.id <-  sapply(RLum.Data.Curve.list, function(x){
 
-    return(object)
+      get_RLum(x, info.object = "position")
 
+    })
 
-  })
+    ##(4)
+    ##now combine everyting in an RLum.Analysis object in accordance to the position number
+    RLum.Analysis.list <- lapply(unique(positions.id), function(x){
 
-  ##TERMINAL FEEDBACK
-  if(verbose){
-    cat(paste0("\n ",length(unlist(get_RLum(RLum.Analysis.list))), " records have been read sucessfully!\n"))
-  }
+      ##get list ids for position number
+      n <- which(positions.id == x)
 
-  return(RLum.Analysis.list)
+      ##make list
+      temp.list <- lapply(n, function(x){
+        RLum.Data.Curve.list[[x]]
+
+      })
+
+      ##put in RLum.Analysis object
+      object <- set_RLum(
+        class = "RLum.Analysis",
+        originator = "read_Daybreak2R",
+        protocol = "Custom",
+        records = temp.list
+      )
+
+      ##set parent id of records
+      object <- .set_pid(object)
+
+      return(object)
+
+
+    })
+
+    ##TERMINAL FEEDBACK
+    if(verbose){
+      cat(paste0("\n ",length(unlist(get_RLum(RLum.Analysis.list))), " records have been read sucessfully!\n"))
+    }
+
+    return(RLum.Analysis.list)
+    }
 }
diff --git a/R/read_PSL2R.R b/R/read_PSL2R.R
new file mode 100644
index 0000000..333d79e
--- /dev/null
+++ b/R/read_PSL2R.R
@@ -0,0 +1,313 @@
+#' Import PSL files to R
+#' 
+#' Imports PSL files produced by a SUERC portable OSL reader into R \bold{(BETA)}.
+#'
+#' This function provides an import routine for the SUERC portable OSL Reader PSL format.
+#' PSL files are just plain text and can be viewed with any text editor. Due to the 
+#' formatting of PSL files this import function relies heavily on regular expressions to find and 
+#' extract all relevant information. See \bold{note}.
+#'
+#' @param file \code{\link{character}} (\bold{required}): path and file name of the
+#' PSL file. If input is a \code{vector} it should comprise only \code{character}s representing
+#' valid paths and PSL file names.
+#' Alternatively the input character can be just a directory (path). In this case the
+#' function tries to detect and import all PSL files found in the directory.
+#'
+#' @param drop_bg \code{\link{logical}} (with default): \code{TRUE} to automatically 
+#' remove all non-OSL/IRSL curves.
+#'
+#' @param as_decay_curve  \code{\link{logical}} (with default): Portable OSL Reader curves
+#' are often given as cumulative light sum curves. Use \code{TRUE} (default) to convert
+#' the curves to the more usual decay form.
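+#' (The decay form is taken from the 'counts per cycle' column of the PSL file
+#' rather than being re-calculated by differencing the cumulative counts.)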
+#' 
+#' @param smooth \code{\link{logical}} (with default): \code{TRUE} to apply 
+#' Tukey's Running Median Smoothing for OSL and IRSL decay curves. Smoothing is
+#' encouraged if you see random signal drops within the decay curves related 
+#' to hardware errors.
+#' 
+#' @param merge \code{\link{logical}} (with default): \code{TRUE} to merge all 
+#' \code{RLum.Analysis} objects. Only applicable if multiple files are imported.
+#' 
+#' @param ... currently not used.
+#'
+#' @return Returns an S4 \code{\linkS4class{RLum.Analysis}} object containing
+#' \code{\linkS4class{RLum.Data.Curve}} objects for each curve.  
+#' 
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+#'
+#' @author Christoph Burow, University of Cologne (Germany)
+#'
+#' @section Function version: 0.0.1
+#' 
+#' @note Because this function relies heavily on regular expressions to parse 
+#' PSL files it is currently only in beta status. If the routine fails to import
+#' a specific PSL file please report to <christoph.burow@@uni-koeln.de> so the
+#' function can be updated.
+#'
+#' @keywords IO
+#' 
+#' @examples
+#' 
+#' # (1) Import PSL file to R
+#' 
+#' \dontrun{
+#' FILE <- file.choose()
+#' temp <- read_PSL2R(FILE)
+#' temp
+#' }
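+#' 
+#' # (2) Usage sketch (assumes a folder containing only PSL files): import all
+#' # files, drop non-OSL/IRSL curves and merge everything into one object
+#' \dontrun{
+#' merged <- read_PSL2R("/path/to/folder", drop_bg = TRUE, merge = TRUE)
+#' }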
+#' 
+#' @export
+read_PSL2R <- function(file, drop_bg = FALSE, as_decay_curve = TRUE, smooth = FALSE, merge = FALSE, ...) {
+  
+  ## INPUT VALIDATION ----
+  if (length(file) == 1) {
+    if (!grepl(".psl$", file, ignore.case = TRUE)) {
+      file <- list.files(file, pattern = ".psl$", full.names = TRUE, ignore.case = TRUE) 
+      message("The following files were found and imported: \n", paste(file, collapse = "\n"))
+    }
+  }
+  if (!all(file.exists(file)))
+    stop("The following files do not exist, please check: \n",
+         paste(file[!file.exists(file)], collapse = "\n"), call. = FALSE)
+    
+  ## MAIN ----
+  results <- vector("list", length(file))
+  
+  for (i in 1:length(file)) {
+    
+    ## Read in file ----
+    doc <- readLines(file[i])
+    
+    ## Document formatting ----
+    # remove lines with i) blanks only, ii) dashes, iii) equal signs
+    doc <- gsub("^[ ]*$", "", doc)
+    doc <- gsub("^[ -]*$", "", doc)
+    doc <- gsub("^[ =]*$", "", doc)
+    
+    # the header ends with date and time with the previous line starting with a single slash
+    lines_with_slashes <- doc[grepl("\\", doc, fixed = TRUE)]
+
+    ## OFFENDING LINE: this deletes the line with sample name and time and date
+    sample_and_date <- lines_with_slashes[length(lines_with_slashes)]
+    sample <- gsub("[^0-9a-zA-Z\\-_]", "",strsplit(sample_and_date, "@")[[1]][1], perl = TRUE)
+    date_and_time <- strsplit(strsplit(sample_and_date, "@")[[1]][2], " ")[[1]]
+    date_and_time_clean <- date_and_time[date_and_time != "" & date_and_time != "/" & date_and_time != "PM" & date_and_time != "AM"]
+    date <- as.Date(date_and_time_clean[1], "%m/%d/%Y")
+    time <- format(date_and_time_clean[2], format = "%h:%M:%S")
+    doc <- gsub(lines_with_slashes[length(lines_with_slashes)], 
+                "", fixed = TRUE, doc)
+    
+    # last delimiting line before measurements are only apostrophes and dashes
+    lines_with_apostrophes <- doc[grepl("'", doc, fixed = TRUE)]
+    doc <- gsub(lines_with_apostrophes[length(lines_with_apostrophes)],
+                "", fixed = TRUE, doc)
+    
+    # finally remove all empty lines
+    doc <- doc[doc != ""]
+    
+    ## Split document ----
+    begin_of_measurements <- grep("Measurement :", doc, fixed = TRUE)
+    number_of_measurements <- length(begin_of_measurements)
+    
+    # Parse and format header
+    header <- doc[1:(begin_of_measurements[1]-1)]
+    header <- format_Header(header)
+    
+    # add sample name, date and time to header list
+    header$Date <- date
+    header$Time <- time
+    header$Sample <- sample
+    
+    # Parse and format the measurement values
+    measurements_split <- vector("list", number_of_measurements)
+    
+    # save lines of each measurement to individual list elements
+    for (j in seq_len(number_of_measurements)) {
+      if (j != max(number_of_measurements))
+        measurements_split[[j]] <- doc[begin_of_measurements[j]:(begin_of_measurements[j+1] - 1)]
+      else 
+        measurements_split[[j]] <- doc[begin_of_measurements[j]:length(doc)]
+    }
+    
+    # format each measurement; this will return a list of RLum.Data.Curve objects
+    measurements_formatted <- lapply(measurements_split, function(x) { 
+      format_Measurements(x, convert = as_decay_curve, header = header)
+    })
+    
+    # drop dark count measurements if needed
+    if (drop_bg) {
+      measurements_formatted <- lapply(measurements_formatted, function(x) {
+        if (x@recordType != "USER")
+          return(x)
+      })
+      measurements_formatted <- measurements_formatted[!sapply(measurements_formatted, is.null)]
+    }
+    
+    # decay curve smoothing using Tukey's Running Median Smoothing (?smooth)
+    if (smooth) {
+      measurements_formatted <- lapply(measurements_formatted, function(x) {
+        if (x@recordType != "USER")
+          x@data[,2] <- smooth(x@data[ ,2])
+        return(x)
+      })
+    }
+    
+    ## RETURN ----
+    results[[i]] <- set_RLum("RLum.Analysis",
+                             protocol = "portable OSL",
+                             info = header,
+                             records = measurements_formatted)
+  }#Eof::Loop
+  
+  ## MERGE ----
+  if (length(results) > 1 && merge)
+    results <- merge_RLum(results)
+  
+  ## RETURN ----
+  if (length(results) == 1)
+    results <- results[[1]]
+  
+  return(results)
+}  
+
+################################################################################
+## HELPER FUNCTIONS
+################################################################################
+
+
+## ------------------------- FORMAT MEASUREMENT ----------------------------- ##
+format_Measurements <- function(x, convert, header) {
+  
+  
+  ## measurement parameters are given in the first line
+  settings <- x[1]
+  
+  settings_split <- unlist(strsplit(settings, "|", fixed = TRUE))
+  
+  # welcome to regex/strsplit hell
+  settings_measurement <- trimws(gsub(".*: ", "", settings_split[which(grepl("Measure", settings_split))]))
+  settings_stimulation_unit <- gsub("[^0-9]", "", settings_split[which(grepl("Stim", settings_split))])
+  settings_on_time <- as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("Off", settings_split))]), ","))[1])
+  settings_off_time <- as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("Off", settings_split))]), ","))[2])
+  settings_cycle <- na.omit(as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("No", settings_split))]), ","))))[1]
+  settings_stimulation_time <- na.omit(as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("No", settings_split))]), ","))))[2]
+  
+  settings_list <- list("measurement" = settings_measurement,
+                        "stimulation_unit" = switch(settings_stimulation_unit, "0" = "USER", "1" = "IRSL", "2" = "OSL"),
+                        "on_time" = settings_on_time,
+                        "off_time" = settings_off_time,
+                        "cycle" = settings_cycle,
+                        "stimulation_time" = settings_stimulation_time)
+  
+  ## terminal counts are given in the last line
+  terminal_count_text <- x[length(x)]
+  
+  terminal_count_text_formatted <- gsub("[^0-9]", "", 
+                                        unlist(strsplit(terminal_count_text, "/")))
+  
+  terminal_count <- as.numeric(terminal_count_text_formatted[1])
+  terminal_count_error <- as.numeric(terminal_count_text_formatted[2])
+  
+  
+  ## parse values and create a data frame
+  x_stripped <- x[-c(1, 2, length(x))]
+  
+  df <- data.frame(matrix(NA, ncol = 5, nrow = length(x_stripped)))
+  
+  for (i in 1:length(x_stripped)) {
+    x_split <- unlist(strsplit(x_stripped[i], " "))
+    x_split <- x_split[x_split != ""]
+    x_split_clean <- gsub("[^0-9\\-]", "", x_split)
+    x_split_cleaner <- x_split_clean[x_split_clean != "-"]
+    
+    df[i, ] <- as.numeric(x_split_cleaner)
+  }
+  
+  names(df) <- c("time", "counts", "counts_error", 
+                 "counts_per_cycle", "counts_per_cycle_error")
+  
+  
+  # shape of the curve: decay or cumulative
+  if (convert)
+    data <- matrix(c(df$time, df$counts_per_cycle), ncol = 2)
+  else 
+    data <- matrix(c(df$time, df$counts), ncol = 2)
+  
+  # determine the stimulation type
+  if (grepl("Stim 0", settings)) {
+    recordType <- "USER"
+  } 
+  if (grepl("Stim 1", settings)) {
+    recordType <- "IRSL"
+  }
+  if (grepl("Stim 2", settings)) {
+    recordType <- "OSL"
+  }
+  
+  object <- set_RLum(class = "RLum.Data.Curve",
+                     originator = "read_PSL2R",
+                     recordType = recordType,
+                     curveType = "measured",
+                     data = data,
+                     info = list(settings = c(settings_list, header),
+                                 raw_data = df))
+  
+  return(object)
+  
+}
+
+## ---------------------------- FORMAT HEADER ------------------------------- ##
+format_Header <- function(x) {
+  
+  header_formatted <- list()
+  
+  # split by double blanks 
+  header_split <- strsplit(x, "  ", fixed = TRUE)
+  
+  # check whether there are twice as many values
+  # as colons; if there is an equal amount, the previous split was not sufficient
+  # and we need to further split by a colon (that is followed by a blank)
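+  # e.g. a hypothetical line "Threshold: 10  Offset: 2" splits on double blanks
+  # into "Threshold: 10" and "Offset: 2" and is then split again on ": " to give
+  # name/value pairs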
+  header_split_clean <- lapply(header_split, function(x) {
+    
+    x <- x[x != ""]
+    n_elements <- length(x)
+    n_properties <- length(grep(":", x, fixed = TRUE))
+    
+    if (n_elements / n_properties == 1)
+      x <- unlist(strsplit(x, ": ", fixed = TRUE))
+    
+    return(x)
+  })
+  
+  
+  # format parameter/settings names and corresponding values
+  values <- vector(mode = "character")
+  names <- vector(mode = "character")
+  
+  for (i in 1:length(header_split_clean)) {
+    for (j in seq(1, length(header_split_clean[[i]]), 2)) {
+      names <- c(names, header_split_clean[[i]][j])
+      values <- c(values, header_split_clean[[i]][j + 1])
+    }
+  }
+  
+  # some RegExing for nice reading
+  names <- gsub("[: ]$", "", names, perl = TRUE)
+  names <- gsub("^ ", "", names)
+  names <- gsub(" $", "", names)
+  # for some weird reason "offset subtract" starts with '256 '
+  names <- gsub("256 ", "", names)
+  # finally, replace all blanks with underscores
+  names <- gsub(" ", "_", names)
+  
+  values <- gsub("[: ]$", "", values, perl = TRUE)
+  values <- gsub("^ ", "", values)
+  values <- gsub(" $", "", values)
+  
+  # return header as list
+  header <- as.list(values)
+  names(header) <- names
+  
+  return(header) 
+}
\ No newline at end of file
diff --git a/R/read_SPE2R.R b/R/read_SPE2R.R
index e1b3740..e0601e0 100644
--- a/R/read_SPE2R.R
+++ b/R/read_SPE2R.R
@@ -62,8 +62,7 @@
 #' \code{\link[raster]{raster}}
 #'
 #' @references Princeton Instruments, 2014. Princeton Instruments SPE 3.0 File
-#' Format Specification, Version 1.A,
-#' \url{ftp://ftp.princetoninstruments.com/Public/Manuals/Princeton\%20Instruments/SPE\%203.0\%20File\%20Format\%20Specification.pdf}
+#' Format Specification, Version 1.A (for the document URL please use an internet search engine)
 #'
 #' Hall, C., 2012: readSPE.m.
 #' \url{http://www.mathworks.com/matlabcentral/fileexchange/35940-readspe/content/readSPE.m}
diff --git a/R/read_XSYG2R.R b/R/read_XSYG2R.R
index 063ba49..3aa88c7 100644
--- a/R/read_XSYG2R.R
+++ b/R/read_XSYG2R.R
@@ -98,7 +98,7 @@
 #' the XSXG file are skipped.
 #'
 #'
-#' @section Function version: 0.5.7
+#' @section Function version: 0.5.8
 #'
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
@@ -216,14 +216,15 @@ read_XSYG2R <- function(
     return(NULL)
   }
 
-  ##check if file is XML file
-  if(tail(unlist(strsplit(file, split = "\\.")), 1) != "xsyg" &
-     tail(unlist(strsplit(file, split = "\\.")), 1) != "XSYG" ){
-
-    warning("[read_XSYG2R()] File is not of type 'XSYG', nothing imported!")
-    return(NULL)
-
-  }
+  #TODO to be included again in a future version, if the format is given in the file itself
+  # ##check if file is XML file
+  # if(tail(unlist(strsplit(file, split = "\\.")), 1) != "xsyg" &
+  #    tail(unlist(strsplit(file, split = "\\.")), 1) != "XSYG" ){
+  #
+  #   warning("[read_XSYG2R()] File is not of type 'XSYG', nothing imported!")
+  #   return(NULL)
+  #
+  # }
 
   # (0) config --------------------------------------------------------------
   #version.supported <- c("1.0")
diff --git a/R/set_RLum.R b/R/set_RLum.R
index 996b8ce..b65a2de 100644
--- a/R/set_RLum.R
+++ b/R/set_RLum.R
@@ -65,8 +65,15 @@ setGeneric("set_RLum", function (class, originator, .uid = .create_UID(), .pid =
   class(class) <- as.character(class)
 
   if(missing(originator)) {
-    if (is(sys.call(which = -1)[[1]], "name")) {
+    if (is(sys.call(which = -1)[[1]], "language")) {
       originator <- as.character(sys.call(which = -1)[[1]])
+
+      ##account for calls using the double colons, in this case the vector is
+      ##of length 3, not only 1
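+      ##e.g. as.character() applied to the call object Luminescence::set_RLum
+      ##yields c("::", "Luminescence", "set_RLum"), so element 3 is the function name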
+      if(length(originator) == 3){
+        originator <- originator[3]
+      }
+
     } else{
       originator <- NA_character_
     }
diff --git a/R/set_Risoe.BINfileData.R b/R/set_Risoe.BINfileData.R
index 2409e71..c050236 100644
--- a/R/set_Risoe.BINfileData.R
+++ b/R/set_Risoe.BINfileData.R
@@ -18,8 +18,11 @@
 #' @seealso
 #' \code{\linkS4class{Risoe.BINfileData}}
 #' @keywords utilities
-#' 
+#'
 #' @export
-setGeneric("set_Risoe.BINfileData", function(METADATA, DATA, .RESERVED) {
-  standardGeneric("set_Risoe.BINfileData")
-})
+setGeneric("set_Risoe.BINfileData",
+  function(METADATA = data.frame(), DATA = list(), .RESERVED = list()) {
+    standardGeneric("set_Risoe.BINfileData")
+  },
+  package = "Luminescence"
+)
diff --git a/R/smooth_RLum.R b/R/smooth_RLum.R
new file mode 100644
index 0000000..5310585
--- /dev/null
+++ b/R/smooth_RLum.R
@@ -0,0 +1,73 @@
+#' Smoothing of data
+#'
+#' Function calls the object-specific smooth functions for provided RLum S4-class objects.
+#'
+#' The function provides a generalised access point for specific
+#' \code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+#' corresponding function will be selected. Allowed arguments can be found
+#' in the documentations of the corresponding \code{\linkS4class{RLum}} class. The smoothing
+#' is based on an internal function called \code{.smoothing}.
+#'
+#' @param object \code{\linkS4class{RLum}} (\bold{required}): S4 object of
+#' class \code{RLum}
+#'
+#' @param ... further arguments passed to the specific class method
+#'
+#' @return An object of the same type as the input object is provided
+#'
+#' @section Function version: 0.1.0
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+#' (France)
+#'
+#' @note Currently only \code{RLum} objects of class \code{RLum.Data.Curve} and \code{RLum.Analysis} (with curve data) are supported!
+#'
+#' @seealso
+#' \code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Analysis}}
+#'
+#' @examples
+#'
+#' ##load example data
+#' data(ExampleData.CW_OSL_Curve, envir = environment())
+#'
+#' ##create RLum.Data.Curve object from this example
+#' curve <-
+#'   set_RLum(
+#'       class = "RLum.Data.Curve",
+#'       recordType = "OSL",
+#'       data = as.matrix(ExampleData.CW_OSL_Curve)
+#'   )
+#'
+#' ##plot data without and with smoothing
+#' plot_RLum(curve)
+#' plot_RLum(smooth_RLum(curve))
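+#'
+#' ##the list method applies the smoothing to each RLum object in the list (sketch)
+#' smooth_RLum(list(curve, curve))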
+#'
+#' @keywords utilities
+#'
+#' @export
+setGeneric("smooth_RLum", function(object, ...) {
+  standardGeneric("smooth_RLum")
+
+})
+
+# Method for smooth_RLum method for RLum objects in a list for a list of objects  -------------------
+#' @describeIn smooth_RLum
+#' Returns a list of \code{\linkS4class{RLum}} objects that had been passed to \code{\link{smooth_RLum}}
+#'
+#'
+#' @export
+setMethod("smooth_RLum",
+signature = "list",
+function(object, ...){
+
+  ##apply the method to the objects and return the same structure
+  lapply(object, function(x){
+    if(inherits(x, "RLum")){
+      return(smooth_RLum(x,...))
+    }else{
+      return(x)
+    }
+
+  })
+
+})
diff --git a/R/template_DRAC.R b/R/template_DRAC.R
index 3fc7eea..9c1fa08 100644
--- a/R/template_DRAC.R
+++ b/R/template_DRAC.R
@@ -37,7 +37,7 @@
 #' input$`Project ID` <- "DRAC-Example"
 #' input$`Sample ID` <- "Quartz"
 #' input$`Conversion factors` <- "AdamiecAitken1998"
-#' input$`ExternalU (ppm)` <- 3.4
+#' input$`External U (ppm)` <- 3.4
 #' input$`errExternal U (ppm)` <- 0.51
 #' input$`External Th (ppm)` <- 14.47
 #' input$`errExternal Th (ppm)` <- 1.69
@@ -119,7 +119,7 @@ template_DRAC <- function(nrow = 1, notification = TRUE) {
       structure(factor(rep("Liritzisetal2013", nrow), c("AdamiecAitken1998", "Guerinetal2011", "Liritzisetal2013", "X")), required = FALSE, allowsX = TRUE, key = "TI:4",
                 description = "The conversion factors required to calculate dose rates from radionuclide concentrations. Users have the option of datasets from Adamiec and Aitken (1998), Guerin et al. (2011) or Liritzis et al. (2013). Input must be 'AdamiecAitken1998', 'Guerinetal2011', 'Liritzisetal2013' or 'X' if conversion factors are not required."), #
     
-    `ExternalU (ppm)` = 
+    `External U (ppm)` = 
       structure(rep(0, nrow), required = FALSE, allowsX = TRUE, key = "TI:5",
                 description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), # 
     
@@ -273,31 +273,31 @@ template_DRAC <- function(nrow = 1, notification = TRUE) {
     
     `Depth (m)` = 
       structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:43",
-                description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank. If user defined Dc will be used then an 'X' must be input."), #
+                description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank."), #
     
     `errDepth (m)` = 
       structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:44",
-                description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank. If user defined Dc will be used then an 'X' must be input."), #
+                description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank."), #
     
     `Overburden density (g cm-3)` = 
-      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:45",
-                description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. If user defined Dc will be used then an 'X' must be input."), #
+      structure(rep(1.8, nrow), required = TRUE, allowsX = FALSE, key = "TI:45",
+                description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. The scaling calculation will use the overburden density and uncertainty provided."), #
     
     `errOverburden density (g cm-3)` = 
-      structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:46",
-                description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. If user defined Dc will be used then an 'X' must be input."), #
+      structure(rep(0.1, nrow), required = TRUE, allowsX = FALSE, key = "TI:46",
+                description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. The scaling calculation will use the overburden density and uncertainty provided."), #
     
     `Latitude (decimal degrees)` = 
       structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:47",
-                description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude. If user defined Dc will be used then an 'X' must be input."), # 
+                description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude."), # 
     
     `Longitude (decimal degrees)` = 
       structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:48",
-                description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude. If user defined Dc will be used then an 'X' must be input."), # 
+                description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude."), # 
     
     `Altitude (m)` = 
       structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:49",
-                description = "Altitude of sample location in metres above sea level. Input should be less than 5000 and not left blank. If user defined Dc will be used then an 'X' must be input."), #
+                description = "Altitude of sample location in metres above sea level. Input should be less than 5000 and not left blank."), #
     
     `User cosmicdoserate (Gy.ka-1)` = 
       structure(rep("X", nrow), required = FALSE, allowsX = TRUE, key = "TI:50",
diff --git a/R/use_DRAC.R b/R/use_DRAC.R
index dcedf04..93d8d1a 100644
--- a/R/use_DRAC.R
+++ b/R/use_DRAC.R
@@ -34,7 +34,7 @@
 #'
 #' The output should be accessed using the function \code{\link{get_RLum}}.
 #'
-#' @section Function version: 0.1.0
+#' @section Function version: 0.1.1
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Michael Dietze,
 #' GFZ Potsdam (Germany), Christoph Burow, University of Cologne (Germany)\cr
@@ -48,7 +48,7 @@
 #'
 #' ## (1) Method using the DRAC spreadsheet
 #'
-#' file <-  "/PATH/TO/DRAC_Input_and_Output_Template.xlsx"
+#' file <-  "/PATH/TO/DRAC_Input_Template.csv"
 #'
 #' # send the actual IO template spreadsheet to DRAC
 #' \dontrun{
@@ -66,7 +66,7 @@
 #' input$`Project ID` <- "DRAC-Example"
 #' input$`Sample ID` <- "Quartz"
 #' input$`Conversion factors` <- "AdamiecAitken1998"
-#' input$`ExternalU (ppm)` <- 3.4
+#' input$`External U (ppm)` <- 3.4
 #' input$`errExternal U (ppm)` <- 0.51
 #' input$`External Th (ppm)` <- 14.47
 #' input$`errExternal Th (ppm)` <- 1.69
@@ -106,53 +106,65 @@ use_DRAC <- function(
   ##
   ## (2)
   ## Leave it to the user where the calculations made in our package should be used
-
+  
   # Integrity tests -----------------------------------------------------------------------------
   if (inherits(file, "character")) {
     if(!file.exists(file)){
       stop("[use_DRAC()] It seems that the file doesn't exist!")
-
+      
     }
-
+    
     # Import data ---------------------------------------------------------------------------------
-
-    ## Import and skipt the first rows and remove NA lines and the 2 row, as this row contains
+    
+    ## Import and skip the first rows, remove NA lines and the 2nd row, as this row contains
     ## only meta data
-
+    
+    ## DRAC v1.1 - XLS sheet
     ##check if is the original DRAC table
-    if (readxl::excel_sheets(file)[1] != "DRAC_1.1_input") {
-      stop("[use_DRAC()] It looks like that you are not using the original DRAC XLSX template. This is currently
-         not supported!")
+    if (tools::file_ext(file) == "xls" || tools::file_ext(file) == "xlsx") {
+      if (readxl::excel_sheets(file)[1] != "DRAC_1.1_input")
+        stop("[use_DRAC()] It looks like that you are not using the original DRAC v1.1 XLSX template. This is currently not supported!")
+      
+      warning("\n[use_DRAC()] The current DRAC version is 1.2, but you provided the v1.1 excel input template.", 
+              "\nPlease transfer your data to the new CSV template introduced with DRAC v1.2.", call. = FALSE)
+      input.raw <- na.omit(as.data.frame(readxl::read_excel(path = file, sheet = 1, skip = 5)))[-1, ]
     }
-    input.raw <- na.omit(as.data.frame(readxl::read_excel(path = file, sheet = 1, skip = 5)))[-1, ]
-
+    
+    ## DRAC v1.2 - CSV sheet
+    if (tools::file_ext(file) == "csv") {
+      if (read.csv(file, nrows = 1, header = FALSE)[1] != "DRAC v.1.2 Inputs")
+        stop("[use_DRAC()] It looks like that you are not using the original DRAC v1.2 CSV template. This is currently not supported!")
+      
+      input.raw <- read.csv(file, skip = 8, check.names = FALSE, header = TRUE, stringsAsFactors = FALSE)[-1, ]
+    }
+    
   } else if (inherits(file, "DRAC.list")) {
     input.raw <- as.data.frame(file)
-
+    
   } else if (inherits(file, "DRAC.data.frame")) {
     input.raw <- file
-
+    
   } else {
     stop("The provided data object is not a valid DRAC template.", call. = FALSE)
   }
   
   if (nrow(input.raw) > 50)
     stop("DRAC can only handle 50 data sets at once. Please reduce the number of rows and re-run this function again.", call. = FALSE)
-
+  
   # Settings ------------------------------------------------------------------------------------
   settings <- list(name = ifelse(missing(name),
                                  paste(sample(if(runif(1,-10,10)>0){LETTERS}else{letters},
                                               runif(1, 2, 4)), collapse = ""),
                                  name),
                    verbose = TRUE,
-                   url = "https://www.aber.ac.uk/en/iges/research-groups/quaternary/luminescence-research-laboratory/dose-rate-calculator/?show=calculator")
+                   url = "https://www.aber.ac.uk/en/dges/research/quaternary/luminescence-research-laboratory/dose-rate-calculator/?show=calculator")
   
   # override defaults with args in ...
   settings <- modifyList(settings, list(...))
-
+  
   # Set helper function -------------------------------------------------------------------------
   ## The real data are transferred without any encryption, so we have to mask the original
-
+  
   ##(0) set masking function
   .masking <- function(mean, sd, n) {
     temp <- rnorm(n = 30 * n, mean = mean,sd = sd)
@@ -163,17 +175,17 @@ use_DRAC <- function(
       })
     return(t(temp.result))
   }
-
-
+  
+  
   # Process data --------------------------------------------------------------------------------
   if (settings$verbose) message("\n\t Preparing data...")
-
+  
   ##(1) expand the rows in the data.frame a little bit
   mask.df <-  input.raw[rep(1:nrow(input.raw), each = 3), ]
-
+  
   ##(2) generate some meaningful random variables
   mask.df <- lapply(seq(1, nrow(input.raw), by = 3), function(x) {
-
+    
     if (mask.df[x,"TI:52"] != "X") {
       ##replace some values - the De value
       mask.df[x:(x + 2), c("TI:52","TI:53")] <- .masking(
@@ -182,42 +194,42 @@ use_DRAC <- function(
         n = 3)
       return(mask.df)
     }
-
+    
   })
-
+  
   ##(3) bin values
   DRAC_submission.df <- rbind(input.raw,mask.df[[1]])
-
-
+  
+  
   ##(4) replace ID values
   DRAC_submission.df$`TI:1` <-   paste0(paste0(paste0(sample(if(runif(1,-10,10)>0){LETTERS}else{letters},
                                                              runif(1, 2, 4)), collapse = ""),
                                                ifelse(runif(1,-10,10)>0, "-", "")),
                                         gsub(" ", "0", prettyNum(seq(sample(1:50, 1, prob = 50:1/50, replace = FALSE),
                                                                      by = 1, length.out = nrow(DRAC_submission.df)), width = 2)))
-
-
-
+  
+  
+  
   ##(5) store the real IDs in a separate object
   DRAC_results.id <-  DRAC_submission.df[1:nrow(input.raw), "TI:1"]
-
+  
   ##(6) create DRAC submission string
   DRAC_submission.df <- DRAC_submission.df[sample(x = 1:nrow(DRAC_submission.df), nrow(DRAC_submission.df),
                                                   replace = FALSE), ]
-
+  
   ##convert all columns of the data.frame to class 'character'
   for (i in 1:ncol(DRAC_submission.df))
     DRAC_submission.df[ ,i] <- as.character(DRAC_submission.df[, i])
-
+  
   if (settings$verbose) message("\t Creating submission string...")
   ##get line by line and remove unwanted characters
   DRAC_submission.string <- sapply(1:nrow(DRAC_submission.df), function(x) {
     paste0(gsub(",", "", toString(DRAC_submission.df[x, ])), "\n")
   })
-
+  
   ##paste everything together to get the format we want
   DRAC_input <- paste(DRAC_submission.string, collapse = "")
-
+  
   # Send data to DRAC ---------------------------------------------------------------------------
   if (settings$verbose) message(paste("\t Establishing connection to", settings$url))
 
@@ -225,7 +237,7 @@ use_DRAC <- function(
   DRAC.response <- httr::POST(settings$url,
                               body = list("drac_data[name]"  = settings$name,
                                           "drac_data[table]" = DRAC_input))
-
+  
   ## check for correct response
   if (DRAC.response$status_code != 200) {
     stop(paste0("[use_DRAC()] transmission failed with HTTP status code: ",
@@ -233,65 +245,80 @@ use_DRAC <- function(
   } else {
     if (settings$verbose) message("\t The request was successful, processing the reply...")
   }
-
+  
   ## assign DRAC response data to variables
   http.header <- DRAC.response$header
   DRAC.content <- httr::content(x = DRAC.response, as = "text")
-
+  
   ## if the input was valid from a technical standpoint, but not with regard
   ## contents, we indeed get a valid response, but no DRAC output
   if (!grepl("DRAC Outputs", DRAC.content)) {
+    error_start <- max(gregexpr("drac_field_error", DRAC.content)[[1]])
+    error_end <- regexec('textarea name=', DRAC.content)[[1]]
+    error_msg <- substr(DRAC.content, error_start, error_end)
+    
+    on.exit({
+      reply <- readline("Do you want to see the DRAC error message (Y/N)?")
+      if (reply == "Y" || reply == "y" || reply == 1)
+        cat(error_msg)
+    })
+    
     stop(paste("\n\t We got a response from the server, but it\n",
-                       "\t did not contain DRAC output. Please check\n",
-                       "\t your data and verify its validity.\n"),
+               "\t did not contain DRAC output. Please check\n",
+               "\t your data and verify its validity.\n"),
          call. = FALSE)
   } else {
     if (settings$verbose) message("\t Finalising the results...")
   }
-
+  
   ## split header and content
   DRAC.content.split <- strsplit(x = DRAC.content,
                                  split = "DRAC Outputs\n\n")
-
+  
   ## assign DRAC header part
   DRAC.header <- as.character(DRAC.content.split[[1]][1])
-
+  
   ## assign DRAC content part
   DRAC.raw <- read.table(text = as.character(DRAC.content.split[[1]][2]),
                          sep = ",",
                          stringsAsFactors = FALSE)
-
+  
   ## remove first two lines
-  DRAC.content <- DRAC.raw[-c(1, 2), ]
-
+  DRAC.content <- read.table(text = as.character(DRAC.content.split[[1]][2]),
+                             sep = ",", skip = 2,
+                             stringsAsFactors = FALSE)
+  
   ##Get rid of all the values we do not need anymore
   DRAC.content <-  subset(DRAC.content, DRAC.content$V1 %in% DRAC_results.id)
   DRAC.content <- DRAC.content[with(DRAC.content, order(V1)), ]
-
+  
   ##replace by original names
   DRAC.content[ ,1] <- input.raw[ ,1]
-
+  
   ## assign column names
   colnames(DRAC.content) <- DRAC.raw[1, ]
-
+  
   ## save column labels and use them as attributes for the I/O table columns
   DRAC.labels <- DRAC.raw[2, ]
   for (i in 1:length(DRAC.content)) {
     attr(DRAC.content[ ,i], "description") <- DRAC.labels[1,i]
   }
-
+  
   ## DRAC also returns the input, so we need to split input and output
   DRAC.content.input <- DRAC.content[ ,grep("TI:", names(DRAC.content))]
   DRAC.content.output <- DRAC.content[ ,grep("TO:", names(DRAC.content))]
-
+  
   ## The DRAC output also contains a highlight table, which results in
   ## duplicate columns. When creating the data.frame, duplicate columns
   ## automatically get '.1' appended to their names, so we can identify
   ## and remove them easily
   DRAC.content.input <- DRAC.content.input[ ,-grep("\\.1", names(DRAC.content.input))]
   DRAC.content.output <- DRAC.content.output[ ,-grep("\\.1", names(DRAC.content.output))]
+  
+  ## for some reason the returned input table is unsorted, so we re-sort it in increasing order
+  DRAC.content.input <- DRAC.content.input[ , paste0("TI:", 1:ncol(DRAC.content.input))]
 
-  ## The output table (v1.1) has 198 columns, making it unreasonable complex
+  ## The output table (v1.2) has 198 columns, making it unreasonably complex
   ## for standard data evaluation. We reproduce the DRAC highlight table
   ## and use the descriptions (saved as attributes) as column names.
   highlight.keys <- c("TI:1","TI:2","TI:3","TO:FQ","TO:FR",
@@ -305,26 +332,26 @@ use_DRAC <- function(
   for (i in 1:length(DRAC.highlights)) {
     attr(DRAC.highlights[ ,i], "key") <- highlight.keys[i]
   }
-
+  
   ## finally, we add the 'DRAC.highlights' class so that we can use a custom print method
   class(DRAC.highlights) <- c("DRAC.highlights", "data.frame")
-
+  
   ## Final Disclaimer
   messages <- list("\t Done! \n",
                    "\t We, the authors of the R package 'Luminescence', do not take any responsibility and we are not liable for any ",
                    "\t mistakes or unforeseen misbehaviour. All calculations are done by DRAC and it is outside our reference to",
                    "\t verify the input and output. \n",
-                   "\t Note that this function is only compatible with DRAC version 1.1. Before using this function make sure that",
+                   "\t Note that this function is only compatible with DRAC version 1.2. Before using this function make sure that",
                    "\t this is the correct version, otherwise expect unspecified errors.\n",
                    "\t Please ensure you cite the use of DRAC in your work, published or otherwise. Please cite the website name and",
-                   "\t version (e.g. DRAC v1.1) and the accompanying journal article:",
+                   "\t version (e.g. DRAC v1.2) and the accompanying journal article:",
                    "\t Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose rate and age calculation for trapped charge",
                    "\t dating. Quaternary Geochronology 28, 54-61. \n",
                    "\t Use 'verbose = FALSE' to hide this message. \n")
-
+  
   if (settings$verbose) lapply(messages, message)
-
-
+  
+  
   ## return output
   DRAC.return <- set_RLum("RLum.Results",
                           data = list(
@@ -337,6 +364,6 @@ use_DRAC <- function(
                             data = file,
                             call = sys.call(),
                             args = as.list(sys.call()[-1])))
-
+  
   invisible(DRAC.return)
 }
diff --git a/R/verify_SingleGrainData.R b/R/verify_SingleGrainData.R
index 7834e9c..aef7a93 100644
--- a/R/verify_SingleGrainData.R
+++ b/R/verify_SingleGrainData.R
@@ -1,9 +1,9 @@
-#' Verify single grain data sets and check for invalid grains, i.e. zero light level grains
+#' Verify single grain data sets and check for invalid grains, i.e. zero-light level grains
 #'
-#' This function tries to identify automatically zero light level curves (grains) from single grain data
+#' This function tries to automatically identify zero-light level curves (grains) from single grain data
 #' measurements. \cr
 #'
-#' \bold{How the method works?}\cr
+#' \bold{How does the method work?}\cr
 #'
 #' The function compares the expected value (\eqn{E(X)}) and the variance (\eqn{Var(X)})
 #' of the count values for each curve. Assuming that the background roughly follows a Poisson
@@ -15,11 +15,11 @@
 #'
 #' \deqn{abs(E(X) - Var(X)) >= \Theta}
 #'
-#' With \eqn{\Theta} an arbitray, user defined, threshold. Values above indicating curves
+#' With \eqn{\Theta} an arbitrary, user-defined threshold. Values above the threshold indicate curves
 #' comprising a signal.\cr
 #'
 #' Note: the absolute difference of \eqn{E(X)} and \eqn{Var(x)} instead of the ratio was chosen as
-#' both can become 0 which would result in \code{Inf} values.
+#' both terms can become 0, which would result in 0 or \code{Inf} if the ratio were calculated.
 #'
 #' @param object \code{\linkS4class{Risoe.BINfileData}} or \code{\linkS4class{RLum.Analysis}}
 #' (\bold{required}): input object. The function also accepts a list with objects of allowed type.
@@ -37,9 +37,9 @@
 #' curves of one aliquot (grain or disc) can be marked as invalid, but will not be removed. An aliquot
 #' will be only removed if all curves of this aliquot are marked as invalid.
 #'
-#' @param verbose \code{\link{logical}} (with default): enables or disables terminal feedback
+#' @param verbose \code{\link{logical}} (with default): enables or disables the terminal feedback
 #'
-#' @param plot \code{\link{logical}} (with default): enables or disables graphical feedback
+#' @param plot \code{\link{logical}} (with default): enables or disables the graphical feedback
 #'
 #' @return The function returns
 #'
@@ -62,7 +62,7 @@
 #'
 #' \bold{Output variation}\cr
 #'
-#' For \code{cleanup = TRUE} the same object as the input, but with cleaned up (invalid curves removed).
+#' For \code{cleanup = TRUE} the same object as the input is returned, but cleaned up (invalid curves removed).
 #' This means: Either an \code{\linkS4class{Risoe.BINfileData}} or an \code{\linkS4class{RLum.Analysis}}
 #' object is returned in such cases. An \code{\linkS4class{Risoe.BINfileData}} object can be exported
 #' to a BIN-file by using the function \code{\link{write_R2BIN}}.
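The zero-light-level criterion described above boils down to a per-curve comparison of the mean and the variance of the count values. A minimal sketch in R of just that idea, independent of the function's actual interface; 'counts' and 'threshold' are illustrative stand-ins for the count values of one curve and the user-defined threshold Theta:

    ## background of a zero-light level grain is roughly Poisson, so E(X) ~ Var(X);
    ## a curve carrying a signal shows a variance far in excess of its mean
    has_signal <- function(counts, threshold = 10) {
      abs(mean(counts) - var(counts)) >= threshold
    }

    has_signal(rpois(1000, lambda = 5))               # background only: FALSE expected
    has_signal(c(rpois(500, 500), rpois(500, 5)))     # decaying signal: TRUE expected
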
diff --git a/R/write_R2BIN.R b/R/write_R2BIN.R
index 82c19fa..e0c5966 100644
--- a/R/write_R2BIN.R
+++ b/R/write_R2BIN.R
@@ -42,7 +42,7 @@
 #' BIN/BINX-file may not be fully compatible, at least not similar to the ones
 #' directly produced by the Risoe readers!\cr
 #'
-#' @section Function version: 0.4.0
+#' @section Function version: 0.4.2
 #'
 #' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 #' (France)
@@ -56,7 +56,7 @@
 #' @references
 #'
 #' DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016.
-#' \url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+#' \url{http://www.nutech.dtu.dk/english/products-and-services/radiation-instruments/tl_osl_reader/manuals}
 #'
 #' @keywords IO
 #'
@@ -90,9 +90,8 @@ write_R2BIN <- function(
 
   }
 
-  ##check if it fullfills the last definition
-  if(ncol(object@METADATA)!=80){
-
+  ##check if it fulfils the latest definition
+  if(ncol(object@METADATA) != 80){
     stop("[write_R2BIN()] The number of columns in your slot 'METADATA' does not fit to the latest definition. What you are probably trying to do is to export a Risoe.BINfileData object you generated by your own or you imported with an old package version some time ago. Please re-import the BIN-file using the function read_BIN2R().")
 
   }
@@ -155,8 +154,7 @@ write_R2BIN <- function(
 
   ##check whether this file can be exported without problems due to the latest specifications
   if(ncol(object@METADATA) != 80){
-
-    stop("[write_R2BIN()] Your Risoe.BINfileData object seems not be compatible with the latest specification of this S4-class object. You are probably trying to export a Risoe.BINfileData from your workspace you produced manually or with an old version.")
+    stop("[write_R2BIN()] Your Risoe.BINfileData object seems not to be compatible with the latest specification of this S4-class object. You are probably trying to export a Risoe.BINfileData from your workspace you produced manually or with an old version.")
 
   }
 
@@ -301,7 +299,6 @@ write_R2BIN <- function(
      is(object@METADATA[1,"LTYPE"], "factor") == TRUE){

    object@METADATA[,"LTYPE"]<- sapply(1:length(object@METADATA[,"LTYPE"]),function(x){
-
      as.integer(LTYPE.TranslationMatrix[object@METADATA[x,"LTYPE"]==LTYPE.TranslationMatrix[,2],1])
 
     })
@@ -340,9 +337,6 @@ write_R2BIN <- function(
   ##before export
   object at METADATA[,"TAG"] <- ifelse(object at METADATA[,"SEL"] == TRUE, 1, 0)
 
-  ##
-
-
   # SET FILE AND VALUES -----------------------------------------------------
 
   con<-file(file, "wb")
@@ -540,7 +534,7 @@ write_R2BIN <- function(
                endian="little")
 
       ##BL_UNIT
-      writeBin(as.integer(object@METADATA[ID,"DTYPE"]),
+      writeBin(as.integer(object@METADATA[ID,"BL_UNIT"]),
                con,
                size = 1,
                endian="little")
@@ -601,12 +595,12 @@ write_R2BIN <- function(
 
       ##avoid problems with empty comments
       if(COMMENT_SIZE == 0){
-
         COMMENT_SIZE <- as.integer(2)
         object at METADATA[ID,"COMMENT"] <- "  "
 
       }
 
+
       writeBin(COMMENT_SIZE,
                con,
                size = 1,
@@ -655,7 +649,7 @@ write_R2BIN <- function(
                size = 2,
                endian="little")
 
-      ##Further distinction need to fully support format version 03 and 04 separately
+      ##Further distinction needed to fully support format versions 03 and 04 separately
       if(version == 03){
 
 
@@ -702,8 +696,8 @@ write_R2BIN <- function(
                    con,
                    size = 1,
                    endian="little")
-        }else{
 
+        }else{
           writeBin(object@.RESERVED[[ID]][[2]],
                    con,
                    size = 1,
@@ -835,7 +829,7 @@ write_R2BIN <- function(
                endian="little")
 
       if(version == 08){
-        writeBin(object@METADATA[ID,"RECTYPE"],
+        writeBin(as.integer(object@METADATA[ID,"RECTYPE"]),
                  con,
                  size = 1,
                  endian="little")
@@ -1025,7 +1019,7 @@ write_R2BIN <- function(
                endian="little")
 
       ##BL_UNIT
-      writeBin(as.integer(object@METADATA[ID,"DTYPE"]),
+      writeBin(as.integer(object@METADATA[ID,"BL_UNIT"]),
                con,
                size = 1,
                endian="little")
diff --git a/R/write_RLum2CSV.R b/R/write_RLum2CSV.R
new file mode 100644
index 0000000..f8a26aa
--- /dev/null
+++ b/R/write_RLum2CSV.R
@@ -0,0 +1,240 @@
+#' Export RLum-objects to CSV
+#'
+#' This function exports \code{\linkS4class{RLum}}-objects to CSV-files using the R function
+#' \code{\link[utils]{write.table}}. All \code{\linkS4class{RLum}}-objects are supported, but the
+#' export is lossy, i.e. only the pure numerical values are exported. Information that cannot
+#' be coerced to a \code{\link{data.frame}} or a \code{\link{matrix}} is discarded, as is the
+#' metadata.
+#'
+#' However, in combination with the implemented import functions, nearly every supported
+#' import data format can be exported to CSV-files, which gives a great deal of freedom in terms of
+#' compatibility with other tools.\cr
+#'
+#' \bold{Input is a list of objects}\cr
+#'
+#' If the input is a \code{\link{list}} of objects, all explicit function arguments can be provided
+#' as a \code{\link{list}}.
+#'
+#' @param object \code{\linkS4class{RLum}} or a \code{\link{list}} of \code{RLum} objects (\bold{required}): objects to be written
+#'
+#' @param path \code{\link{character}} (optional): character string naming the folder for the output to be written. If nothing
+#' is provided, \code{path} will be set to the working directory. Note: this argument is ignored if
+#' the argument \code{export} is set to \code{FALSE}.
+#'
+#' @param prefix \code{\link{character}} (with default): optional prefix to name the files. This prefix
+#' is valid for all written files
+#'
+#' @param export \code{\link{logical}} (with default): enable or disable the file export. If set to \code{FALSE}
+#' nothing is written to the file connection, but a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+#' is returned instead
+#'
+#' @param \dots further arguments that will be passed to the function \code{\link[utils]{write.table}}. All arguments
+#' except the argument \code{file} are supported
+#'
+#'
+#' @return The function returns either a CSV-file (or many of them) or, for the option \code{export == FALSE},
+#' a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+#'
+#'
+#' @section Function version: 0.1.1
+#'
+#' @author Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+#'
+#' @seealso \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+#' \code{\link[utils]{write.table}}
+#'
+#' @keywords IO
+#'
+#' @examples
+#'
+#' ##transform values to a list
+#' data(ExampleData.BINfileData, envir = environment())
+#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]
+#' write_RLum2CSV(object, export = FALSE)
+#'
+#' \dontrun{
+#'
+#' ##export data to CSV-files in the working directory;
+#' ##BE CAREFUL, this example creates many files on your file system
+#' data(ExampleData.BINfileData, envir = environment())
+#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]
+#' write_RLum2CSV(object)
+#'
+#' }
+#'
+#' @export
+write_RLum2CSV <- function(
+  object,
+  path = NULL,
+  prefix = "",
+  export = TRUE,
+  ...
+
+){
+
+  # General tests -------------------------------------------------------------------------------
+  if(missing(object)){
+    stop("[write_RLum2CSV()] input object is missing!", call. = FALSE)
+
+  }
+
+
+  # Self-call -----------------------------------------------------------------------------------
+  ##this option allows working on a list of RLum-objects
+  if(is.list(object)){
+
+    ##extend the list of arguments if set
+      ##path
+      path <- rep(list(path), length = length(object))
+
+      ##prefix ... create automatic prefix if nothing is provided
+      if(prefix == ""){
+        prefix <- as.list(paste0("[[",1:length(object),"]]_"))
+
+      }else{
+        prefix <- rep(list(prefix), length = length(object))
+
+      }
+
+      ##export
+      export <- rep(list(export), length = length(object))
+
+    ##execute the self-call function
+      temp <- lapply(1:length(object), function(x){
+        write_RLum2CSV(
+          object = object[[x]],
+          path = path[[x]],
+          prefix = prefix[[x]],
+          export = export[[x]],
+          ...
+        )
+
+      })
+
+      ##this prevents us from getting a list of NULL
+      if(is.null(unlist(temp))){
+        return(NULL)
+
+      }else{
+        return(temp)
+
+      }
+
+  }
+
+  # Integrity tests -----------------------------------------------------------------------------
+
+  ##check path
+
+    ##if NULL condition
+    if(export == TRUE && is.null(path)){
+      path <- getwd()
+      message(paste0("[write_RLum2CSV()] Path automatically set to: ", path))
+
+    }
+
+    ##non-NULL condition
+    if(export == TRUE && !dir.exists(path)){
+      stop("[write_RLum2CSV()] Diretory provided via the argument 'path' does not exist!", call. = FALSE)
+
+    }
+
+  ## What we need at the end of the day is a named list of data.frames or matrices we can export
+  ## using the function write.table; the name of the list elements will become the file names
+  if(inherits(object, "RLum")){
+    if(is(object, "RLum.Analysis") ||
+       is(object, "RLum.Data.Curve") ||
+       is(object, "RLum.Data.Spectrum") || is(object, "RLum.Data.Image")){
+
+      ##extract all elements ... depending on the input
+      if(is(object, "RLum.Analysis")){
+        ##tricky, we cannot use get_RLum() as the function lapply calls as.list() for an object!
+        object_list <- lapply(object, function(x){get_RLum(x)})
+
+        ##change names of the list and produce the right format straight away
+        names(object_list) <- paste0(1:length(object_list),"_",names(object))
+
+      } else {
+
+        ##get object and make list
+        object_list <- list(get_RLum(object))
+
+        ##set new name
+        names(object_list) <- paste0("1_",object at recordType)
+
+      }
+
+    }else if(is(object, "RLum.Results")){
+
+      ##we just try the typical R way and hope for the best
+      object_list <- unlist(object@data, recursive = FALSE)
+
+      ##sort out objects we do not like and cannot proceed with ...
+      object_list <- object_list[vapply(object_list, function(x) {
+        is.data.frame(x) |
+          is.matrix(x) |
+          is.numeric(x)
+      }, vector(mode = "logical", length = 1))]
+
+
+      ##adjust the names
+      names(object_list) <- paste0(1:length(object_list),"_",names(object_list))
+
+
+    }else{
+      try(stop("[write_RLum2CSV()] One particular RLum-object is not yet supported! NULL returned!", call. = FALSE))
+      return(NULL)
+
+    }
+
+  }else{
+   stop("[write_RLum2CSV()] Object needs to be a member of the object class RLum!", call. = FALSE)
+
+  }
+
+  # Export --------------------------------------------------------------------------------------
+  if(export){
+
+    ##set export settings for write.table
+    export_settings.default <- list(
+      append = FALSE,
+      quote = TRUE,
+      sep = ";",
+      eol = "\n",
+      na = "NA",
+      dec = ".",
+      row.names = FALSE,
+      col.names = FALSE,
+      qmethod = c("escape", "double"),
+      fileEncoding = ""
+
+    )
+
+    ##modify on demand
+    export_settings <- modifyList(x = export_settings.default, val = list(...))
+
+    ##write files to file system
+    for(i in 1:length(object_list)){
+      utils::write.table(
+        x = object_list[[i]],
+        file = paste0(path,"/",prefix, names(object_list)[i],".csv"),
+        append = export_settings$append,
+        quote =  export_settings$quote,
+        sep =  export_settings$sep,
+        eol =  export_settings$eol,
+        na =  export_settings$na,
+        dec =  export_settings$dec,
+        row.names =  export_settings$row.names,
+        col.names =  export_settings$col.names,
+        qmethod =  export_settings$qmethod,
+        fileEncoding =  export_settings$fileEncoding)
+
+    }
+
+
+  }else{
+    return(object_list)
+
+  }
+
+}
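Taken together, write_RLum2CSV() derives each file name from the names of its internal object list (index plus record type) and forwards any additional settings through '...' to write.table(). A short usage sketch along these lines, reusing the example data set already shown in the roxygen examples; tempdir() and the prefix are illustrative only:

    data(ExampleData.BINfileData, envir = environment())
    object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]

    ## one CSV-file per curve, written to a temporary folder, with a comma
    ## separator passed on to write.table() instead of the default semicolon
    write_RLum2CSV(object, path = tempdir(), prefix = "demo_", sep = ",")
    list.files(tempdir(), pattern = "^demo_.*\\.csv$")
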
diff --git a/R/zzz.R b/R/zzz.R
index b9403de..4f3198a 100644
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -29,7 +29,7 @@ assign("col",
   try(packageStartupMessage(paste("Welcome to the R package Luminescence version ",
                               packageDescription(pkg="Luminescence")$Version,
                               " [Built: ",
-                              strsplit(packageDescription(pkg="Luminescence")$Packaged, ";")[[1]][1],
+                              trimws(strsplit(packageDescription(pkg="Luminescence")$Built, ";")[[1]][3]),
                              "]", sep=""),
                             "\n",
                             get_Quote()), silent=TRUE)
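The switch from the 'Packaged' to the 'Built' field works because the Built entry of an installed package's DESCRIPTION is a semicolon-separated record of roughly the form 'R version; platform; build date; OS type', so the third element carries the timestamp that trimws() cleans up. A quick check of this assumption in an R session with the package installed:

    ## e.g. "R 3.4.0; x86_64-pc-linux-gnu; 2017-06-30 09:12:21 UTC; unix"
    built <- packageDescription(pkg = "Luminescence")$Built
    trimws(strsplit(built, ";")[[1]][3])
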
diff --git a/data/ExampleData.BINfileData.RData b/data/ExampleData.BINfileData.RData
index cf0cff4..116cff1 100644
Binary files a/data/ExampleData.BINfileData.RData and b/data/ExampleData.BINfileData.RData differ
diff --git a/data/ExampleData.Fading.RData b/data/ExampleData.Fading.RData
new file mode 100644
index 0000000..820997a
Binary files /dev/null and b/data/ExampleData.Fading.RData differ
diff --git a/data/ExampleData.portableOSL.RData b/data/ExampleData.portableOSL.RData
new file mode 100644
index 0000000..abb094a
Binary files /dev/null and b/data/ExampleData.portableOSL.RData differ
diff --git a/data/datalist b/data/datalist
index 8ad23c4..6acaffb 100644
--- a/data/datalist
+++ b/data/datalist
@@ -1,10 +1,12 @@
-BaseDataSet.CosmicDoseRate
-ExampleData.DeValues
-ExampleData.FittingLM
-ExampleData.LxTxData
-ExampleData.LxTxOSLData
-ExampleData.BINfileData
-ExampleData.CW_OSL_Curve
-ExampleData.RLum.Analysis
-ExampleData.RLum.Data.Image
-ExampleData.XSYG
\ No newline at end of file
+BaseDataSet.CosmicDoseRate
+ExampleData.DeValues
+ExampleData.Fading
+ExampleData.FittingLM
+ExampleData.LxTxData
+ExampleData.LxTxOSLData
+ExampleData.BINfileData
+ExampleData.CW_OSL_Curve
+ExampleData.RLum.Analysis
+ExampleData.RLum.Data.Image
+ExampleData.XSYG
+ExampleData.portableOSL
\ No newline at end of file
diff --git a/inst/CITATION b/inst/CITATION
index 20e7a45..a2c6655 100644
--- a/inst/CITATION
+++ b/inst/CITATION
@@ -45,5 +45,10 @@ citHeader("To cite the package 'Luminescence' in publications use:")
 				pages = "12-18",
 				doi = "10.1016/j.quageo.2015.09.003")
 
-
-
+		bibentry(bibtype = "Article",
+			  	title = "Bayesian statistics in luminescence dating: The baSAR-model and its implementation in the R package 'Luminescence'",
+				author = "Mercier, Norbert and Kreutzer, Sebastian and Christophe, Claire and Gu{\'e}rin, Guillaume and Guibert, P and Lahaye, Christelle and Lanos, Philippe and Philippe, Anne and Tribolo, Chantal",
+				year = "2016",
+				journal = "Ancient TL",
+				volume = "34",
+				pages = "14-21")
diff --git a/inst/NEWS.Rd b/inst/NEWS.Rd
index 8693207..98f0672 100644
--- a/inst/NEWS.Rd
+++ b/inst/NEWS.Rd
@@ -1,38 +1,645 @@
 \name{NEWS}
 \title{NEWS for the R Package Luminescence}
-\section{Changes in version 0.6.4 (9th September 2016)}{
+\section{Changes in version 0.7.5 (30th June, 2017)}{
 
+  \subsection{Bugfixes and changes}{
+    \itemize{
+
+
+      \item \code{analyse_SAR.CWOSL()}
+
+        \itemize{
+
+          \item If the signal integral was wrong, the default value was not set correctly (#46).
+
+        }
+
+
+      \item \code{calc_AverageDose()}
+
+        \itemize{
+
+          \item Update documentation and add produced output,
+          \item unify data.frame return output arguments (all capital letters).
+
+        }
+
+
+
+      \item \code{calc_FastRatio()}
+
+        \itemize{
+
+          \item Update slot names, which led to an output error.
+
+        }
+
+
+      \item \code{extract_IrradiationTimes()}
+
+        \itemize{
+
+          \item The exported BINX-file now works with the Analyst and the g-value can be
+          calculated therein (thanks to Geoff Duller).
+
+        }
+
+      \item \code{plot_FilterCombinations()}
+
+        \itemize{
+
+          \item Calculate optical density and return it,
+          \item fix calculation of the transmission window,
+          \item improve plot output.
+
+        }
+
+          \item \code{plot_RadialPlot()}
+
+        \itemize{
+
+          \item Fix an error which occasionally occurred if a list of \code{data.frame}s is provided (thanks to Christina Neudorf for spotting the bug).
+
+        }
+
+
+     \item \code{read_BIN2R()}
 
+        \itemize{
+
+          \item Improve error messages for corrupted BIN/BINX-files,
+          \item ensure that the file connection is properly closed.
+
+        }
+
+      \item \code{Risoe.BINfileData2RLum.Analysis()}
+
+        \itemize{
+
+          \item The grain selection was not accepted and caused a constant error (#45).
+
+        }
+
+         \item \code{use_DRAC()}
+
+        \itemize{
+
+          \item The DRAC URL had changed; fixed.
+
+        }
+
+
+
+
+
+    }
+  }
+
+
+   \subsection{Miscellaneous}{
+    \itemize{
+
+        \item Fix package welcome message.
+
+
+    }
+
+   }
+
+
+
+}
+
+\section{Changes in version 0.7.4 (31st March, 2017)}{
+
+
+    \subsection{Changes in S4-classes and methods}{
+      \itemize{
+        \item \code{get_RLum} for \code{RLum.Analysis}-objects now returns an error and \code{NULL} if the
+        \code{record.id} is not valid.
+
+      }
+  }
 
   \subsection{Bugfixes and changes}{
-   \itemize{
+    \itemize{
+
+
+
+     \item \code{analyse_baSAR()}{
+        \itemize{
+           \item The option to force the dose response curve through the origin was not correctly
+           implemented; fixed.
 
+      }
 
-      \item \code{analyse_baSAR()}
+    }
 
-       \itemize{
-          \item Fix problem that causes a function crash if an XLS-file was provided as input
-          for the grain selection.
-       }
+     \item \code{analyse_FadingMeasurement()}{
+        \itemize{
+           \item The function returned unreliable results because the time since irradiation had
+           been doubled. This bug only affected Lx/Tx data imported from an XSYG-file.
 
-      \item \code{analyse_pIRIRSequence()}
+      }
 
-       \itemize{
-          \item Account for a minor layout problem while plotting the combined growth curve (y-axis
-          scaling was not sufficient)
-       }
+    }
 
+    \item \code{analyse_SAR.TL()}{
+        \itemize{
+           \item A test code snippet made it into the final package. With this the Lx/Tx error was
+           taken as a fixed value (10/100) from the Lx/Tx value itself. The calculated error was not
+           considered; corrected,
+           \item function returns \code{NA} for the error if the background signals are similar
+           and the error would become 0,
+           \item new argument \code{integral_input} added to allow for an integral definition based
+           on temperatures and not channels.
 
-      \item \code{plot_AbanicoPlot()}
+      }
 
-       \itemize{
-          \item The relative and absolute standard deviation were mixed up in in the summary; fixed.
-       }
+    }
 
+     \item \code{calc_TLLxTxRatio()}{
+        \itemize{
+           \item Arguments \code{Lx.data.background} and  \code{Tx.data.background} are now
+           pre-set to \code{NULL}, i.e. the function no longer checks for missing entries.
 
       }
 
     }
 
 
+
+   \item \code{plot_KDE()}{
+        \itemize{
+           \item Further support for layout options as requested by Christopher Luethgens.
+     }
+     }
+
+ \item \code{plot_GrowthCurve()}{
+        \itemize{
+           \item Rename argument options for argument \code{mode} to \code{'interpolation'} and
+           \code{'extrapolation'} instead of \code{'regenerative'} and \code{'additive'}.
+           \item fix a rather rare bug using the combination \code{fit.force_through_origin = FALSE} and
+           \code{mode = "extrapolation"},
+           \item the graphical representation for \code{mode = "extrapolation"} was not correct (#38).
+
+     }
+     }
+
+  \item \code{plot_RLum.Data.Spectrum()}{
+        \itemize{
+           \item Fix wrong axis tick labels for the interactive plot option (#39),
+           \item correct manual.
+     }
+     }
+
+  \item \code{plot_RLum.Analysis()}{
+        \itemize{
+           \item Add support for the argument 'type' if the argument 'combine = TRUE' is used.
+     }
+     }
+
+     \item \code{read_BIN2R()}{
+        \itemize{
+           \item Correct minor bug while importing corrupt BIN-files,
+           \item add support for internet connections,
+           \item if a directory was provided, the function was trapped in an endless loop (#36)
+     }
+     }
+
+     \item \code{write_R2BIN()}{
+        \itemize{
+           \item Argument 'BL_UNIT' was not correctly exported; fixed,
+           \item export behaviour for BIN-file version 08 improved.
+     }
+
+    }
+   }
+  }
+
+  \subsection{Miscellaneous}{
+     \itemize{
+
+     \item BIN-file example data sets can now be exported without error to BIN-files using
+      \code{write_R2BIN()}.
+    }
+  }
+
+
+}
+
+
+
+\section{Changes in version 0.7.3 (8th Feburary, 2017)}{
+
+  \subsection{Bugfixes and changes}{
+  \itemize{
+
+
+   \item \code{Risoe.BINfileData()}{
+    \itemize{
+       \item Correct for mistakes in the manual.
+     }
+    }
+
+   \item \code{write_R2BIN()}{
+    \itemize{
+       \item Correct for broken function (introduced with v0.7.0).
+     }
+    }
+   }
+  }
+
+  \subsection{Miscellaneous}{
+  \itemize{
+
+  \item Correct wrong package date format.
+  \item Add NEWS again to the package.
+ }
+
+}
+
+}
+
+
+\section{Changes in version 0.7.2 (7th February (evening), 2017)}{
+
+  \itemize{
+  \item The CRAN check on the Solaris machines gave an error while performing
+the (on all other platforms successful) unit tests. Consequently, and to
+reduce the load on the CRAN resources, all tests are skipped on CRAN.
+  \item This version never made it on CRAN!
+
+  }
+
+
+
+}
+
+
+\section{Changes in version 0.7.1 (6th February (evening), 2017)}{
+
+ \itemize{
+
+\item This release accounts for the CRAN check errors on the Solaris machines by
+preventing the unfortunate overload of the C++ function pow() with integer
+values.
+
+}
+
+}
+
+\section{Changes in version 0.7.0 (6th February (morning), 2017)}{
+
+  \subsection{New functions}{
+    \itemize{
+
+        \item \code{analyse_FadingMeasurement()}:
+        Analyse fading measurements to calculate g-values and to estimate
+        the density of recombination centres.
+
+        \item \code{analyse_portableOSL()}:
+        The function analyses CW-OSL curve data produced by a SUERC portable
+        OSL reader and produces a combined plot of OSL/IRSL signal intensities,
+        OSL/IRSL depletion ratios and the IRSL/OSL ratio.
+
+        \item \code{calc_Kars2008()}:
+        A function to calculate the expected sample specific fraction of saturation
+        following Kars et al. (2008) and Huntley (2006).
+
+        \item \code{calc_AverageDose()}: Function to calculate the average dose and its extrinsic
+        dispersion.
+
+        \item \code{convert_BIN2R()}: wrapper function around the functions \code{read_BIN2R()} and
+        \code{write_RLum2CSV()} to convert a BIN-file to CSV-files; as far as possible.
+
+         \item \code{convert_Daybreak2R()}: wrapper function around the functions \code{read_Daybreak2R()} and
+        \code{write_RLum2CSV()} to convert Daybreak measurement data (TXT-file, DAT-file) to CSV-files; as far as possible.
+
+        \item \code{convert_PSL2R()}: wrapper function around the functions \code{read_PSL2R()} and
+        \code{write_RLum2CSV()} to convert a PSL-file (SUERC portable OSL reader file format) to CSV-files; as far as possible.
+
+        \item \code{convert_XSYG2R()}: wrapper function around the functions \code{read_XSYG2R()} and
+        \code{write_RLum2CSV()} to convert an XSYG-file to CSV-files; as far as possible.
+
+        \item \code{github_branches(), github_commits(), github_issues()}:
+        R Interface to the GitHub API v3. These functions can be used to query a specific repository hosted on GitHub.
+
+        \item \code{install_DevelopmentVersion()}:
+        This function is a convenient method for installing the development version of the R package
+        'Luminescence' directly from GitHub.
+
+        \item \code{PSL2Risoe.BINfileData()}:
+        Converts an \code{RLum.Analysis} object produced by the function
+        \code{read_PSL2R()} to an \code{Risoe.BINfileData} object.
+
+        \item \code{read_PSL2R()}:
+        Imports PSL files produced by a SUERC portable OSL reader into R.
+
+        \item \code{smooth_RLum()}: wrapper function to call the corresponding methods to smooth
+        data based on the function \code{zoo::rollmean}.
+
+        \item \code{write_RLum2CSV()}:
+        Exports \code{RLum}-objects to CSV-files to improve the compatibility to other software.
+        Supported are only numerical values, i.e., \code{data.frame}, \code{matrix} and \code{numeric}.
+
+    }
+  }
+
+    \subsection{New example data}{
+    \itemize{
+
+      \item \code{ExampleData.fading}:
+        Example data set for fading measurements of the IR50, IR100, IR150 and IR225
+        feldspar signals of sample UNIL/NB123. It further contains regular
+        equivalent dose measurement data of the same sample, which can be used
+        to apply a fading correction to. These data were kindly provided by Georgina King.
+    }
+  }
+
+    \subsection{Changes in S4-classes and methods}{
+      \itemize{
+
+      \item Method \code{get_RLum} for \code{RLum.Analysis}-objects did not respect \code{.pid}, fixed.
+
+      \item Method \code{get_RLum} for \code{list}-objects now accepts lists with all kinds of
+      \code{RLum}-objects. Previously, only lists of \code{RLum.Analysis}-objects were allowed.
+
+      \item \code{plot_RLum} was not passing the argument \code{sub}, as it was fetched by the partial
+      argument matching; fixed.
+
+      \item \code{set_RLum} produced \code{NA} as originator, if the function calling the function
+      \code{set_RLum()} was called from outside of the package using the double colons
+      (e.g., \code{Luminescence::function()}); fixed.
+
+      \item \code{smooth_RLum}: add method support for \code{RLum.Data.Curve}, \code{RLum.Analysis} and
+      \code{list}s of these objects.
+
+
+      }
+
+    }
+
+  \subsection{Bugfixes and changes}{
+
+  \itemize{
+
+   \item \code{analyse_baSAR()}{
+    \itemize{
+       \item Due to a typo in the manual the \code{method_control} parameter \code{variable.names} was
+       not working if correctly typed as written in the manual (in the manual: 'variables.names', but
+       correct is 'variable.names'); typo corrected,
+       \item minor improvements and error corrections.
+
+    }
+  }
+
+
+  \itemize{
+
+   \item \code{analyse_IRSAR.RF()}{
+
+  \itemize{
+    \item Add option for a vertical sliding of the RF_nat curve (\code{method_control = list(vslide_range = 'auto')}). This
+    feature has beta status and usage for publication work is not recommended yet. By default no vertical
+    sliding is applied,
+   \item allow a parallel processing of MC runs by using the argument \code{method_control = list(cores = 'auto')}.
+   }
+  }
+
+  \item \code{analyse_SAR.CWOSL()}{
+    \itemize{
+       \item Fix wrongly set threshold value for recuperation rate (#26),
+       \item fix a rare bug for the combination 'recyling.ratio = NA' and more than
+       one provided recyling point,
+       \item a check has been implemented to refrain from using wrong rejection criteria
+       keywords.
+    }
+  }
+
+  \item \code{calc_AliquotSize()}{
+    \itemize{
+       \item Console output can now be suppressed via 'verbose = FALSE' (#24).
+    }
+  }
+
+
+  \item \code{calc_CosmicDoseRate()}{
+    \itemize{
+       \item Console output can now be suppressed via 'verbose = FALSE' (#24).
+    }
+  }
+
+    \item \code{calc_FastRatio()}{
+    \itemize{
+       \item New arguments 'Ch_L2' and 'Ch_L3' to allow the user to specify
+       custom values for channels L2 and L3. Feature requested by A. Versendaal (#29).
+    }
+  }
+
+  \item \code{calc_FadingCorr()}{
+    \itemize{
+       \item Fixed a bug where the function would crash when providing an
+       \code{RLum.Results} object for \code{g_value},
+       \item new argument \code{interval} to control the age interval for solving the equation
+       via \code{uniroot}.
+
+    }
+
+  }
+
+  \item \code{calc_FiniteMixture()}{
+    \itemize{
+       \item Fixed a bug where certain arguments were not passed to `plot_RLum.Results`
+       so that the plot was not as customisable as intended.
+       Thanks to Daniele Questiaux for reporting the bug.
+    }
+  }
+
+  \item \code{calc_MaxDose()}{
+    \itemize{
+       \item Fixed a bug in the console output, which provided wrong values for the
+       asymmetric error on gamma (Note that the values in the output object
+       were correct!). Thankfully reported by Xue Rui.
+    }
+  }
+
+    \item \code{calc_Statistics()}{
+    \itemize{
+       \item The argument \code{n.MC} got a new value \code{NULL} which is now
+       used as default. With this the basic statistical measures are in accordance
+       with the expectations (e.g., the standard deviation is returned by default in
+       the conventional way and not calculated using an MC simulation).
+    }
+  }
+
+
+    \item \code{calc_OSLLxTxRatio()}{
+    \itemize{
+       \item Add argument \code{use_previousBG} to use the background of the Lx-curve to
+       get the net signal of the Tx-curve (request #15).
+    }
+  }
+
+      \item \code{fit_CWCurve()}{
+    \itemize{
+       \item Change order of \code{RLum.Results} output list elements,
+       \item rename first element to \code{data},
+       \item add element slot 'info'.
+    }
+  }
+
+   \item \code{fit_LMCurve()}{
+    \itemize{
+       \item Change order of \code{RLum.Results} output list elements,
+       \item rename first element to \code{data},
+       \item add element slot 'info'.
+    }
+  }
+
+  \item \code{model_LuminescenceSignals()}{
+    \itemize{
+       \item Update function arguments to account for changes in RLumModel version 0.2.0.
+      }
+    }
+
+
+    \item \code{plot_DetPlot()}{
+      \itemize{
+        \item Correct negative y-axis, the minimum is now the real minimum.
+      }
+
+    }
+
+      \item \code{plot_GrowthCurve()}{
+        \itemize{
+           \item Reduce the number of confusing warnings,
+           \item add new argument \code{mode} to select the calculation mode of the function.
+           This allows, in particular, to only fit the data without calculating a De, or to calculate
+           a De assuming an additive dose response curve,
+           \item account for the very specific case that all dose points are similar. The function
+           stops with an error and returns NULL,
+           \item under weird circumstances points on the growth curve were not plotted correctly; fixed.
+
+        }
+
+      }
+
+    \item \code{plot_RadialPlot()}{
+        \itemize{
+           \item Sometimes the function crashed with an out-of-bounds error if more than one data set
+           was provided; fixed,
+           \item argument \code{negatives} caused an error if not set to \code{'remove'}; this and
+           some related errors around this option were fixed,
+           \item De-adjustment for negative values optimised for large scatter.
+
+        }
+
+      }
+
+    \item \code{plot_RLum.Analysis()}{
+        \itemize{
+           \item The usage of the argument \code{smooth} led to a crash; fixed.
+
+        }
+
+    }
+
+   \item \code{plot_RLum.Data.Curve()}{
+        \itemize{
+           \item The function no longer stops if the curve contains \code{NA} values; it only stops if
+           the curve consists entirely of \code{NA} values.
+
+        }
+
+    }
+
+     \item \code{plot_RLum.Data.Spectrum()}{
+        \itemize{
+           \item The interactive plot option was broken with the last update of the
+           package 'plotly'; fixed.
+
+        }
+
+    }
+
+      \item \code{plot_ViolinPlot()}{
+        \itemize{
+           \item The function erroneously produced a NA value warning; fixed.
+
+        }
+
+    }
+
+
+    \item \code{read_BIN2R()}{
+        \itemize{
+
+        \item If BIN-files are automatically imported, the function now skips non-BIN-files
+        without crashing when used in combination with the argument \code{pattern},
+        \item add new argument \code{ignore.RECTYPE} to provide a solution for broken BIN-files
+        or BIN-files with non-documented entries. Furthermore, the general behaviour for such
+        cases has been optimised.
+      }
+     }
+
+     \item \code{read_Daybreak2R()}{
+        \itemize{
+
+        \item Add support for DAT-files produced by a 1100 reader using the software (TLAPLLIC v.3.2).
+        Thanks to Antoine Zink,
+        \item minor error corrections and added example code.
+
+      }
+     }
+
+
+    \item \code{template_DRAC()}{
+        \itemize{
+
+        \item Fixed a typo in the column names (#28).
+
+      }
+
+     }
+
+
+   \item \code{use_DRAC()}{
+    \itemize{
+       \item Now supports DRAC v1.2 and the newly introduced CSV input template.
+       Older v1.1 excel sheet input templates are still supported, but users
+       are highly encouraged to use the new CSV file.
+
+       \item Columns in the output tables are now assigned proper classes (#27).
+    }
+  }
+
+    }
+  }
+
+}
+
+  \subsection{Internals}{
+
+   \itemize{
+      \item The internal function converting BIN-file curves to RLum.Data.Curve() objects has been
+      optimised and, amongst other things, now uses a function written with Rcpp to create the curve matrix.
+      The conversion now runs ca. two times faster,
+
+      \item add \code{`[<-]`} method for \code{RLum.Data.Curve} objects,
+
+      \item a hint on how to cite a function is now added automatically to every major function manual page,
+
+      \item add 'magrittr' to the package dependencies (imports) to further support the usage of this amazing
+      pipe operator,
+
+      \item thanks to Johannes Friedrich this release introduces regular unit tests using
+      the package 'testthat' to improve the code quality and stability,
+
+      \item add internal helper function \code{.smoothing}; no Rd entry.
+
+    }
+
+  }
+
 }
diff --git a/man/Analyse_SAR.OSLdata.Rd b/man/Analyse_SAR.OSLdata.Rd
index 0112dc3..e305f33 100644
--- a/man/Analyse_SAR.OSLdata.Rd
+++ b/man/Analyse_SAR.OSLdata.Rd
@@ -103,8 +103,9 @@ consider using the functions \code{\link{analyse_SAR.CWOSL}} or
 to use the function \link{analyse_SAR.CWOSL} or instead.}
 }
 \section{Function version}{
- 0.2.17 (2016-05-02 09:36:06)
+ 0.2.17 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -124,11 +125,12 @@ output.SAR <- data.frame(Dose = output$LnLxTnTx[[1]]$Dose,
                          LxTx.Error = output$LnLxTnTx[[1]]$LxTx.Error)
 output.SAR
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Fuchs, M.C., Fuchs, M. (2017). Analyse_SAR.OSLdata(): Analyse SAR CW-OSL measurements.. Function version 0.2.17. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France), Margret C. Fuchs, HZDR, Freiberg (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
 after bleaching. Quaternary Science Reviews 7, 387-393.
@@ -146,6 +148,9 @@ improved single-aliquot regenerative-dose protocol. Radiation Measurements
 
 and for further analysis \link{plot_GrowthCurve}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Margret C. Fuchs, HZDR, Freiberg (Germany)
+\cr R Luminescence Package Team}
 \keyword{datagen}
 \keyword{dplot}
-
diff --git a/man/BaseDataSet.CosmicDoseRate.Rd b/man/BaseDataSet.CosmicDoseRate.Rd
index ee83cda..105fb03 100644
--- a/man/BaseDataSet.CosmicDoseRate.Rd
+++ b/man/BaseDataSet.CosmicDoseRate.Rd
@@ -91,6 +91,7 @@ calculation
 \section{Version}{
  0.1
 }
+
 \examples{
 
 ##load data
@@ -108,4 +109,3 @@ for luminescence and ESR dating: large depths and long-term time variations.
 Radiation Measurements, 23, pp. 497-500.
 }
 \keyword{datasets}
-
diff --git a/man/CW2pHMi.Rd b/man/CW2pHMi.Rd
index 0eaef0e..002b412 100644
--- a/man/CW2pHMi.Rd
+++ b/man/CW2pHMi.Rd
@@ -72,8 +72,9 @@ no further interpolation is attempted.\cr In every case a warning message is
 shown.
 }
 \section{Function version}{
- 0.2.2 (2015-11-29 17:27:48)
+ 0.2.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -151,12 +152,12 @@ lines(values[1:length(values.t[,1]),1],CW2pPMi(values, P=1/10)[,2],
 text(0.5,6500,"PM", col="blue" ,cex=.8)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). CW2pHMi(): Transform a CW-OSL curve into a pHM-OSL curve via interpolation under hyperbolic modulation conditions. Function version 0.2.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France) \cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
-Delft University of Technology, The Netherlands\cr
-\cr R Luminescence Package Team}
+
 \references{
 Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
 signal components. Radiation Measurements, 47, 752-758.\cr
@@ -173,5 +174,9 @@ LM-OSL curves. Radiation Measurements, 32, 141-145.
 \code{\link{fit_LMCurve}}, \code{\link{lm}},
 \code{\linkS4class{RLum.Data.Curve}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France) \cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+Delft University of Technology, The Netherlands\cr
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/CW2pLM.Rd b/man/CW2pLM.Rd
index 493d1aa..6a04270 100644
--- a/man/CW2pLM.Rd
+++ b/man/CW2pLM.Rd
@@ -38,8 +38,9 @@ The transformation is recommended for curves recorded with a channel
 resolution of at least 0.05 s/channel.
 }
 \section{Function version}{
- 0.4.1 (2015-11-29 17:27:48)
+ 0.4.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -63,11 +64,12 @@ values.transformed <- CW2pLM(values)
 plot(values.transformed)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). CW2pLM(): Transform a CW-OSL curve into a pLM-OSL curve. Function version 0.4.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Bulur, E., 2000. A simple transformation for converting CW-OSL
 curves to LM-OSL curves. Radiation Measurements, 32, 141-145.
@@ -86,5 +88,8 @@ The output of the function can be further used for LM-OSL fitting:
 \code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}},
 \code{\link{plot_RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/CW2pLMi.Rd b/man/CW2pLMi.Rd
index 96404f6..caed068 100644
--- a/man/CW2pLMi.Rd
+++ b/man/CW2pLMi.Rd
@@ -63,8 +63,9 @@ provided manually and more than two points are extrapolated, a warning
 message is returned.
 }
 \section{Function version}{
- 0.3.1 (2015-11-29 17:27:48)
+ 0.3.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -109,12 +110,12 @@ lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
 text(0.5,6500,"PM", col = "blue", cex = .8)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). CW2pLMi(): Transform a CW-OSL curve into a pLM-OSL curve via interpolation under linear modulation conditions. Function version 0.3.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux
-Montaigne\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
-Delft University of Technology, The Netherlands\cr
-\cr R Luminescence Package Team}
+
 \references{
 Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
 signal components. Radiation Measurements, 47, 752-758.\cr
@@ -130,5 +131,9 @@ LM-OSL curves. Radiation Measurements, 32, 141-145.
 \code{\link{CW2pLM}}, \code{\link{CW2pHMi}}, \code{\link{CW2pPMi}},
 \code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux
+Montaigne\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+Delft University of Technology, The Netherlands\cr
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/CW2pPMi.Rd b/man/CW2pPMi.Rd
index fb8da9e..e865230 100644
--- a/man/CW2pPMi.Rd
+++ b/man/CW2pPMi.Rd
@@ -70,8 +70,9 @@ should be limited to avoid artificial intensity data. If \code{P} is
 provided manually, not more than two points are extrapolated.
 }
 \section{Function version}{
- 0.2.1 (2015-11-29 17:27:48)
+ 0.2.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -116,12 +117,12 @@ lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2],
 text(0.5,6500,"PM", col = "blue", cex = .8)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). CW2pPMi(): Transform a CW-OSL curve into a pPM-OSL curve via interpolation under parabolic modulation conditions. Function version 0.2.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
-Delft University of Technology, The Netherlands\cr
-\cr R Luminescence Package Team}
+
 \references{
 Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL
 signal components. Radiation Measurements, 47, 752-758.\cr
@@ -137,5 +138,9 @@ LM-OSL curves. Radiation Measurements, 32, 141-145.
 \code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pHMi}},
 \code{\link{fit_LMCurve}}, \code{\linkS4class{RLum.Data.Curve}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)\cr\cr Based on comments and suggestions from:\cr Adrie J.J. Bos,
+Delft University of Technology, The Netherlands\cr
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/ExampleData.BINfileData.Rd b/man/ExampleData.BINfileData.Rd
index 9960d9f..1209458 100644
--- a/man/ExampleData.BINfileData.Rd
+++ b/man/ExampleData.BINfileData.Rd
@@ -45,6 +45,7 @@ the BIN-file format changed.
 \section{Version}{
  0.1
 }
+
 \examples{
 
 ##show first 5 elements of the METADATA and DATA elements in the terminal
 CWOSL.SAR.Data@DATA[1:5]
 \bold{TL.SAR.Data}: unpublished data
 }
 \keyword{datasets}
-
diff --git a/man/ExampleData.CW_OSL_Curve.Rd b/man/ExampleData.CW_OSL_Curve.Rd
index ea1e008..ce38718 100644
--- a/man/ExampleData.CW_OSL_Curve.Rd
+++ b/man/ExampleData.CW_OSL_Curve.Rd
@@ -41,4 +41,3 @@ Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal
 components. Radiation Measurements, 47, 752-758.
 }
 \keyword{datasets}
-
diff --git a/man/ExampleData.DeValues.Rd b/man/ExampleData.DeValues.Rd
index 1740fad..14ef883 100644
--- a/man/ExampleData.DeValues.Rd
+++ b/man/ExampleData.DeValues.Rd
@@ -54,4 +54,3 @@ Risoe TL/OSL DA-20 reader\cr Units: \tab Values are given in Gray \cr
 Measurement Date: \tab 2012 }
 }
 \keyword{datasets}
-
diff --git a/man/ExampleData.Fading.Rd b/man/ExampleData.Fading.Rd
new file mode 100644
index 0000000..41378f6
--- /dev/null
+++ b/man/ExampleData.Fading.Rd
@@ -0,0 +1,93 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\name{ExampleData.Fading}
+\alias{ExampleData.Fading}
+\title{Example data for feldspar fading measurements}
+\format{A \code{\link{list}} with two elements, each containing a further
+\code{\link{list}} of \code{\link{data.frame}}s containing the data
+on the fading and equivalent dose measurements:
+
+\describe{
+
+\code{$fading.data}: A named \code{\link{list}} of \code{\link{data.frame}}s,
+each having three named columns (\code{LxTx, LxTx.error, timeSinceIrradiation}).\cr
+\code{..$IR50}: Fading data of the IR50 signal.\cr
+\code{..$IR100}: Fading data of the IR100 signal.\cr
+\code{..$IR150}: Fading data of the IR150 signal.\cr
+\code{..$IR225}: Fading data of the IR225 signal.\cr
+\cr\cr
+
+\code{$equivalentDose.data}: A named \code{\link{list}} of \code{\link{data.frame}}s,
+each having three named columns (\code{dose, LxTx, LxTx.error}).\cr
+\code{..$IR50}: Equivalent dose measurement data of the IR50 signal.\cr
+\code{..$IR100}: Equivalent dose measurement data of the IR100 signal.\cr
+\code{..$IR150}: Equivalent dose measurement data of the IR150 signal.\cr
+\code{..$IR225}: Equivalent dose measurement data of the IR225 signal.\cr
+\cr\cr
+
+}}
+\source{
+These data were kindly provided by Georgina King. Detailed information
+on the sample UNIL/NB123 can be found in the reference given below. The raw
+data can be found in the accompanying supplementary information.
+}
+\description{
+Example data set for fading measurements of the IR50, IR100, IR150 and
+IR225 feldspar signals of sample UNIL/NB123. It further contains regular equivalent dose
+measurement data of the same sample, which can be used to apply a
+fading correction to.
+}
+\examples{
+
+## Load example data
+data("ExampleData.Fading", envir = environment())
+
+## Get fading measurement data of the IR50 signal
+IR50_fading <- ExampleData.Fading$fading.data$IR50
+head(IR50_fading)
+
+## Determine g-value and rho' for the IR50 signal
+IR50_fading.res <- analyse_FadingMeasurement(IR50_fading)
+
+## Show g-value and rho' results
+gval <- get_RLum(IR50_fading.res)
+rhop <- get_RLum(IR50_fading.res, "rho_prime")
+
+gval
+rhop
+
+## Get LxTx values of the IR50 DE measurement
+IR50_De.LxTx <- ExampleData.Fading$equivalentDose.data$IR50
+
+## Calculate the De of the IR50 signal
+IR50_De <- plot_GrowthCurve(IR50_De.LxTx,
+                                mode = "interpolation",
+                                fit.method = "EXP")
+
+## Extract the calculated De and its error
+IR50_De.res <- get_RLum(IR50_De)
+De <- c(IR50_De.res$De, IR50_De.res$De.Error)
+
+## Apply fading correction (age conversion greatly simplified)
+IR50_Age <- De / 7.00
+IR50_Age.corr <- calc_FadingCorr(IR50_Age, g_value = IR50_fading.res)
+
+
+}
+\references{
+King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016.
+Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004
+
+\bold{Details} \cr
+\tabular{ll}{
+Lab: \tab University of Lausanne \cr
+Lab-Code: \tab UNIL/NB123 \cr
+Location: \tab Namche Barwa (eastern Himalaya)\cr
+Material: \tab Coarse grained (180-212 microns) potassium feldspar \cr
+Units: \tab Values are given in seconds \cr
+Lab Dose Rate: \tab Dose rate of the beta-source at measurement ca. 0.1335 +/-
+0.004 Gy/s \cr
+Environmental Dose Rate: \tab 7.00 +/- 0.92 Gy/ka (includes internal dose rate)
+}
+}
+\keyword{datasets}
diff --git a/man/ExampleData.FittingLM.Rd b/man/ExampleData.FittingLM.Rd
index cf52a75..8468d19 100644
--- a/man/ExampleData.FittingLM.Rd
+++ b/man/ExampleData.FittingLM.Rd
@@ -26,4 +26,3 @@ Fuchs, M., Kreutzer, S., Fischer, M., Sauer, D., Soerensen, R., 2012. OSL and IR
 dating of raised beach sand deposits along the southeastern coast of Norway.
 Quaternary Geochronology, 10, 195-200.
 }
-
diff --git a/man/ExampleData.LxTxData.Rd b/man/ExampleData.LxTxData.Rd
index e1497fd..f224eff 100644
--- a/man/ExampleData.LxTxData.Rd
+++ b/man/ExampleData.LxTxData.Rd
@@ -23,4 +23,3 @@ plot(LxTxData$Dose,LxTxData$LxTx)
 \references{
 unpublished data
 }
-
diff --git a/man/ExampleData.LxTxOSLData.Rd b/man/ExampleData.LxTxOSLData.Rd
index ae100a1..720f7f9 100644
--- a/man/ExampleData.LxTxOSLData.Rd
+++ b/man/ExampleData.LxTxOSLData.Rd
@@ -23,4 +23,3 @@ plot(Tx.data)
 \references{
 unpublished data
 }
-
diff --git a/man/ExampleData.RLum.Analysis.Rd b/man/ExampleData.RLum.Analysis.Rd
index aa0097f..82c13e0 100644
--- a/man/ExampleData.RLum.Analysis.Rd
+++ b/man/ExampleData.RLum.Analysis.Rd
@@ -27,6 +27,7 @@ protocol analysis.
 \section{Version}{
  0.1
 }
+
 \examples{
 
 ##load data
@@ -45,4 +46,3 @@ Germany - a preliminary luminescence dating study. Zeitschrift fuer
 Geomorphologie 58, 5-26. doi: 10.1127/0372-8854/2012/S-00112
 }
 \keyword{datasets}
-
diff --git a/man/ExampleData.RLum.Data.Image.Rd b/man/ExampleData.RLum.Data.Image.Rd
index 1416e47..776c1cf 100644
--- a/man/ExampleData.RLum.Data.Image.Rd
+++ b/man/ExampleData.RLum.Data.Image.Rd
@@ -28,6 +28,7 @@ Measurement of Princton Instruments camera imported with the function
 \section{Version}{
  0.1
 }
+
 \examples{
 
 ##load data
@@ -38,4 +39,3 @@ plot_RLum(ExampleData.RLum.Data.Image)
 
 }
 \keyword{datasets}
-
diff --git a/man/ExampleData.XSYG.Rd b/man/ExampleData.XSYG.Rd
index 675a617..06e5d3b 100644
--- a/man/ExampleData.XSYG.Rd
+++ b/man/ExampleData.XSYG.Rd
@@ -44,6 +44,7 @@ function \code{\link{read_XSYG2R}}.
 \section{Version}{
  0.1
 }
+
 \examples{
 
 ##show data
@@ -94,4 +95,3 @@ Climatic Cycle. Boreas, 42, 664--677.
 \code{\link{plot_RLum.Analysis}}, \code{\link{plot_RLum.Data.Spectrum}}
 }
 \keyword{datasets}
-
diff --git a/man/ExampleData.portableOSL.Rd b/man/ExampleData.portableOSL.Rd
new file mode 100644
index 0000000..bb653d6
--- /dev/null
+++ b/man/ExampleData.portableOSL.Rd
@@ -0,0 +1,25 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Luminescence-package.R
+\docType{data}
+\name{ExampleData.portableOSL}
+\alias{ExampleData.portableOSL}
+\title{Example portable OSL curve data for the package Luminescence}
+\source{
+\bold{ExampleData.portableOSL}
+
+\tabular{ll}{ Lab: \tab Cologne Luminescence Laboratory\cr Lab-Code: \tab
+- \cr Location: \tab Nievenheim/Germany\cr Material: \tab Fine grain quartz
+\cr Reference: \tab unpublished data }
+}
+\description{
+A \code{list} of \code{\linkS4class{RLum.Analysis}} objects, each containing
+the same number of \code{\linkS4class{RLum.Data.Curve}} objects representing
+individual OSL, IRSL and dark count measurements of a sample.
+}
+\examples{
+
+data(ExampleData.portableOSL, envir = environment())
+plot_RLum(ExampleData.portableOSL)
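+
+## sketch: inspect the records of the first sample in the list
+## (assumes plain list indexing; element names are not documented here)
+structure_RLum(ExampleData.portableOSL[[1]])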
+
+}
+\keyword{datasets}
diff --git a/man/GitHub-API.Rd b/man/GitHub-API.Rd
new file mode 100644
index 0000000..f30aca6
--- /dev/null
+++ b/man/GitHub-API.Rd
@@ -0,0 +1,100 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/github.R
+\name{GitHub-API}
+\alias{GitHub-API}
+\alias{github_commits}
+\alias{github_branches}
+\alias{github_issues}
+\title{GitHub API}
+\usage{
+github_commits(user = "r-lum", repo = "luminescence", branch = "master",
+  n = 5)
+
+github_branches(user = "r-lum", repo = "luminescence")
+
+github_issues(user = "r-lum", repo = "luminescence", verbose = TRUE)
+}
+\arguments{
+\item{user}{\code{\link{character}}: 
+GitHub user name (defaults to 'r-lum').}
+
+\item{repo}{\code{\link{character}}: 
+name of a GitHub repository (defaults to 'luminescence').}
+
+\item{branch}{\code{\link{character}}: 
+branch of a GitHub repository (defaults to 'master').}
+
+\item{n}{\code{\link{integer}}:
+number of commits returned (defaults to 5).}
+
+\item{verbose}{\code{\link{logical}}: 
+print the output to the console (defaults to \code{TRUE}).}
+}
+\value{
+\code{github_commits}: \code{\link{data.frame}} with columns:
+\tabular{ll}{
+ [ ,1] \tab SHA \cr
+ [ ,2] \tab AUTHOR \cr
+ [ ,3] \tab DATE \cr
+ [ ,4] \tab MESSAGE \cr
+}
+
+\code{github_branches}: \code{\link{data.frame}} with columns:
+\tabular{ll}{
+ [ ,1] \tab BRANCH \cr
+ [ ,2] \tab SHA \cr
+ [ ,3] \tab INSTALL \cr
+}
+
+\code{github_issues}: Nested \code{\link{list}} with one element per open issue.
+Each issue element is a list with elements:
+\tabular{ll}{
+ [[1]] \tab NUMBER \cr
+ [[2]] \tab TITLE \cr
+ [[3]] \tab BODY \cr
+ [[4]] \tab CREATED \cr
+ [[5]] \tab UPDATED \cr
+ [[6]] \tab CREATOR \cr
+ [[7]] \tab URL \cr
+ [[8]] \tab STATUS \cr
+}
+}
+\description{
+R Interface to the GitHub API v3.
+}
+\details{
+These functions can be used to query a specific repository hosted on GitHub. \cr
+
+\code{github_commits} lists the most recent \code{n} commits of a specific
+branch of a repository.
+
+\code{github_branches} can be used to list all current branches of a
+repository and returns the corresponding SHA hash as well as an installation
+command to install the branch in R via the 'devtools' package.
+
+\code{github_issues} lists all open issues for a repository in valid YAML.
+}
+\section{Function version}{
+ 0.1.0
+}
+
+\examples{
+
+\dontrun{
+github_branches(user = "r-lum", repo = "luminescence")
+github_issues(user = "r-lum", repo = "luminescence")
+github_commits(user = "r-lum", repo = "luminescence", branch = "master", n = 10)
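+
+## sketch (assumption): the INSTALL column returned by github_branches()
+## holds a ready-to-run 'devtools' command that can be evaluated directly
+branches <- github_branches(user = "r-lum", repo = "luminescence")
+eval(parse(text = branches$INSTALL[1]))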
+}
+
+} 
+
+\section{How to cite}{
+Burow, C. (2017). GitHub-API(): GitHub API. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\references{
+GitHub Developer API v3. \url{https://developer.github.com/v3/}, last accessed: 10/01/2017.
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
diff --git a/man/Luminescence-package.Rd b/man/Luminescence-package.Rd
index 5368a90..c88367a 100644
--- a/man/Luminescence-package.Rd
+++ b/man/Luminescence-package.Rd
@@ -2,8 +2,8 @@
 % Please edit documentation in R/Luminescence-package.R
 \docType{package}
 \name{Luminescence-package}
-\alias{Luminescence}
 \alias{Luminescence-package}
+\alias{Luminescence}
 \title{Comprehensive Luminescence Dating Data Analysis}
 \description{
 A collection of various R functions for the purpose of Luminescence dating
@@ -13,57 +13,79 @@ plotting of equivalent dose distributions.
 }
 \details{
 \tabular{ll}{ Package: \tab Luminescence\cr Type: \tab Package\cr Version:
-\tab 0.6.4 \cr Date: \tab 2016-09-09 \cr License: \tab GPL-3\cr }
+\tab 0.7.5 \cr Date: \tab 2017-06-30 \cr License: \tab GPL-3\cr }
+}
+\references{
+Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M.,
+Schmidt, C., 2013. A practical guide to the R package Luminescence.
+Ancient TL, 31, 11-18.
+
+Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2016. The abanico plot:
+visualising chronometric data with individual standard errors. Quaternary Geochronology 31, 1-7.
+http://dx.doi.org/10.1016/j.quageo.2015.09.003
+
+Fuchs, M.C., Kreutzer, S., Burow, C., Dietze, M., Fischer, M., Schmidt, C.,
+Fuchs, M., 2015. Data processing in luminescence dating analysis: An
+exemplary workflow using the R package 'Luminescence'. Quaternary
+International, 362,8-13. http://dx.doi.org/10.1016/j.quaint.2014.06.034
+
+Kreutzer, S., Schmidt, C., Fuchs, M.C., Dietze, M., Fischer, M., Fuchs, M.,
+2012. Introducing an R package for luminescence dating analysis. Ancient TL,
+30, 1-8.
+
+Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
+Ancient TL 33, 16-21.
 }
 \author{
-\bold{Authors} (alphabetic order)
+\bold{Full list of authors and contributors} (alphabetic order)
 
 \tabular{ll}{
 Christoph Burow \tab University of Cologne, Germany \cr
+Claire Christophe \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
 Michael Dietze \tab GFZ Helmholtz Centre Potsdam, Germany \cr
 Julie Durcan \tab University of Oxford, United Kingdom \cr
 Manfred Fischer\tab University of Bayreuth, Germany \cr
 Margret C. Fuchs \tab Helmholtz-Zentrum Dresden-Rossendorf, Helmholtz-Institute Freiberg for Resource Technology,
 Freiberg, Germany \cr
 Johannes Friedrich \tab University of Bayreuth, Germany \cr
-Georgina King \tab University of Cologne, Germany \cr
+Guillaume Guerin \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+Georgina King \tab Institute of Geological Sciences, University of Bern, Switzerland \cr
 Sebastian Kreutzer \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
 Norbert Mercier \tab IRAMAT-CRP2A, Universite Bordeaux Montaigne, France \cr
+Anne Philippe \tab  Universite de Nantes and ANJA INRIA, Rennes, France \cr
 Christoph Schmidt \tab University of Bayreuth, Germany \cr
-Rachel K. Smedley \tab Aberystwyth University, United Kingdom
+Rachel K. Smedley \tab Aberystwyth University, United Kingdom \cr
+Antoine Zink \tab C2RMF, Palais du Louvre, Paris, France
 }
 
-\bold{Beta-Tester}
-
-Thomas Kolb, University of Bayreuth, Germany\cr
-
-\bold{Supervisor}
+\bold{Supervisor of the initial version in 2012}
 
 Markus Fuchs, Justus-Liebig-University Giessen, Germany\cr
 
 \bold{Support contact}
 
-\email{developers at r-luminescence.de}\cr
+\email{developers at r-luminescence.org}\cr
 
 We may further encourage the usage of our support forum. For this please
 visit our project website (link below).
 
 \bold{Bug reporting}
 
-\email{bugtracker at r-luminescence.de} \cr
+\email{developers at r-luminescence.org} or \cr
+\url{https://github.com/R-Lum/Luminescence/issues} \cr
 
 \bold{Project website}
 
-\url{http://www.r-luminescence.de}\cr
+\url{http://www.r-luminescence.org}\cr
 
 \bold{Project source code repository}\cr
 \url{https://github.com/R-Lum/Luminescence}\cr
 
 \bold{Related package projects}\cr
 \url{https://cran.r-project.org/package=RLumShiny}\cr
-\url{http://shiny.r-luminescence.de}\cr
+\url{http://shiny.r-luminescence.org}\cr
 \url{https://cran.r-project.org/package=RLumModel}\cr
-\url{http://model.r-luminescence.de}\cr
+\url{http://model.r-luminescence.org}\cr
 
 \bold{Package maintainer}
 
@@ -74,29 +96,7 @@ France, \cr \email{sebastian.kreutzer at u-bordeaux-montaigne.fr}
 
 Cooperation and personal exchange between the developers is gratefully
 funded by the DFG (SCHM 3051/3-1) in the framework of the program
-"Scientific Networks". Project title: "Lum.Network: Ein
+"Scientific Networks". Project title: "RLum.Network: Ein
 Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R" (2014-2017)
 }
-\references{
-Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M.,
-Schmidt, C., 2013. A practical guide to the R package Luminescence.
-Ancient TL, 31, 11-18.
-
-Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2016. The abanico plot:
-visualising chronometric data with individual standard errors. Quaternary Geochronology 31, 1-7.
-http://dx.doi.org/10.1016/j.quageo.2015.09.003
-
-Fuchs, M.C., Kreutzer, S., Burow, C., Dietze, M., Fischer, M., Schmidt, C.,
-Fuchs, M., 2015. Data processing in luminescence dating analysis: An
-exemplary workflow using the R package 'Luminescence'. Quaternary
-International, 362,8-13. http://dx.doi.org/10.1016/j.quaint.2014.06.034
-
-Kreutzer, S., Schmidt, C., Fuchs, M.C., Dietze, M., Fischer, M., Fuchs, M.,
-2012. Introducing an R package for luminescence dating analysis. Ancient TL,
-30, 1-8.
-
-Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
-Ancient TL 33, 16-21.
-}
 \keyword{package}
-
diff --git a/man/PSL2Risoe.BINfileData.Rd b/man/PSL2Risoe.BINfileData.Rd
new file mode 100644
index 0000000..dd2124e
--- /dev/null
+++ b/man/PSL2Risoe.BINfileData.Rd
@@ -0,0 +1,65 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/PSL2Risoe.BINfileData.R
+\name{PSL2Risoe.BINfileData}
+\alias{PSL2Risoe.BINfileData}
+\title{Convert portable OSL data to an Risoe.BINfileData object}
+\usage{
+PSL2Risoe.BINfileData(object, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}):
+\code{RLum.Analysis} object produced by \code{\link{read_PSL2R}}}
+
+\item{...}{currently not used.}
+}
+\value{
+Returns an S4 \code{\linkS4class{Risoe.BINfileData}} object that can
+be used to write a BIN file using \code{\link{write_R2BIN}}.
+}
+\description{
+Converts an \code{RLum.Analysis} object produced by the function \code{read_PSL2R()} to
+an \code{Risoe.BINfileData} object \bold{(BETA)}.
+}
+\details{
+This function converts an \code{\linkS4class{RLum.Analysis}} object that was produced
+by the \code{\link{read_PSL2R}} function to an \code{\linkS4class{Risoe.BINfileData}}.
+The \code{Risoe.BINfileData} can be used to write a Risoe BIN file via
+\code{\link{write_R2BIN}}.
+}
+\section{Function version}{
+ 0.0.1 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+# (1) load and plot example data set
+data("ExampleData.portableOSL", envir = environment())
+plot_RLum(ExampleData.portableOSL)
+
+# (2) merge all RLum.Analysis objects into one
+merged <- merge_RLum(ExampleData.portableOSL)
+merged
+
+# (3) convert to RisoeBINfile object
+bin <- PSL2Risoe.BINfileData(merged)
+bin
+
+# (4) write Risoe BIN file
+\dontrun{
+write_R2BIN(bin, "~/portableOSL.binx")
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}},
+\code{\linkS4class{Risoe.BINfileData}}
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Burow, C. (2017). PSL2Risoe.BINfileData(): Convert portable OSL data to an Risoe.BINfileData object. Function version 0.0.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
diff --git a/man/RLum-class.Rd b/man/RLum-class.Rd
index 5e9d679..659ce78 100644
--- a/man/RLum-class.Rd
+++ b/man/RLum-class.Rd
@@ -21,6 +21,7 @@ Abstract class for data in the package Luminescence
 \itemize{
 \item \code{replicate_RLum}: Replication method RLum-objects
 }}
+
 \section{Slots}{
 
 \describe{
@@ -36,6 +37,7 @@ is called.}
 \item{\code{.pid}}{Object of class \code{\link{character}} for a parent id. This allows nesting RLum-objects
 at will. The parent id can be the uid of another object.}
 }}
+
 \note{
 \code{RLum} is a virtual class.
 }
@@ -47,16 +49,21 @@ from it.
 \section{Class version}{
  0.4.0
 }
+
 \examples{
 
 showClass("RLum")
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-}
 \seealso{
 \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Analysis}}
 }
-\keyword{classes}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+} 
 
+\section{How to cite}{
+Kreutzer, S. (2017). RLum-class(): Class 'RLum'. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{classes}
diff --git a/man/RLum.Analysis-class.Rd b/man/RLum.Analysis-class.Rd
index 99e8c5d..5437f9e 100644
--- a/man/RLum.Analysis-class.Rd
+++ b/man/RLum.Analysis-class.Rd
@@ -3,12 +3,13 @@
 \docType{class}
 \name{RLum.Analysis-class}
 \alias{RLum.Analysis-class}
+\alias{show,RLum.Analysis-method}
+\alias{set_RLum,RLum.Analysis-method}
 \alias{get_RLum,RLum.Analysis-method}
+\alias{structure_RLum,RLum.Analysis-method}
 \alias{length_RLum,RLum.Analysis-method}
 \alias{names_RLum,RLum.Analysis-method}
-\alias{set_RLum,RLum.Analysis-method}
-\alias{show,RLum.Analysis-method}
-\alias{structure_RLum,RLum.Analysis-method}
+\alias{smooth_RLum,RLum.Analysis-method}
 \title{Class \code{"RLum.Analysis"}}
 \usage{
 \S4method{show}{RLum.Analysis}(object)
@@ -19,13 +20,15 @@
 \S4method{get_RLum}{RLum.Analysis}(object, record.id = NULL,
   recordType = NULL, curveType = NULL, RLum.type = NULL,
   protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE,
-  info.object = NULL)
+  info.object = NULL, subset = NULL)
 
 \S4method{structure_RLum}{RLum.Analysis}(object, fullExtent = FALSE)
 
 \S4method{length_RLum}{RLum.Analysis}(object)
 
 \S4method{names_RLum}{RLum.Analysis}(object)
+
+\S4method{smooth_RLum}{RLum.Analysis}(object, ...)
 }
 \arguments{
 \item{object}{\code{[show_RLum]}\code{[get_RLum]}\code{[names_RLum]}\code{[length_RLum]}
@@ -81,9 +84,15 @@ easier, however, if this method is used within a loop this might undesired.}
 \item{info.object}{[\code{get_RLum}] \code{\link{character}} (optional): name of the wanted info
 element}
 
+\item{subset}{\code{\link{expression}} (optional): logical expression indicating elements or rows
+to keep: missing values are taken as false. This argument takes precedence over all
+other arguments, meaning they are not considered when subsetting the object.}
+
 \item{fullExtent}{[structure_RLum] \code{\link{logical}} (with default): extends the returned \code{data.frame}
 to its full extent, i.e. all info elements are part of the return as well. The default value
 is \code{FALSE} as the data frame might become rather big.}
+
+\item{...}{further arguments passed to underlying methods}
 }
 \value{
 \bold{\code{get_RLum}}:\cr
@@ -105,6 +114,10 @@ Returns the number records in this object.
 \bold{\code{names_RLum}}\cr
 
 Returns the names of the record types (recordType) in this object.
+
+\bold{\code{smooth_RLum}}\cr
+
+Same object as input, after smoothing
 }
 \description{
 Object class to represent analysis data for protocol analysis, i.e. all curves, spectra etc.
@@ -133,7 +146,12 @@ Currently supported objects are: RLum.Data.Curve and RLum.Data.Spectrum
 \item \code{length_RLum}: Returns the length of the object, i.e., number of stored records.
 
 \item \code{names_RLum}: Returns the names of the \code{\linkS4class{RLum.Data}} objects (same as shown with the show method)
+
+\item \code{smooth_RLum}: Smoothing of \code{RLum.Data} objects contained in this \code{RLum.Analysis} object
+using the functions \code{\link[zoo]{rollmean}} or \code{\link[zoo]{rollmedian}}.
+In particular the internal function \code{.smoothing} is used.
 }}
+
 \section{Slots}{
 
 \describe{
@@ -141,6 +159,7 @@ Currently supported objects are: RLum.Data.Curve and RLum.Data.Spectrum
 
 \item{\code{records}}{Object of class \code{\link{list}} containing objects of class \code{\linkS4class{RLum.Data}}}
 }}
+
 \note{
 The method \code{\link{structure_RLum}} is currently only available for objects
 containing \code{\linkS4class{RLum.Data.Curve}}.
@@ -151,8 +170,9 @@ containing \code{\linkS4class{RLum.Data.Curve}}.
 }
 
 \section{Class version}{
- 0.4.6
+ 0.4.8
 }
+
 \examples{
 
 showClass("RLum.Analysis")
@@ -171,14 +191,18 @@ get_RLum(IRSAR.RF.Data)
 get_RLum(IRSAR.RF.Data, record.id = 1, drop = FALSE)
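+
+##subset the object with the new 'subset' argument (sketch; the recordType
+##value "RF" is an assumption based on the IR-RF example data)
+get_RLum(IRSAR.RF.Data, subset = (recordType == "RF"))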
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-}
 \seealso{
 \code{\link{Risoe.BINfileData2RLum.Analysis}},
 \code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). RLum.Analysis-class(): Class 'RLum.Analysis'. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
 \keyword{classes}
 \keyword{methods}
-
diff --git a/man/RLum.Data-class.Rd b/man/RLum.Data-class.Rd
index 9380293..0c9466e 100644
--- a/man/RLum.Data-class.Rd
+++ b/man/RLum.Data-class.Rd
@@ -18,17 +18,17 @@ from it.
 \section{Class version}{
  0.2.1
 }
+
 \examples{
 
 showClass("RLum.Data")
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-}
 \seealso{
 \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Spectrum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+}
 \keyword{classes}
-
diff --git a/man/RLum.Data.Curve-class.Rd b/man/RLum.Data.Curve-class.Rd
index 5f3b2fc..b590b03 100644
--- a/man/RLum.Data.Curve-class.Rd
+++ b/man/RLum.Data.Curve-class.Rd
@@ -3,12 +3,13 @@
 \docType{class}
 \name{RLum.Data.Curve-class}
 \alias{RLum.Data.Curve-class}
-\alias{bin_RLum.Data,RLum.Data.Curve-method}
+\alias{show,RLum.Data.Curve-method}
+\alias{set_RLum,RLum.Data.Curve-method}
 \alias{get_RLum,RLum.Data.Curve-method}
 \alias{length_RLum,RLum.Data.Curve-method}
 \alias{names_RLum,RLum.Data.Curve-method}
-\alias{set_RLum,RLum.Data.Curve-method}
-\alias{show,RLum.Data.Curve-method}
+\alias{bin_RLum.Data,RLum.Data.Curve-method}
+\alias{smooth_RLum,RLum.Data.Curve-method}
 \title{Class \code{"RLum.Data.Curve"}}
 \usage{
 \S4method{show}{RLum.Data.Curve}(object)
@@ -24,6 +25,9 @@
 \S4method{names_RLum}{RLum.Data.Curve}(object)
 
 \S4method{bin_RLum.Data}{RLum.Data.Curve}(object, bin_size = 2)
+
+\S4method{smooth_RLum}{RLum.Data.Curve}(object, k = NULL, fill = NA,
+  align = "right", method = "mean")
 }
 \arguments{
 \item{object}{[\code{show_RLum}][\code{get_RLum}][\code{length_RLum}][\code{names_RLum}] an object of
@@ -55,6 +59,18 @@ element}
 
 \item{bin_size}{[\code{bin_RLum}] \code{\link{integer}} (with default): set number of channels
 used for each bin, e.g. \code{bin_size = 2} means that two channels are binned.}
+
+\item{k}{[\code{smooth_RLum}] \code{\link{integer}} (with default): window for the rolling mean; must be odd for rollmedian.
+If nothing is set, \code{k} is set automatically}
+
+\item{fill}{[\code{smooth_RLum}] \code{\link{numeric}} (with default): a vector of fill values for padding the left- and right-hand side of the data (cf. \code{\link[zoo]{rollmean}})}
+
+\item{align}{[\code{smooth_RLum}] \code{\link{character}} (with default): specifies whether the index of the result should be
+left- or right-aligned (default) or centered relative to the rolling window of observations; allowed values are
+\code{"right"}, \code{"center"} and \code{"left"}}
+
+\item{method}{[\code{smooth_RLum}] \code{\link{character}} (with default): defines which method should be applied for the
+smoothing: \code{"mean"} or \code{"median"}}
 }
 \value{
 \bold{\code{set_RLum}}\cr
@@ -77,6 +93,10 @@ Names of the info elements (slot \code{info})
 \bold{\code{bin_RLum.Data}}\cr
 
 Same object as input, after applying the binning.
+
+\bold{\code{smooth_RLum}}\cr
+
+Same object as input, after smoothing
 }
 \description{
 Class for representing luminescence curve data.
@@ -98,7 +118,11 @@ value time/temperature of the curve (corresponding to the stimulation length)
 \item \code{names_RLum}: Returns the names of the info elements coming along with this curve object
 
 \item \code{bin_RLum.Data}: Allows binning of specific objects
+
+\item \code{smooth_RLum}: Smoothing of RLum.Data.Curve objects using the function \code{\link[zoo]{rollmean}} or \code{\link[zoo]{rollmedian}}.
+In particular the internal function \code{.smoothing} is used.
 }}
+
 \section{Slots}{
 
 \describe{
@@ -113,6 +137,7 @@ data = Your.RLum.Data.Curve, recordType = 'never seen before')}
 would just change the recordType. For missing arguments the value is taken from the input object
 in 'data' (which is already an RLum.Data.Curve object in this example)}
 }}
+
 \note{
 The class should only contain data for a single curve. For additional
 elements the slot \code{info} can be used (e.g. providing additional heating
@@ -126,8 +151,9 @@ namely: \code{\link{Risoe.BINfileData2RLum.Analysis}}, \code{\link{read_XSYG2R}}
 }
 
 \section{Class version}{
- 0.4.1
+ 0.5.0
 }
+
 \examples{
 
 showClass("RLum.Data.Curve")
@@ -136,12 +162,16 @@ showClass("RLum.Data.Curve")
 set_RLum(class = "RLum.Data.Curve")
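+
+##sketch of the new smooth_RLum() method on a curve filled with toy data
+##(the matrix values below are made up for illustration)
+curve <- set_RLum(class = "RLum.Data.Curve",
+                  data = matrix(c(1:100, rnorm(100, mean = 100)), ncol = 2))
+smooth_RLum(curve, k = 5, method = "mean")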
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-}
 \seealso{
 \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
 \code{\link{plot_RLum}}, \code{\link{merge_RLum}}
 }
-\keyword{classes}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+} 
 
+\section{How to cite}{
+Kreutzer, S. (2017). RLum.Data.Curve-class(): Class 'RLum.Data.Curve'. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{classes}
diff --git a/man/RLum.Data.Image-class.Rd b/man/RLum.Data.Image-class.Rd
index 0e0c383..3dd7566 100644
--- a/man/RLum.Data.Image-class.Rd
+++ b/man/RLum.Data.Image-class.Rd
@@ -3,10 +3,10 @@
 \docType{class}
 \name{RLum.Data.Image-class}
 \alias{RLum.Data.Image-class}
+\alias{show,RLum.Data.Image-method}
+\alias{set_RLum,RLum.Data.Image-method}
 \alias{get_RLum,RLum.Data.Image-method}
 \alias{names_RLum,RLum.Data.Image-method}
-\alias{set_RLum,RLum.Data.Image-method}
-\alias{show,RLum.Data.Image-method}
 \title{Class \code{"RLum.Data.Image"}}
 \usage{
 \S4method{show}{RLum.Data.Image}(object)
@@ -76,6 +76,7 @@ provided, the raw image data (RasterBrick) will be returned.
 
 \item \code{names_RLum}: Returns the names of the info elements coming along with this curve object
 }}
+
 \section{Slots}{
 
 \describe{
@@ -89,6 +90,7 @@ are measured or predefined}
 
 \item{\code{info}}{Object of class \code{\link{list}} containing further meta information objects}
 }}
+
 \note{
 The class should only contain data for a set of images. For additional
 elements the slot \code{info} can be used.
@@ -101,6 +103,7 @@ elements the slot \code{info} can be used.
 \section{Class version}{
  0.4.0
 }
+
 \examples{
 
 showClass("RLum.Data.Image")
@@ -109,12 +112,16 @@ showClass("RLum.Data.Image")
 set_RLum(class = "RLum.Data.Image")
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-}
 \seealso{
 \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
 \code{\link{plot_RLum}}, \code{\link{read_SPE2R}}
 }
-\keyword{classes}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). RLum.Data.Image-class(): Class 'RLum.Data.Image'. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
+\keyword{classes}
diff --git a/man/RLum.Data.Spectrum-class.Rd b/man/RLum.Data.Spectrum-class.Rd
index 3c930af..d1699b6 100644
--- a/man/RLum.Data.Spectrum-class.Rd
+++ b/man/RLum.Data.Spectrum-class.Rd
@@ -3,10 +3,10 @@
 \docType{class}
 \name{RLum.Data.Spectrum-class}
 \alias{RLum.Data.Spectrum-class}
+\alias{show,RLum.Data.Spectrum-method}
+\alias{set_RLum,RLum.Data.Spectrum-method}
 \alias{get_RLum,RLum.Data.Spectrum-method}
 \alias{names_RLum,RLum.Data.Spectrum-method}
-\alias{set_RLum,RLum.Data.Spectrum-method}
-\alias{show,RLum.Data.Spectrum-method}
 \title{Class \code{"RLum.Data.Spectrum"}}
 \usage{
 \S4method{show}{RLum.Data.Spectrum}(object)
@@ -76,6 +76,7 @@ is provided, the raw curve data (matrix) will be returned
 
 \item \code{names_RLum}: Returns the names of the info elements coming along with this curve object
 }}
+
 \section{Slots}{
 
 \describe{
@@ -89,6 +90,7 @@ Row labels indicate wavelength/pixel values, column labels are temperature or ti
 
 \item{\code{info}}{Object of class \code{\link{list}} containing further meta information objects}
 }}
+
 \note{
 The class should only contain data for a single spectra data set. For
 additional elements the slot \code{info} can be used. Objects from this class are automatically
@@ -102,6 +104,7 @@ created by, e.g., \code{\link{read_XSYG2R}}
 \section{Class version}{
  0.4.0
 }
+
 \examples{
 
 showClass("RLum.Data.Spectrum")
@@ -118,12 +121,16 @@ get_RLum(TL.Spectrum)
 plot_RLum(TL.Spectrum)
 }
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-}
 \seealso{
 \code{\linkS4class{RLum}}, \code{\linkS4class{RLum.Data}},
 \code{\link{plot_RLum}}
 }
-\keyword{classes}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). RLum.Data.Spectrum-class(): Class 'RLum.Data.Spectrum'. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
+\keyword{classes}
diff --git a/man/RLum.Results-class.Rd b/man/RLum.Results-class.Rd
index 3d9904c..3f57181 100644
--- a/man/RLum.Results-class.Rd
+++ b/man/RLum.Results-class.Rd
@@ -3,11 +3,11 @@
 \docType{class}
 \name{RLum.Results-class}
 \alias{RLum.Results-class}
+\alias{show,RLum.Results-method}
+\alias{set_RLum,RLum.Results-method}
 \alias{get_RLum,RLum.Results-method}
 \alias{length_RLum,RLum.Results-method}
 \alias{names_RLum,RLum.Results-method}
-\alias{set_RLum,RLum.Results-method}
-\alias{show,RLum.Results-method}
 \title{Class \code{"RLum.Results"}}
 \usage{
 \S4method{show}{RLum.Results}(object)
@@ -94,11 +94,13 @@ receiving function if results are pipped.
 
 \item \code{names_RLum}: Returns the names data.objects
 }}
+
 \section{Slots}{
 
 \describe{
 \item{\code{data}}{Object of class "list" containing output data}
 }}
+
 \note{
 The class is intended to store results from functions to be used by
 other functions. The data in the object should always be accessed by the
@@ -112,6 +114,7 @@ method \code{get_RLum}.
 \section{Class version}{
  0.5.1
 }
+
 \examples{
 
 showClass("RLum.Results")
@@ -141,13 +144,17 @@ get_RLum(dose.rate, data.object = "parameters")
 dose.rate$parameters
 
 }
+\seealso{
+\code{\linkS4class{RLum}}, \code{\link{plot_RLum}}, \code{\link{merge_RLum}}
+}
 \author{
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 (France)
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). RLum.Results-class(): Class 'RLum.Results'. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\seealso{
-\code{\linkS4class{RLum}}, \code{\link{plot_RLum}}, \code{\link{merge_RLum}}
-}
+
 \keyword{classes}
 \keyword{methods}
-
diff --git a/man/Risoe.BINfileData-class.Rd b/man/Risoe.BINfileData-class.Rd
index 4a8eadc..6fb0f55 100644
--- a/man/Risoe.BINfileData-class.Rd
+++ b/man/Risoe.BINfileData-class.Rd
@@ -3,14 +3,15 @@
 \docType{class}
 \name{Risoe.BINfileData-class}
 \alias{Risoe.BINfileData-class}
-\alias{get_Risoe.BINfileData,Risoe.BINfileData-method}
-\alias{set_Risoe.BINfileData,data.frame,list-method}
 \alias{show,Risoe.BINfileData-method}
+\alias{set_Risoe.BINfileData,ANY-method}
+\alias{get_Risoe.BINfileData,Risoe.BINfileData-method}
 \title{Class \code{"Risoe.BINfileData"}}
 \usage{
 \S4method{show}{Risoe.BINfileData}(object)
 
-\S4method{set_Risoe.BINfileData}{data.frame,list}(METADATA, DATA, .RESERVED)
+\S4method{set_Risoe.BINfileData}{ANY}(METADATA = data.frame(),
+  DATA = list(), .RESERVED = list())
 
 \S4method{get_Risoe.BINfileData}{Risoe.BINfileData}(object, ...)
 }
@@ -41,6 +42,7 @@ This construction method is intended for internal usage only.
 \item \code{get_Risoe.BINfileData}: Formal get-method for Risoe.BINfileData object. It does not allow accessing
 the object directly, it is just showing a terminal message.
 }}
+
 \section{Slots}{
 
 \describe{
@@ -50,9 +52,14 @@ the object directly, it is just showing a terminal message.
 
 \item{\code{.RESERVED}}{Object of class "list" containing list of undocumented raw values for internal use only.}
 }}
+
 \note{
 \bold{Internal METADATA - object structure}
 
+This structure is compatible with BIN-file versions 03-08; however, its sequential arrangement
+does not follow the manual provided by the manufacturer, but uses its own structure
+to account for the different versions.
+
 \tabular{rllll}{
 \bold{#} \tab \bold{Name} \tab \bold{Data Type} \tab \bold{V} \tab \bold{Description} \cr
 [,1]  \tab ID  \tab \code{numeric} \tab RLum \tab Unique record ID (same ID as in slot \code{DATA})\cr
@@ -108,20 +115,20 @@ the object directly, it is just showing a terminal message.
 [,51] \tab TIMESINCEIRR \tab \code{integer} \tab 06-08 \tab Time since irradiation (s)\cr
 [,52] \tab TIMETICK \tab \code{numeric} \tab 06-08 \tab Time tick for pulsing (s)\cr
 [,53] \tab ONTIME \tab \code{integer} \tab 06-08 \tab On-time for pulsing (in time ticks)\cr
-[,54] \tab STIMPERIOD \tab \code{integer} \tab 06-08 \tab Stimulation period (on+off in time ticks)\cr
-[,55] \tab GATE_ENABLED \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
-[,56] \tab ENABLE_FLAGS \tab \code{raw} \tab 06-08 \tab PMT signal gating  enabled\cr
-[,57] \tab GATE_START \tab \code{integer} \tab 06-08 \tab Start gating (in time ticks)\cr
-[,58] \tab GATE_STOP \tab \code{ingeter} \tab 06-08 \tab Stop gating (in time ticks), 'Gateend' for version 04, here only GATE_STOP is used\cr
-[,59] \tab PTENABLED \tab \code{raw} \tab 06-08 \tab Photon time enabled\cr
-[,60] \tab DTENABLED \tab \code{raw} \tab 06-08 \tab PMT dead time correction enabled\cr
-[,61] \tab DEADTIME \tab \code{numeric} \tab 06-08 \tab PMT dead time (s)\cr
-[,62] \tab MAXLPOWER \tab \code{numeric} \tab 06-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr
-[,63] \tab XRF_ACQTIME \tab \code{numeric} \tab 06-08 \tab XRF acquisition time (s)\cr
-[,64] \tab XRF_HV \tab \code{numeric} \tab 06-08 \tab XRF X-ray high voltage (V)\cr
-[,65] \tab XRF_CURR \tab \code{integer} \tab 06-08 \tab XRF X-ray current (uA)\cr
-[,66] \tab XRF_DEADTIMEF \tab \code{numeric} \tab 06-08 \tab XRF dead time fraction\cr
-[,67] \tab SEQUENCE \tab \code{character} \tab 03-04 \tab Sequence name\cr
+[,54] \tab OFFTIME \tab \code{integer} \tab 03 \tab Off-time for pulsed stimulation (in s) \cr
+[,55] \tab STIMPERIOD \tab \code{integer} \tab 06-08 \tab Stimulation period (on+off in time ticks)\cr
+[,56] \tab GATE_ENABLED \tab \code{raw} \tab 06-08 \tab PMT signal gating enabled\cr
+[,57] \tab ENABLE_FLAGS \tab \code{raw} \tab 06-08 \tab PMT signal gating  enabled\cr
+[,58] \tab GATE_START \tab \code{integer} \tab 06-08 \tab Start gating (in time ticks)\cr
+[,59] \tab GATE_STOP \tab \code{integer} \tab 06-08 \tab Stop gating (in time ticks), 'Gateend' for version 04, here only GATE_STOP is used\cr
+[,60] \tab PTENABLED \tab \code{raw} \tab 06-08 \tab Photon time enabled\cr
+[,61] \tab DTENABLED \tab \code{raw} \tab 06-08 \tab PMT dead time correction enabled\cr
+[,62] \tab DEADTIME \tab \code{numeric} \tab 06-08 \tab PMT dead time (s)\cr
+[,63] \tab MAXLPOWER \tab \code{numeric} \tab 06-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr
+[,64] \tab XRF_ACQTIME \tab \code{numeric} \tab 06-08 \tab XRF acquisition time (s)\cr
+[,65] \tab XRF_HV \tab \code{numeric} \tab 06-08 \tab XRF X-ray high voltage (V)\cr
+[,66] \tab XRF_CURR \tab \code{integer} \tab 06-08 \tab XRF X-ray current (uA)\cr
+[,67] \tab XRF_DEADTIMEF \tab \code{numeric} \tab 06-08 \tab XRF dead time fraction\cr
 [,68] \tab DETECTOR_ID \tab \code{raw} \tab 07-08 \tab Detector ID\cr
 [,69] \tab LOWERFILTER_ID \tab \code{integer} \tab 07-08 \tab Lower filter ID in reader\cr
 [,70] \tab UPPERFILTER_ID \tab \code{integer} \tab 07-08 \tab Upper filter ID in reader\cr
@@ -132,10 +139,9 @@ the object directly, it is just showing a terminal message.
 [,75] \tab MARKPOS_Y2 \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr
 [,76] \tab MARKPOS_X3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
 [,77] \tab MARKPOS_Y3 \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr
-[,78] \tab MARKPOS_X4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
-[,79] \tab MARKPOS_Y4 \tab \code{numeric} \tab 08 \tab Coordinates marker position 4 \cr
-[,80] \tab EXTR_START \tab \code{numeric} \tab 08 \tab usage unknown \cr
-[,81] \tab EXTR_END \tab \code{numeric} \tab 08 \tab usage unknown
+[,78] \tab EXTR_START \tab \code{numeric} \tab 08 \tab usage unknown \cr
+[,79] \tab EXTR_END \tab \code{numeric} \tab 08 \tab usage unknown\cr
+[,80] \tab SEQUENCE \tab \code{character} \tab 03-04 \tab Sequence name
 } V = BIN-file version (RLum means that it does not depend on a specific BIN
 version)\cr
 
@@ -178,17 +184,19 @@ Nutech)
 }
 
 \section{Function version}{
- 0.3.0
+ 0.3.3
 }
+
 \examples{
 
 showClass("Risoe.BINfileData")
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). Risoe.BINfileData-class(): Class 'Risoe.BINfileData'. Function version 0.3.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Risoe DTU, 2013. The Sequence Editor User Manual - Feb 2013 and Risoe DTU, 2016. The
 Sequence Editor User Manual - February 2016
@@ -200,5 +208,8 @@ Sequence Editor User Manual - Feburar 2016
 \code{\link{write_R2BIN}},\code{\link{merge_Risoe.BINfileData}},
 \code{\link{Risoe.BINfileData2RLum.Analysis}},
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{classes}
-
diff --git a/man/Risoe.BINfileData2RLum.Analysis.Rd b/man/Risoe.BINfileData2RLum.Analysis.Rd
index 96be78b..234a0c7 100644
--- a/man/Risoe.BINfileData2RLum.Analysis.Rd
+++ b/man/Risoe.BINfileData2RLum.Analysis.Rd
@@ -6,7 +6,7 @@
 \usage{
 Risoe.BINfileData2RLum.Analysis(object, pos = NULL, grain = NULL,
   run = NULL, set = NULL, ltype = NULL, dtype = NULL,
-  protocol = "unknown", txtProgressBar = FALSE)
+  protocol = "unknown", keep.empty = TRUE, txtProgressBar = FALSE)
 }
 \arguments{
 \item{object}{\code{\linkS4class{Risoe.BINfileData}} (\bold{required}):
@@ -41,6 +41,10 @@ limit the converted data. Commonly allowed values are listed in \code{\linkS4cla
 \item{protocol}{\code{\link{character}} (optional): sets protocol type for
 analysis object. Value may be used by subsequent analysis functions.}
 
+\item{keep.empty}{\code{\link{logical}} (with default): If \code{TRUE} (default)
+an \code{RLum.Analysis} object is returned even if it does not contain any
+records. Set to \code{FALSE} to discard all empty objects.}
+
 \item{txtProgressBar}{\link{logical} (with default): enables or disables
 \code{\link{txtProgressBar}}.}
 }
@@ -63,8 +67,9 @@ The \code{protocol} argument of the \code{\linkS4class{RLum.Analysis}}
 object is set to 'unknown' if not stated otherwise.
 }
 \section{Function version}{
- 0.4.1 (2016-09-09 10:32:17)
+ 0.4.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load data
@@ -73,15 +78,19 @@ data(ExampleData.BINfileData, envir = environment())
 ##convert values for position 1
 Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1)
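+
+##sketch of the new 'keep.empty' argument: discard aliquots without records
+Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1, keep.empty = FALSE)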
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). Risoe.BINfileData2RLum.Analysis(): Convert Risoe.BINfileData object to an RLum.Analysis object. Function version 0.4.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
 \seealso{
 \code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}}, \code{\link{read_BIN2R}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/Second2Gray.Rd b/man/Second2Gray.Rd
index c7f9253..71855a5 100644
--- a/man/Second2Gray.Rd
+++ b/man/Second2Gray.Rd
@@ -62,8 +62,9 @@ stopped. Furthermore, if a \code{data.frame} is provided for the dose rate value
 be of the same length as the data frame provided with the argument \code{data}
 }
 \section{Function version}{
- 0.6.0 (2015-11-29 17:27:48)
+ 0.6.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -89,18 +90,22 @@ data(ExampleData.DeValues, envir = environment())
 # apply dose.rate to convert De(s) to De(Gy)
 Second2Gray(ExampleData.DeValues$BT998, dose.rate)
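+
+# sketch: dose rate supplied as a data.frame of the same length as the De data,
+# as required by the note above (values and column layout are illustrative)
+dose.rate.df <- data.frame(
+  dose.rate = rep(0.0438, nrow(ExampleData.DeValues$BT998)),
+  error = rep(0.0019, nrow(ExampleData.DeValues$BT998)))
+Second2Gray(ExampleData.DeValues$BT998, dose.rate.df)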
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Dietze, M., Fuchs, M.C., Fuchs, M. (2017). Second2Gray(): Converting equivalent dose values from seconds (s) to gray (Gy). Function version 0.6.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France),\cr Michael Dietze, GFZ Potsdam (Germany),\cr Margret C. Fuchs, HZDR,
-Helmholtz-Institute Freiberg for Resource Technology
-(Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Aitken, M.J., 1985. Thermoluminescence dating. Academic Press.
 }
 \seealso{
 \code{\link{calc_SourceDoseRate}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France),\cr Michael Dietze, GFZ Potsdam (Germany),\cr Margret C. Fuchs, HZDR,
+Helmholtz-Institute Freiberg for Resource Technology
+(Germany)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/analyse_FadingMeasurement.Rd b/man/analyse_FadingMeasurement.Rd
new file mode 100644
index 0000000..0db5b1c
--- /dev/null
+++ b/man/analyse_FadingMeasurement.Rd
@@ -0,0 +1,149 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_FadingMeasurement.R
+\name{analyse_FadingMeasurement}
+\alias{analyse_FadingMeasurement}
+\title{Analyse fading measurements and return the fading rate per decade (g-value)}
+\usage{
+analyse_FadingMeasurement(object, structure = c("Lx", "Tx"), signal.integral,
+  background.integral, t_star = "half", n.MC = 100, verbose = TRUE,
+  plot = TRUE, plot.single = FALSE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}): input object with the
+measurement data. Alternatively, a \code{\link{list}} containing \code{\linkS4class{RLum.Analysis}}
+objects or a \code{\link{data.frame}} with three columns
+(x = LxTx, y = LxTx error, z = time since irradiation) can be provided.
+Can also be a wide table, i.e. a \code{\link{data.frame}} with a number of columns divisible by 3
+and where each triplet has the aforementioned column structure.}
+
+\item{structure}{\code{\link{character}} (with default): sets the structure of the measurement
+data. Allowed are \code{'Lx'} or \code{c('Lx','Tx')}. Other input is ignored}
+
+\item{signal.integral}{\code{\link{vector}} (\bold{required}): vector with the
+limits for the signal integral. Not required if a \code{data.frame} with LxTx values is
+provided.}
+
+\item{background.integral}{\code{\link{vector}} (\bold{required}): vector with the
+bounds for the background integral. Not required if a \code{data.frame} with LxTx values is
+provided.}
+
+\item{t_star}{\code{\link{character}} (with default): method for calculating the time elapsed
+since irradiation. Options are: \code{'half'}, which is \eqn{t_star := t_1 + (t_2 - t_1)/2} (Auclair et al., 2003)
+and \code{'end'}, which takes the time between irradiation and the measurement step. Default is \code{'half'}}
+
+\item{n.MC}{\code{\link{integer}} (with default): number for Monte Carlo runs for the error
+estimation}
+
+\item{verbose}{\code{\link{logical}} (with default): enables/disables verbose mode}
+
+\item{plot}{\code{\link{logical}} (with default): enables/disables plot output}
+
+\item{plot.single}{\code{\link{logical}} (with default): enables/disables single plot
+mode, i.e. one plot window per plot. Alternatively a vector specifying the plot to be drawn, e.g.,
+\code{plot.single = c(3,4)} draws only the last two plots}
+
+\item{\dots}{(optional) further arguments that can be passed to internally used functions (see details)}
+}
+\value{
+An \code{\linkS4class{RLum.Results}} object is returned:
+
+Slot: \bold{@data}\cr
+
+\tabular{lll}{
+\bold{OBJECT} \tab \code{TYPE} \tab \code{COMMENT}\cr
+\code{fading_results} \tab \code{data.frame} \tab results of the fading measurement in a table \cr
+\code{fit} \tab \code{lm} \tab object returned by the used linear fitting function \code{\link[stats]{lm}}\cr
+\code{rho_prime} \tab \code{data.frame} \tab results of rho' estimation after Kars et al. 2008 \cr
+\code{LxTx_table} \tab \code{data.frame} \tab Lx/Tx table, if curve data had been provided \cr
+\code{irr.times} \tab \code{integer} \tab vector with the irradiation times in seconds \cr
+}
+
+Slot: \bold{@info}\cr
+
+\tabular{lll}{
+\bold{OBJECT} \tab \code{TYPE} \tab \code{COMMENT}\cr
+\code{call} \tab \code{call} \tab the original function call\cr
+
+}
+}
+\description{
+The function analyses fading measurements and returns a fading rate including an error estimation.
+The function is not limited to standard fading measurements as described, e.g., in Huntley and
+Lamothe 2001. Additionally, the density of recombination centres (rho') is estimated after
+Kars et al. 2008.
+}
+\details{
+All provided output corresponds to the \eqn{tc} value obtained by this analysis. Additionally
+in the output object the g-value normalised to 2-days is provided. The output of this function
+can be passed to the function \code{\link{calc_FadingCorr}}.\cr
+
+\bold{Fitting and error estimation}\cr
+
+For the fitting the function \code{\link[stats]{lm}} is used without applying weights. For the
+error estimation all input values except tc (whose precision can be considered sufficiently high
+with regard to the underlying problem) are sampled assuming a normal distribution
+for each value, with the value as the mean and the provided uncertainty as standard deviation. \cr
+
+\bold{Density of recombination centres}
+
+The density of recombination centres, expressed by the dimensionless variable rho', is estimated
+by fitting equation 5 in Kars et al. 2008 to the data. For the fitting the function
+\code{\link[stats]{nls}} is used without applying weights. For the error estimation the same
+procedure as for the g-value is applied (see above).
+}
+\note{
+\bold{This function has BETA status and should not be used for publication work!}
+}
+\section{Function version}{
+ 0.1.5 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+## load example data (sample UNIL/NB123, see ?ExampleData.Fading)
+data("ExampleData.Fading", envir = environment())
+
+##(1) get fading measurement data (here a three column data.frame)
+fading_data <- ExampleData.Fading$fading.data$IR50
+
+##(2) run analysis
+g_value <- analyse_FadingMeasurement(
+fading_data,
+plot = TRUE,
+verbose = TRUE,
+n.MC = 10)
+
+##(3) this can be further used in the function
+## to correct the age according to Huntley & Lamothe, 2001
+results <- calc_FadingCorr(
+age.faded = c(100,2),
+g_value = g_value,
+n.MC = 10)
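+
+##(4) sketch of the Monte Carlo principle described in the details section
+##(illustration only, not the internal implementation): every LxTx value is
+##resampled from a normal distribution given by its value and error
+LxTx_MC <- replicate(10, rnorm(nrow(fading_data),
+                               mean = fading_data[[1]],
+                               sd = fading_data[[2]]))
+str(LxTx_MC)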
+
+
+} 
+
+\section{How to cite}{
+Kreutzer, S., Burow, C. (2017). analyse_FadingMeasurement(): Analyse fading measurements and return the fading rate per decade (g-value). Function version 0.1.5. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\references{
+Auclair, M., Lamothe, M., Huot, S., 2003. Measurement of anomalous fading for feldspar IRSL using
+SAR. Radiation Measurements 37, 487-492. doi:10.1016/S1350-4487(03)00018-0
+
+Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement
+and correction for it in optical dating. Canadian Journal of Earth Sciences 38,
+1093-1106. doi:10.1139/cjes-38-7-1093
+
+Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar
+IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021
+}
+\seealso{
+\code{\link{calc_OSLLxTxRatio}}, \code{\link{read_BIN2R}}, \code{\link{read_XSYG2R}},
+\code{\link{extract_IrradiationTimes}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\keyword{datagen}
diff --git a/man/analyse_IRSAR.RF.Rd b/man/analyse_IRSAR.RF.Rd
index 0385e33..8b04085 100644
--- a/man/analyse_IRSAR.RF.Rd
+++ b/man/analyse_IRSAR.RF.Rd
@@ -32,7 +32,7 @@ will be treated as minimum value and the maximum limit will be added automatical
 for the data analysis. Possible options are \code{"FIT"} or \code{"SLIDE"}.}
 
 \item{method.control}{\code{\link{list}} (optional): parameters to control the method, that can
-be passed to the choosen method. These are for (1) \code{method = "FIT"}: 'trace', 'maxiter', 'warnOnly',
+be passed to the chosen method. These are for (1) \code{method = "FIT"}: 'trace', 'maxiter', 'warnOnly',
 'minFactor' and for (2) \code{method = "SLIDE"}: 'correct_onset', 'show_density',  'show_fit', 'trace'.
 See details.}
 
@@ -46,8 +46,8 @@ values, \code{NA} and \code{NULL} (s. Details)
 
 \item{n.MC}{\code{\link{numeric}} (with default): set number of Monte
 Carlo runs for start parameter estimation (\code{method = "FIT"}) or
-error estimation (\code{method = "SLIDE"}). Note: Large values will
-significantly increase the computation time}
+error estimation (\code{method = "SLIDE"}). This value can be set to \code{NULL} to skip the
+MC runs. Note: Large values will significantly increase the computation time}
 
 \item{txtProgressBar}{\code{\link{logical}} (with default): enables \code{TRUE} or
 disables \code{FALSE} the progression bar during MC runs}
@@ -66,31 +66,87 @@ Currently supported arguments are \code{main}, \code{xlab}, \code{ylab},
 \code{\link[graphics]{legend}}), \code{xaxt}}
 }
 \value{
-A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
-returned:\cr
-
-\bold{@data}\cr
-$ data: \code{\link{data.frame}} table with De and corresponding values\cr
-..$ DE : \code{numeric}: the obtained equivalent dose\cr
-..$ DE.ERROR : \code{numeric}: (only method = "SLIDE") standard deviation obtained from MC runs \cr
-..$ DE.LOWER : \code{numeric}: 2.5\% quantile for De values obtained by MC runs \cr
-..$ DE.UPPER : \code{numeric}: 97.5\% quantile for De values obtained by MC runs  \cr
-..$ DE.STATUS  : \code{character}: test parameter status\cr
-..$ RF_NAT.LIM  : \code{charcter}: used RF_nat curve limits \cr
-..$ RF_REG.LIM : \code{character}: used RF_reg curve limits\cr
-..$ POSITION : \code{integer}: (optional) position of the curves\cr
-..$ DATE : \code{character}: (optional) measurement date\cr
-..$ SEQUENCE_NAME : \code{character}: (optional) sequence name\cr
-..$ UID : \code{character}: unique data set ID \cr
-$ test_parameters : \code{\link{data.frame}} table test parameters \cr
-$ fit : {\code{\link{nls}} \code{nlsModel} object} \cr
-$ slide : \code{\link{list}} data from the sliding process, including the sliding matrix\cr
-
-\bold{@info}\cr
-$ call : \code{\link[methods]{language-class}}: the orignal function call \cr
+The function returns numerical output and an (optional) plot.
+
+-----------------------------------\cr
+[ NUMERICAL OUTPUT ]\cr
+-----------------------------------\cr
+\bold{\code{RLum.Results}}-object\cr
+
+\bold{slot:} \bold{\code{@data}} \cr
+
+[.. $data : \code{data.frame}]\cr
+
+\tabular{lll}{
+\bold{Column} \tab \bold{Type} \tab \bold{Description}\cr
+ DE \tab \code{numeric} \tab the obtained equivalent dose\cr
+ DE.ERROR \tab \code{numeric} \tab (only \code{method = "SLIDE"}) standard deviation obtained from MC runs \cr
+ DE.LOWER \tab \code{numeric}\tab 2.5\% quantile for De values obtained by MC runs \cr
+ DE.UPPER \tab \code{numeric}\tab 97.5\% quantile for De values obtained by MC runs  \cr
+ DE.STATUS  \tab \code{character}\tab test parameter status\cr
+ RF_NAT.LIM  \tab \code{character}\tab used RF_nat curve limits \cr
+ RF_REG.LIM \tab \code{character}\tab used RF_reg curve limits\cr
+ POSITION \tab \code{integer}\tab (optional) position of the curves\cr
+ DATE \tab \code{character}\tab (optional) measurement date\cr
+ SEQUENCE_NAME \tab \code{character}\tab (optional) sequence name\cr
+ UID \tab \code{character}\tab unique data set ID
+}
+
+[.. $De.MC : \code{numeric}]\cr
+
+A \code{numeric} vector with all the De values obtained by the MC runs.\cr
+
+[.. $test_parameters : \code{data.frame}]\cr
+
+\tabular{lll}{
+\bold{Column} \tab \bold{Type} \tab \bold{Description}\cr
+ POSITION \tab \code{numeric} \tab aliquot position \cr
+ PARAMETER \tab \code{character} \tab test parameter name \cr
+ THRESHOLD \tab \code{numeric} \tab set test parameter threshold value \cr
+ VALUE \tab \code{numeric} \tab the calculated test parameter value (to be compared with the threshold)\cr
+ STATUS \tab \code{character} \tab test parameter status either \code{"OK"} or \code{"FAILED"} \cr
+ SEQUENCE_NAME \tab \code{character} \tab name of the sequence, if available \cr
+ UID \tab \code{character}\tab unique data set ID
+}
+
+[.. $fit : \code{nls}]\cr
+
+An \code{\link{nls}} object produced by the fitting.\cr
+
+[.. $slide : \code{list}]\cr
+
+A \code{\link{list}} with data produced during the sliding. Some of these elements are also
+reported in the summary table above. List elements are:
+
+\tabular{lll}{
+\bold{Element} \tab \bold{Type} \tab \bold{Description}\cr
+ De \tab \code{numeric} \tab the final De obtained with the sliding approach \cr
+ De.MC \tab \code{numeric} \tab all De values obtained by the MC runs \cr
+ residuals \tab \code{numeric} \tab the obtained residuals for each channel of the curve \cr
+ trend.fit \tab \code{lm} \tab fitting results produced by the fitting of the residuals \cr
+ RF_nat.slided \tab \code{matrix} \tab the slided RF_nat curve \cr
+ t_n.id \tab \code{numeric} \tab the index of the t_n offset \cr
+ I_n \tab \code{numeric} \tab the vertical intensity offset if a vertical slide was applied \cr
+ algorithm_error \tab \code{numeric} \tab the vertical sliding suffers from a systematic effect induced by the algorithm
+ used. The returned value is the standard deviation of all De values obtained while expanding the
+ vertical sliding range. It can be added as a systematic error to the final De error, if wanted.\cr
+ vslide_range \tab \code{numeric} \tab the range used for the vertical sliding \cr
+ squared_residuals \tab \code{numeric} \tab the squared residuals (horizontal sliding)
+}
+
+
+\bold{slot:} \bold{\code{@info}} \cr
+
+The original function call (\code{\link[methods]{language-class}}-object)
 
 The output (\code{data}) should be accessed using the
 function \code{\link{get_RLum}}
+
+------------------------\cr
+[ PLOT OUTPUT ]\cr
+------------------------\cr
+
+The slided IR-RF curves with the finally obtained De
 }
 \description{
 Function to analyse IRSAR RF measurements on K-feldspar samples, performed
@@ -123,7 +179,7 @@ Erfurt et al., 2003. For the fitting the mean count value of the RF_nat curve is
 Function used for the fitting (according to Erfurt et al. (2003)): \cr
 
 \deqn{\phi(D) = \phi_{0}-\Delta\phi(1-exp(-\lambda*D))^\beta}
-with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the inital
+with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the initial
 IR-RF flux, \eqn{\Delta\phi} the dose dependent change of the IR-RF flux,
 \eqn{\lambda} the exponential parameter, \eqn{D} the dose and \eqn{\beta}
 the dispersive factor.\cr\cr To obtain the palaeodose \eqn{D_{e}} the
@@ -139,7 +195,8 @@ allows to work with the original data without the need of any physical
 model. This approach was introduced for RF curves by Buylaert et al., 2012
 and Lapp et al., 2012.
 
-Here the sliding is done by searching for the minimum of the squared residuals.\cr
+Here the sliding is done by searching for the minimum of the squared residuals.
+For the mathematical details of the implementation see Frouin et al., 2017 \cr
 
 \bold{\code{method.control}}\cr
 
@@ -151,20 +208,29 @@ handled using the argument \code{method.control} only, e.g.,
 \tabular{lll}{
 ARGUMENT       \tab METHOD               \tab DESCRIPTION\cr
 \code{trace}   \tab \code{FIT}, \code{SLIDE} \tab as in \code{\link{nls}}; shows sum of squared residuals\cr
+\code{trace_vslide} \tab \code{SLIDE} \tab \code{\link{logical}} argument to enable or disable the tracing of the vertical sliding\cr
 \code{maxiter} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
 \code{warnOnly} \tab \code{FIT}           \tab as in \code{\link{nls}}\cr
 \code{minFactor} \tab \code{FIT}            \tab as in \code{\link{nls}}\cr
-\code{correct_onset} \tab \code{SLIDE}      \tab The logical argument literally spoken,
-shifts the curves along the x-axis by the first channel, as light is expected in the first channel.
- The default value is \code{TRUE}.\cr
+\code{correct_onset} \tab \code{SLIDE}      \tab The logical argument shifts the curves along the x-axis by the first channel,
+as light is expected in the first channel. The default value is \code{TRUE}.\cr
 \code{show_density} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
 enables or disables KDE plots for MC run results. If the distribution is too narrow nothing is shown.\cr
 \code{show_fit} \tab \code{SLIDE}       \tab \code{\link{logical}} (with default)
-enables or disables the plot of the fitted curve rountinly obtained during the evaluation.\cr
-\code{n.MC}                  \tab \code{SLIDE}       \tab    \code{\link{integer}} (wiht default):
-This controls the number of MC runs within the sliding (assesing the possible minimum values).
+enables or disables the plot of the fitted curve routinely obtained during the evaluation.\cr
+\code{n.MC}                  \tab \code{SLIDE}       \tab    \code{\link{integer}} (with default):
+This controls the number of MC runs within the sliding (assessing the possible minimum values).
 The default \code{n.MC = 1000}. Note: This parameter is not the same as controlled by the
-function argument \code{n.MC} \cr
+function argument \code{n.MC}. \cr
+\code{vslide_range} \tab \code{SLIDE} \tab \code{\link{logical}} or \code{\link{numeric}} or \code{\link{character}} (with default):
+This argument sets the boundaries for a vertical curve
+sliding. The argument expects a vector with an absolute minimum and a maximum (e.g., \code{c(-1000,1000)}).
+Alternatively the values \code{NULL} and \code{'auto'} are allowed. The automatic mode detects a
+reasonable vertical sliding range (\bold{recommended}). \code{NULL} applies no vertical sliding.
+The default is \code{NULL}.\cr
+\code{cores} \tab \code{SLIDE} \tab \code{\link{numeric}} or \code{\link{character}} (with default): sets the number of cores to be allocated
+for parallel processing of the Monte-Carlo runs. The default value is \code{NULL} (single thread),
+the recommended value is \code{'auto'}. An optional number (e.g., \code{cores = 8}) assigns a value manually
+(see the usage sketch below this table).
 }
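+
+The following minimal sketch shows how the sliding-specific arguments from the table above
+are passed via \code{method.control}; it assumes the package example data object
+\code{IRSAR.RF.Data} (as used in the examples below) and purely illustrative control values:
+
+\preformatted{
+##load example data
+data(ExampleData.RLum.Analysis, envir = environment())
+
+##sliding with automatic vertical sliding and parallel MC runs
+results <- analyse_IRSAR.RF(
+  object = IRSAR.RF.Data,
+  method = "SLIDE",
+  method.control = list(
+    vslide_range = 'auto',
+    cores = 'auto',
+    n.MC = 100))
+}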
 
 \bold{Error estimation}\cr
@@ -233,19 +299,18 @@ given above. Note: As this procedure requests more computation time, setting of
 to \code{NULL} also prevents a calculation of the remaining two.
 }
 \note{
-\bold{[THIS FUNCTION HAS BETA-STATUS]}\cr
-
 This function assumes that there is no sensitivity change during the
 measurements (natural vs. regenerated signal), which is in contrast to the
-findings from Buylaert et al. (2012). Furthermore: In course of ongoing research this function has
+findings by Buylaert et al. (2012). Furthermore: in the course of ongoing research this function has
 been almost fully re-written, but further thoughtful tests are still pending!
 However, as a lot of new package functionality was introduced with the changes made
 to this function, and to allow a part of such tests, the renewed code was made part
 of the current package.\cr
 }
 \section{Function version}{
- 0.6.11 (2016-07-16 11:28:11)
+ 0.7.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load data
@@ -271,10 +336,12 @@ results <- analyse_IRSAR.RF(
 
 }
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). analyse_IRSAR.RF(): Analyse IRSAR RF measurements. Function version 0.7.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 Buylaert, J.P., Jain, M., Murray, A.S., Thomsen, K.J., Lapp, T.,
 2012. IR-RF dating of sand-sized K-feldspar extracts: A test of accuracy.
@@ -296,6 +363,10 @@ automated multi-spectral radioluminescence reading system for geochronometry
 and dosimetry. Nuclear Instruments and Methods in Physics Research Section
 B: Beam Interactions with Materials and Atoms 207, 487-499.
 
+Frouin, M., Huot, S., Kreutzer, S., Lahaye, C., Lamothe, M., Philippe, A., Mercier, N., 2017.
+An improved radiofluorescence single-aliquot regenerative dose protocol for K-feldspars.
+Quaternary Geochronology 38, 13-24. doi:10.1016/j.quageo.2016.11.004
+
 Lapp, T., Jain, M., Thomsen, K.J., Murray, A.S., Buylaert, J.P., 2012. New
 luminescence measurement facilities in retrospective dosimetry. Radiation
 Measurements 47, 803-808. doi:10.1016/j.radmeas.2012.02.006
@@ -319,7 +390,9 @@ Measurements 32, 685-690.
 \seealso{
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
-\code{\link{nls}}, \code{\link[minpack.lm]{nlsLM}}
+\code{\link{nls}}, \code{\link[minpack.lm]{nlsLM}}, \code{\link[parallel]{mclapply}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
-
diff --git a/man/analyse_SAR.CWOSL.Rd b/man/analyse_SAR.CWOSL.Rd
index e86fc3a..9e9a6eb 100644
--- a/man/analyse_SAR.CWOSL.Rd
+++ b/man/analyse_SAR.CWOSL.Rd
@@ -34,7 +34,7 @@ of type \code{\link{list}}. If the input is vector (e.g., \code{c(1,2)}) the 2nd
 as the maximum background integral for the Tx curve.}
 
 \item{rejection.criteria}{\code{\link{list}} (with default): provide a named list
-and set rejection criteria in percentage for further calculation. Can be a \code{\link{list}} in
+and set rejection criteria in \bold{percentage} for further calculation. Can be a \code{\link{list}} in
 a \code{\link{list}}, if \code{object} is of type \code{\link{list}}
 
 Allowed arguments are \code{recycling.ratio}, \code{recuperation.rate},
@@ -73,7 +73,8 @@ value!}
 \value{
 A plot (optional) and an \code{\linkS4class{RLum.Results}} object is
 returned containing the following elements:
-\item{De.values}{\link{data.frame} containing De-values, De-error and
+
+\item{data}{\link{data.frame} containing De-values, De-error and
 further parameters} \item{LnLxTnTx.values}{\link{data.frame} of all
 calculated Lx/Tx values including signal, background counts and the dose
 points} \item{rejection.criteria}{\link{data.frame} with values that might
@@ -145,8 +146,9 @@ This function must not be mixed up with the function
 \bold{The function currently does only support 'OSL' or 'IRSL' data!}
 }
 \section{Function version}{
- 0.7.5 (2016-07-16 11:28:11)
+ 0.7.10 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load data
@@ -180,11 +182,12 @@ get_RLum(results)
 ##show LnTnLxTx table
 get_RLum(results, data.object = "LnLxTnTx.table")
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). analyse_SAR.CWOSL(): Analyse SAR CW-OSL measurements. Function version 0.7.10. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
 after bleaching. Quaternary Science Reviews 7, 387-393.
@@ -206,6 +209,9 @@ doi:10.1016/j.radmeas.2008.06.002
 \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
 \code{\link{get_RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
 \keyword{plot}
-
diff --git a/man/analyse_SAR.TL.Rd b/man/analyse_SAR.TL.Rd
index b4ca750..e6c43a2 100644
--- a/man/analyse_SAR.TL.Rd
+++ b/man/analyse_SAR.TL.Rd
@@ -5,9 +5,10 @@
 \title{Analyse SAR TL measurements}
 \usage{
 analyse_SAR.TL(object, object.background, signal.integral.min,
-  signal.integral.max, sequence.structure = c("PREHEAT", "SIGNAL",
-  "BACKGROUND"), rejection.criteria = list(recycling.ratio = 10,
-  recuperation.rate = 10), dose.points, log = "", ...)
+  signal.integral.max, integral_input = "channel",
+  sequence.structure = c("PREHEAT", "SIGNAL", "BACKGROUND"),
+  rejection.criteria = list(recycling.ratio = 10, recuperation.rate = 10),
+  dose.points, log = "", ...)
 }
 \arguments{
 \item{object}{\code{\linkS4class{RLum.Analysis}}(\bold{required}): input
@@ -23,6 +24,11 @@ channel number for the lower signal integral bound (e.g.
 channel number for the upper signal integral bound (e.g.
 \code{signal.integral.max = 200})}
 
+\item{integral_input}{\code{\link{character}} (with default): defines the input for
+the arguments \code{signal.integral.min} and \code{signal.integral.max}. These limits can be
+provided either as \code{'channel'} numbers (the default) or as \code{'temperature'}. If \code{'temperature'}
+is chosen the best matching channel is selected.}
+
 \item{sequence.structure}{\link{vector} \link{character} (with default):
 specifies the general sequence structure. Three steps are allowed (
 \code{"PREHEAT"}, \code{"SIGNAL"}, \code{"BACKGROUND"}), in addition a
@@ -73,8 +79,9 @@ background see Aitken and Smith (1988)\cr
 from the input object without further warning.
 }
 \section{Function version}{
- 0.1.5 (2016-07-16 11:28:11)
+ 0.2.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -92,10 +99,12 @@ analyse_SAR.TL(object,
                fit.method = "EXP OR LIN",
                sequence.structure = c("SIGNAL", "BACKGROUND"))
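+
+##Alternatively, the signal integral limits can be given as temperatures via the
+##'integral_input' argument; the temperature values below are purely illustrative
+##analyse_SAR.TL(object,
+##               signal.integral.min = 210,
+##               signal.integral.max = 310,
+##               integral_input = "temperature",
+##               sequence.structure = c("SIGNAL", "BACKGROUND"))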
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). analyse_SAR.TL(): Analyse SAR TL measurements. Function version 0.2.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation
 after bleaching.  Quaternary Science Reviews 7, 387-393.
@@ -109,6 +118,8 @@ improved single-aliquot regenerative-dose protocol. Radiation Measurements
 \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
 \code{\link{get_RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
 \keyword{plot}
-
diff --git a/man/analyse_baSAR.Rd b/man/analyse_baSAR.Rd
index 7525f86..5775930 100644
--- a/man/analyse_baSAR.Rd
+++ b/man/analyse_baSAR.Rd
@@ -22,7 +22,8 @@ provided the list can only contain either \code{Risoe.BINfileData} objects or \c
 providing a file connection. Mixing of both types is not allowed. If an \code{\linkS4class{RLum.Results}}
 is provided the function directly starts with the Bayesian Analysis (see details)}
 
-\item{XLS_file}{\code{\link{character}} (optional): XLS_file with data for the analysis. This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements)}
+\item{XLS_file}{\code{\link{character}} (optional): XLS_file with data for the analysis. This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements).
+Alternatively a \code{data.frame} of similar structure can be provided.}
 
 \item{aliquot_range}{\code{\link{numeric}} (optional): allows to limit the range of the aliquots
 used for the analysis. This argument has only an effect if the argument \code{XLS_file} is used or
@@ -241,7 +242,7 @@ are:
 (cf. \code{\link[rjags]{jags.model}})\cr
 \code{inits} \tab \code{\link{list}} \tab option to set initialisation values (cf. \code{\link[rjags]{jags.model}}) \cr
 \code{thin} \tab \code{\link{numeric}} \tab thinning interval for monitoring the Bayesian process (cf. \code{\link[rjags]{jags.model}})\cr
-\code{variables.names} \tab \code{\link{character}} \tab set the variables to be monitored during the MCMC run, default:
+\code{variable.names} \tab \code{\link{character}} \tab set the variables to be monitored during the MCMC run, default:
 \code{'central_D'}, \code{'sigma_D'}, \code{'D'}, \code{'Q'}, \code{'a'}, \code{'b'}, \code{'c'}, \code{'g'}.
 Note: only variables present in the model can be monitored.
 }
@@ -310,8 +311,9 @@ Example for two BIN-files: \code{source_doserate = list(c(0.04, 0.006), c(0.05,
 \bold{The function is currently limited to work with standard Risoe BIN-files only!}
 }
 \section{Function version}{
- 0.1.25 (2016-09-09 10:32:17)
+ 0.1.29 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##(1) load package test data set
@@ -357,18 +359,21 @@ list(
 
 }
 
+} 
+
+\section{How to cite}{
+Mercier, N., Kreutzer, S. (2017). analyse_baSAR(): Bayesian models (baSAR) applied on luminescence data. Function version 0.1.29. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Norbert Mercier, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Sebastian Kreutzer,
-IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
 
-The underlying Bayesian model based on a contribution by Combes et al., 2015.
-\cr R Luminescence Package Team}
 \references{
 Combes, B., Philippe, A., Lanos, P., Mercier, N., Tribolo, C., Guerin, G., Guibert, P., Lahaye, C., 2015.
 A Bayesian central equivalent dose model for optically stimulated luminescence dating.
 Quaternary Geochronology 28, 62-70. doi:10.1016/j.quageo.2015.04.001
 
+Mercier, N., Kreutzer, S., Christophe, C., Guerin, G., Guibert, P., Lahaye, C., Lanos, P., Philippe, A.,
+Tribolo, C., 2016. Bayesian statistics in luminescence dating: The 'baSAR'-model and its implementation
+in the R package 'Luminescence'. Ancient TL 34, 14-21.
+
 \bold{Further reading}
 
 Gelman, A., Carlin, J.B., Stern, H.S., Dunson, D.B., Vehtari, A., Rubin, D.B., 2013.
@@ -382,5 +387,10 @@ regenerative-dose protocol. Radiation Measurements 32, 57-73. doi:10.1016/S1350-
 \code{\link[readxl]{read_excel}}, \code{\link{verify_SingleGrainData}},
 \code{\link[rjags]{jags.model}}, \code{\link[rjags]{coda.samples}}, \code{\link{boxplot.default}}
 }
-\keyword{datagen}
+\author{
+Norbert Mercier, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France) \cr
 
+The underlying Bayesian model based on a contribution by Combes et al., 2015.
+\cr R Luminescence Package Team}
+\keyword{datagen}
diff --git a/man/analyse_pIRIRSequence.Rd b/man/analyse_pIRIRSequence.Rd
index 06ef96e..8af98f1 100644
--- a/man/analyse_pIRIRSequence.Rd
+++ b/man/analyse_pIRIRSequence.Rd
@@ -90,8 +90,9 @@ with the following options:\cr \code{pdf(file = "...", height = 15, width =
 15)}
 }
 \section{Function version}{
- 0.2.2 (2016-09-07 11:37:34)
+ 0.2.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -148,11 +149,12 @@ pdf(file = "...", height = 15, width = 15)
   dev.off()
 }
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). analyse_pIRIRSequence(): Analyse post-IR IRSL sequences. Function version 0.2.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz
 using an improved single-aliquot regenerative-dose protocol. Radiation
@@ -168,6 +170,9 @@ doi:10.1016/j.radmeas.2008.06.002
 \code{\link{plot_GrowthCurve}}, \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}} \code{\link{get_RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
 \keyword{plot}
-
diff --git a/man/analyse_portableOSL.Rd b/man/analyse_portableOSL.Rd
new file mode 100644
index 0000000..1c61aff
--- /dev/null
+++ b/man/analyse_portableOSL.Rd
@@ -0,0 +1,78 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/analyse_portableOSL.R
+\name{analyse_portableOSL}
+\alias{analyse_portableOSL}
+\title{Analyse portable CW-OSL measurements}
+\usage{
+analyse_portableOSL(object, signal.integral, invert = FALSE,
+  normalise = FALSE, plot = TRUE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum.Analysis}} (\bold{required}):
+\code{RLum.Analysis} object produced by \code{\link{read_PSL2R}}.}
+
+\item{signal.integral}{\code{\link{vector}} (\bold{required}): A vector of two values
+specifying the lower and upper channel used to calculate the OSL/IRSL signal. Can
+be provided in form of \code{c(1, 5)} or \code{1:5}.}
+
+\item{invert}{\code{\link{logical}} (with default): \code{TRUE} to calculate
+and plot the data in reverse order.}
+
+\item{normalise}{\code{\link{logical}} (with default):
+\code{TRUE} to normalise the OSL/IRSL signals by the mean of all corresponding
+data curves.}
+
+\item{plot}{\code{\link{logical}} (with default): enable/disable plot output}
+
+\item{...}{currently not used.}
+}
+\value{
+Returns an S4 \code{\linkS4class{RLum.Results}} object containing
+the following elements:
+}
+\description{
+The function analyses CW-OSL curve data produced by a SUERC portable OSL reader and
+produces a combined plot of OSL/IRSL signal intensities, OSL/IRSL depletion ratios
+and the IRSL/OSL ratio.
+}
+\details{
+This function only works with \code{RLum.Analysis} objects produced by \code{\link{read_PSL2R}}.
+It further assumes (or rather requires) an equal number of OSL and IRSL curves that
+are pairwise combined for calculating the IRSL/OSL ratio. For calculating the depletion ratios
+the cumulative signal of the last n channels (same number of channels as specified by \code{signal.integral})
+is divided by the cumulative signal of the first n channels (\code{signal.integral}).
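+
+A minimal sketch of the depletion ratio calculation described above (the count values and
+the integration limits are purely illustrative and not taken from a real measurement):
+
+\preformatted{
+cts <- c(120, 95, 80, 70, 60, 20, 15, 12, 10, 9)  #counts of a single curve
+signal.integral <- 1:5
+n <- length(signal.integral)
+
+##cumulative signal of the last n channels divided by
+##the cumulative signal of the first n channels
+depletion_ratio <- sum(tail(cts, n)) / sum(cts[signal.integral])
+}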
+}
+\section{Function version}{
+ 0.0.3 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+# (1) load example data set
+data("ExampleData.portableOSL", envir = environment())
+
+# (2) merge and plot all RLum.Analysis objects
+merged <- merge_RLum(ExampleData.portableOSL)
+plot_RLum(merged, combine = TRUE)
+merged
+
+# (3) analyse and plot
+results <- analyse_portableOSL(merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE)
+get_RLum(results)
+
+
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Burow, C. (2017). analyse_portableOSL(): Analyse portable CW-OSL measurements. Function version 0.0.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{datagen}
+\keyword{plot}
diff --git a/man/app_RLum.Rd b/man/app_RLum.Rd
index 3057d91..6240b24 100644
--- a/man/app_RLum.Rd
+++ b/man/app_RLum.Rd
@@ -4,7 +4,7 @@
 \alias{app_RLum}
 \title{Run Luminescence shiny apps (wrapper)}
 \usage{
-app_RLum(app, ...)
+app_RLum(app = NULL, ...)
 }
 \arguments{
 \item{app}{\code{\link{character}} (required): name of the application to start. See details for a list
@@ -18,9 +18,14 @@ Wrapper for the function \code{\link[RLumShiny]{app_RLum}} from the package
 see the manual of this package.
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \author{
 Christoph Burow, University of Cologne (Germany)
-\cr R Luminescence Package Team}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Burow, C. (2017). app_RLum(): Run Luminescence shiny apps (wrapper). Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
diff --git a/man/apply_CosmicRayRemoval.Rd b/man/apply_CosmicRayRemoval.Rd
index 4af0297..61af41f 100644
--- a/man/apply_CosmicRayRemoval.Rd
+++ b/man/apply_CosmicRayRemoval.Rd
@@ -78,8 +78,9 @@ dataset (see example).
 -
 }
 \section{Function version}{
- 0.2.1 (2016-05-02 09:36:06)
+ 0.2.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -89,11 +90,12 @@ dataset (see example).
 ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
 ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "smooth")
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). apply_CosmicRayRemoval(): Function to remove cosmic rays from an RLum.Data.Spectrum S4 class object. Function version 0.2.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Pych, W., 2003. A Fast Algorithm for Cosmic-Ray Removal from
 Single Images. Astrophysics 116, 148-153.
@@ -103,5 +105,8 @@ Single Images. Astrophysics 116, 148-153.
 \code{\linkS4class{RLum.Data.Spectrum}}, \code{\link{smooth}},
 \code{\link{smooth.spline}}, \code{\link{apply_CosmicRayRemoval}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/apply_EfficiencyCorrection.Rd b/man/apply_EfficiencyCorrection.Rd
index 2ad18a9..d6d48b9 100644
--- a/man/apply_EfficiencyCorrection.Rd
+++ b/man/apply_EfficiencyCorrection.Rd
@@ -38,8 +38,9 @@ sufficiently correct for spectral efficiency of the entire optical system
 (e.g., spectrometer, camera ...).
 }
 \section{Function version}{
- 0.1.1 (2016-05-02 09:36:06)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -48,16 +49,20 @@ sufficiently correct for spectral efficiency of the entire optical system
 ##
 ## your.spectrum <- apply_EfficiencyCorrection(your.spectrum, )
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Friedrich, J. (2017). apply_EfficiencyCorrection(): Function to apply spectral efficiency correction to RLum.Data.Spectrum S4 class objects. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France),\cr Johannes Friedrich, University of Bayreuth (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 -
 }
 \seealso{
 \code{\linkS4class{RLum.Data.Spectrum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France),\cr Johannes Friedrich, University of Bayreuth (Germany)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/as.Rd b/man/as.Rd
index 9a96772..64cc32c 100644
--- a/man/as.Rd
+++ b/man/as.Rd
@@ -1,7 +1,13 @@
 % Generated by roxygen2: do not edit by hand
-% Please edit documentation in R/RLum.Analysis-class.R, R/RLum.Data.Curve-class.R, R/RLum.Data.Image-class.R, R/RLum.Data.Spectrum-class.R, R/RLum.Results-class.R
+% Please edit documentation in R/RLum.Analysis-class.R,
+%   R/RLum.Data.Curve-class.R, R/RLum.Data.Image-class.R,
+%   R/RLum.Data.Spectrum-class.R, R/RLum.Results-class.R
 \name{as}
 \alias{as}
+\alias{as}
+\alias{as}
+\alias{as}
+\alias{as}
 \title{as() - RLum-object coercion}
 \arguments{
 \item{from}{\code{\linkS4class{RLum}} or \code{\link{list}}, \code{\link{data.frame}}, \code{\link{matrix}}
@@ -74,4 +80,3 @@ R data structures will be always loosely!
 \seealso{
 \code{\link[methods]{as}}
 }
-
diff --git a/man/bin_RLum.Data.Rd b/man/bin_RLum.Data.Rd
index dce5e12..1cf7c8d 100644
--- a/man/bin_RLum.Data.Rd
+++ b/man/bin_RLum.Data.Rd
@@ -28,8 +28,9 @@ in the documentations of the corresponding \code{\linkS4class{RLum.Data}} class.
 Currently only \code{RLum.Data} objects of class \code{RLum.Data.Curve} are supported!
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load example data
@@ -49,12 +50,16 @@ plot_RLum(bin_RLum.Data(curve, bin_size = 2))
 plot_RLum(bin_RLum.Data(curve, bin_size = 4))
 
 }
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}}
+}
 \author{
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 (France)
-\cr R Luminescence Package Team}
-\seealso{
-\code{\linkS4class{RLum.Data.Curve}}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). bin_RLum.Data(): Channel binning - method dispatcher. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\keyword{utilities}
 
+\keyword{utilities}
diff --git a/man/calc_AliquotSize.Rd b/man/calc_AliquotSize.Rd
index 8f73445..d16aa92 100644
--- a/man/calc_AliquotSize.Rd
+++ b/man/calc_AliquotSize.Rd
@@ -101,8 +101,9 @@ default, \code{10^5} iterations are used, but can be reduced/increased with
 boxplot together with a statistical summary.
 }
 \section{Function version}{
- 0.31 (2016-05-16 22:20:28)
+ 0.31 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## Estimate the amount of grains on a small aliquot
@@ -112,21 +113,34 @@ calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, MC.iter = 100)
 calc_AliquotSize(grain.size = c(100,200), sample.diameter = 8,
                  grains.counted = c(2525,2312,2880), MC.iter = 100)
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_AliquotSize(): Estimate the amount of grains on an aliquot. Function version 0.31. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Duller, G.A.T., 2008. Single-grain optical dating of Quaternary
 sediments: why aliquot size matters in luminescence dating. Boreas 37,
-589-612.  \cr\cr Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains
-are there on a single aliquot?. Ancient TL 30, 9-16. \cr\cr \bold{Further
-reading} \cr\cr Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's
+589-612.
+
+Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains
+are there on a single aliquot? Ancient TL 30, 9-16.
+
+\bold{Further reading} \cr\cr
+
+Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's
 Theorem on Circle Packing. \url{http://arxiv.org/pdf/1009.4322v1.pdf},
-2013-09-13. \cr\cr Graham, R.L., Lubachevsky, B.D., Nurmela, K.J.,
+2013-09-13.
+
+Graham, R.L., Lubachevsky, B.D., Nurmela, K.J.,
 Oestergard, P.R.J., 1998.  Dense packings of congruent circles in a circle.
-Discrete Mathematics 181, 139-154. \cr\cr Huang, W., Ye, T., 2011. Global
+Discrete Mathematics 181, 139-154.
+
+Huang, W., Ye, T., 2011. Global
 optimization method for finding dense packings of equal circles in a circle.
 European Journal of Operational Research 210, 474-481.
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
diff --git a/man/calc_AverageDose.Rd b/man/calc_AverageDose.Rd
new file mode 100644
index 0000000..d3a44a4
--- /dev/null
+++ b/man/calc_AverageDose.Rd
@@ -0,0 +1,143 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_AverageDose.R
+\name{calc_AverageDose}
+\alias{calc_AverageDose}
+\title{Calculate the Average Dose and the dose rate dispersion}
+\usage{
+calc_AverageDose(data, sigma_m = NULL, Nb_BE = 500, na.rm = TRUE,
+  plot = TRUE, verbose = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
+(\bold{required}): for \code{data.frame}: two columns with De
+\code{(data[,1])} and De error \code{(data[,2])}}
+
+\item{sigma_m}{\code{\link{numeric}} (\bold{required}): the overdispersion resulting from a dose recovery
+experiment, i.e. when all grains have received the same dose. Indeed, in such a case, any
+overdispersion (i.e. dispersion on top of analytical uncertainties) is, by definition, an
+unrecognised measurement uncertainty.}
+
+\item{Nb_BE}{\code{\link{integer}} (with default): sample size used for the bootstrapping}
+
+\item{na.rm}{\code{\link{logical}} (with default): exclude NA values
+from the data set prior to any further operation.}
+
+\item{plot}{\code{\link{logical}} (with default): enables/disables plot output}
+
+\item{verbose}{\code{\link{logical}} (with default): enables/disables terminal output}
+
+\item{...}{further arguments that can be passed to \code{\link[graphics]{hist}}. As three plots
+are returned all arguments need to be provided as a \code{\link{list}},
+e.g., \code{main = list("Plot 1", "Plot 2", "Plot 3")}. Note: not all arguments of \code{hist} are
+supported, but the output of \code{hist} is returned and can be used for own plots. \cr
+
+Further supported arguments: \code{mtext} (\code{character}), \code{rug} (\code{TRUE/FALSE}).}
+}
+\value{
+The function returns numerical output and an (optional) plot.
+
+-----------------------------------\cr
+[ NUMERICAL OUTPUT ]\cr
+-----------------------------------\cr
+\bold{\code{RLum.Results}}-object\cr
+
+\bold{slot:} \bold{\code{@data}} \cr
+
+[.. $summary : \code{data.frame}]\cr
+
+\tabular{lll}{
+\bold{Column} \tab \bold{Type} \tab \bold{Description}\cr
+ AVERAGE_DOSE \tab \code{numeric} \tab the obtained average dose\cr
+ AVERAGE_DOSE.SE \tab \code{numeric} \tab the average dose error \cr
+ SIGMA_D \tab \code{numeric}\tab sigma \cr
+ SIGMA_D.SE \tab \code{numeric}\tab standard error of the sigma  \cr
+ IC_AVERAGE_DOSE.LEVEL  \tab \code{character}\tab confidence level average dose\cr
+ IC_AVERAGE_DOSE.LOWER  \tab \code{character}\tab lower quantile of average dose \cr
+ IC_AVERAGE_DOSE.UPPER \tab \code{character}\tab upper quantile of average dose\cr
+ IC_SIGMA_D.LEVEL \tab \code{integer}\tab confidence level sigma\cr
+ IC_SIGMA_D.LOWER \tab \code{character}\tab lower sigma quantile\cr
+ IC_SIGMA_D.UPPER \tab \code{character}\tab upper sigma quantile\cr
+ L_MAX \tab \code{character}\tab maximum likelihood value
+}
+
+[.. $dstar : \code{matrix}]\cr
+
+Matrix with bootstrap values\cr
+
+[.. $hist : \code{list}]\cr
+
+Object as produced by the function \code{\link[graphics]{hist}}
+
+------------------------\cr
+[ PLOT OUTPUT ]\cr
+------------------------\cr
+
+The function returns two different plot panels.
+
+(1) An abanico plot with the dose values
+
+(2) A histogram panel comprising 3 histograms with the equivalent dose and the bootstrapped average
+dose and the sigma values.
+}
+\description{
+This function calculates the Average Dose and its extrinsic dispersion and estimates
+the standard errors by bootstrapping, based on the Average Dose Model by Guerin et al., 2017.
+}
+\details{
+\bold{\code{sigma_m}}\cr
+
+The program requires the input of a known value of sigma_m,
+which corresponds to the intrinsic overdispersion, as determined
+by a dose recovery experiment. Then the dispersion in doses (sigma_d)
+will be that over and above sigma_m (and individual uncertainties sigma_wi).
+}
+\note{
+This function has beta status!
+}
+\section{Function version}{
+ 0.1.4 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+##Example 01 using package example data
+##load example data
+data(ExampleData.DeValues, envir = environment())
+
+##calculate Average dose
+##(use only the first 56 values here)
+AD <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,],
+sigma_m = 0.1)
+
+##plot De and set Average dose as central value
+plot_AbanicoPlot(
+ data = ExampleData.DeValues$CA1[1:56,],
+ z.0 = AD$summary$AVERAGE_DOSE)
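+
+##the same run, passing arguments to the three histograms as lists
+##(the titles below are purely illustrative)
+##AD <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,], sigma_m = 0.1,
+##                       main = list("De", "Average dose", "Sigma_d"))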
+
+} 
+
+\section{How to cite}{
+Christophe, C., Philippe, A., Guerin, G., Kreutzer, S. (2017). calc_AverageDose(): Calculate the Average Dose and the dose rate dispersion. Function version 0.1.4. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\references{
+Guerin, G., Christophe, C., Philippe, A., Murray, A.S., Thomsen, K.J., Tribolo, C., Urbanova, P.,
+Jain, M., Guibert, P., Mercier, N., Kreutzer, S., Lahaye, C., 2017. Absorbed dose, equivalent dose,
+measured dose rates, and implications for OSL age estimates: Introducing the Average Dose Model.
+Quaternary Geochronology 1-32. doi:10.1016/j.quageo.2017.04.002
+
+\bold{Further reading}\cr
+
+Efron, B., Tibshirani, R., 1986. Bootstrap Methods for Standard Errors, Confidence Intervals,
+and Other Measures of Statistical Accuracy. Statistical Science 1, 54-75.
+}
+\seealso{
+\code{\link{read.table}}, \code{\link[graphics]{hist}}
+}
+\author{
+Claire Christophe, IRAMAT-CRP2A, Universite de Nantes (France),
+Anne Philippe, Universite de Nantes, (France),
+Guillaume Guerin, IRAMAT-CRP2A, Universite Bordeaux Montaigne, (France),
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne, (France)
+\cr R Luminescence Package Team}
+\keyword{datagen}
diff --git a/man/calc_CentralDose.Rd b/man/calc_CentralDose.Rd
index cd6672c..391179b 100644
--- a/man/calc_CentralDose.Rd
+++ b/man/calc_CentralDose.Rd
@@ -10,12 +10,14 @@ calc_CentralDose(data, sigmab, log = TRUE, plot = TRUE, ...)
 \arguments{
 \item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
 (\bold{required}): for \code{data.frame}: two columns with De
-\code{(data[,1])} and De error \code{(values[,2])}}
+\code{(data[,1])} and De error \code{(data[,2])}}
 
-\item{sigmab}{\code{\link{numeric}} (with default): spread in De values
-given as a fraction (e.g. 0.2). This value represents the expected
-overdispersion in the data should the sample be well-bleached (Cunningham &
-Walling 2012, p. 100).}
+\item{sigmab}{\code{\link{numeric}} (with default): additional spread in De values.
+This value represents the expected overdispersion in the data should the sample be 
+well-bleached (Cunningham & Walling 2012, p. 100).
+\bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+sigmab must be provided in the same absolute units as the De values (seconds or Gray).}
 
 \item{log}{\code{\link{logical}} (with default): fit the (un-)logged central
 age model to De data}
@@ -54,8 +56,9 @@ appendix of Galbraith & Laslett (1993, 468-470) and Galbraith & Roberts
 (2012, 15)
 }
 \section{Function version}{
- 1.3.1 (2016-05-02 09:36:06)
+ 1.3.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load example data
@@ -64,11 +67,12 @@ data(ExampleData.DeValues, envir = environment())
 ##apply the central dose model
 calc_CentralDose(ExampleData.DeValues$CA1)
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_CentralDose(): Apply the central age model (CAM) after Galbraith et al. (1999) to a given De distribution. Function version 1.3.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany) \cr Based on a
-rewritten S script of Rex Galbraith, 2010 \cr
-\cr R Luminescence Package Team}
+
 \references{
 Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
 mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
@@ -97,4 +101,7 @@ obtain a reproducible distribution?. Ancient TL 26, 3-10.
 \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
 \code{\link{calc_MinDose}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2010 \cr
+\cr R Luminescence Package Team}
diff --git a/man/calc_CommonDose.Rd b/man/calc_CommonDose.Rd
index fccae27..de519af 100644
--- a/man/calc_CommonDose.Rd
+++ b/man/calc_CommonDose.Rd
@@ -12,10 +12,12 @@ calc_CommonDose(data, sigmab, log = TRUE, ...)
 (\bold{required}): for \code{data.frame}: two columns with De
 \code{(data[,1])} and De error \code{(values[,2])}}
 
-\item{sigmab}{\code{\link{numeric}} (with default): spread in De values
-given as a fraction (e.g. 0.2). This value represents the expected
-overdispersion in the data should the sample be well-bleached (Cunningham &
-Walling 2012, p. 100).}
+\item{sigmab}{\code{\link{numeric}} (with default): additional spread in De values.
+This value represents the expected overdispersion in the data should the sample be 
+well-bleached (Cunningham & Walling 2012, p. 100).
+\bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+sigmab must be provided in the same absolute units as the De values (seconds or Gray).}
 
 \item{log}{\code{\link{logical}} (with default): fit the (un-)logged common
 age model to De data}
@@ -49,8 +51,9 @@ calculated using the un-logged estimates of De and their absolute standard
 error (Galbraith & Roberts 2012, p. 14).
 }
 \section{Function version}{
- 0.1 (2016-05-02 09:36:06)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data
@@ -59,10 +62,12 @@ data(ExampleData.DeValues, envir = environment())
 ## apply the common dose model
 calc_CommonDose(ExampleData.DeValues$CA1)
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_CommonDose(): Apply the (un-)logged common age model after Galbraith et al. (1999) to a given De distribution. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
 mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
@@ -90,4 +95,6 @@ needed to obtain a reproducible distribution?. Ancient TL 26, 3-10.
 \code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
 \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
diff --git a/man/calc_CosmicDoseRate.Rd b/man/calc_CosmicDoseRate.Rd
index 473aa06..56d8812 100644
--- a/man/calc_CosmicDoseRate.Rd
+++ b/man/calc_CosmicDoseRate.Rd
@@ -6,7 +6,7 @@
 \usage{
 calc_CosmicDoseRate(depth, density, latitude, longitude, altitude,
   corr.fieldChanges = FALSE, est.age = NA, half.depth = FALSE,
-  error = 10)
+  error = 10, ...)
 }
 \arguments{
 \item{depth}{\code{\link{numeric}} (\bold{required}): depth of overburden
@@ -40,6 +40,8 @@ rate can safely be assumed.}
 
 \item{error}{\code{\link{numeric}} (with default): general error
 (percentage) to be implemented on corrected cosmic dose rate estimate}
+
+\item{...}{further arguments (\code{verbose} to disable/enable console output).}
 }
 \value{
 Returns a terminal output. In addition an
@@ -159,8 +161,9 @@ for near-surface samples as there is no equation known to the author of this
 function at the time of writing.
 }
 \section{Function version}{
- 0.5.2 (2015-11-29 17:27:48)
+ 0.5.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##(1) calculate cosmic dose rate (one absorber)
@@ -204,10 +207,12 @@ results<- calc_CosmicDoseRate(depth = c(0.1, 0.5 , 2.1, 2.7, 4.2, 6.3),
 #export results to .csv file - uncomment for usage
 #write.csv(results, file = "c:/users/public/results_profile.csv")
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_CosmicDoseRate(): Calculate the cosmic dose rate. Function version 0.5.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Allkofer, O.C., Carstensen, K., Dau, W.D., Jokisch, H., 1975.
 Letter to the editor. The absolute cosmic ray flux at sea level. Journal of
@@ -230,4 +235,6 @@ thermoluminescence dating. Latitude, altitude and depth dependences. PACT 6,
 \seealso{
 \code{\link{BaseDataSet.CosmicDoseRate}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
diff --git a/man/calc_FadingCorr.Rd b/man/calc_FadingCorr.Rd
index 0b188c5..3a71f72 100644
--- a/man/calc_FadingCorr.Rd
+++ b/man/calc_FadingCorr.Rd
@@ -6,7 +6,8 @@
 g-value and a given tc}
 \usage{
 calc_FadingCorr(age.faded, g_value, tc = NULL, tc.g_value = tc,
-  n.MC = 10000, seed = NULL, txtProgressBar = TRUE, verbose = TRUE)
+  n.MC = 10000, seed = NULL, interval = c(0.01, 500),
+  txtProgressBar = TRUE, verbose = TRUE)
 }
 \arguments{
 \item{age.faded}{\code{\link{numeric}} \code{\link{vector}} (\bold{required}): uncorrected
@@ -34,6 +35,10 @@ tries to find a 'stable' error for the age. Note: This may take a while!}
 \item{seed}{\code{\link{integer}} (optional): sets the seed for the random number generator
 in R using \code{\link{set.seed}}}
 
+\item{interval}{\code{\link{numeric}} (with default): a vector containing the end-points of the
+age interval (in ka) to be searched for the root. This argument is passed to the function \code{\link[stats]{uniroot}}
+used for solving the equation.}
+
 \item{txtProgressBar}{\link{logical} (with default): enables or disables
 \code{\link{txtProgressBar}}}
 
@@ -118,12 +123,12 @@ case of an SAR measurement tc should be similar, however, if it differs, you hav
 tc value (the one used for estimating the g-value) using the argument \code{tc.g_value}.\cr
 }
 \note{
-The upper age limit is set to 500 ka! \cr
 Special thanks to Sebastien Huot for his support and clarification via e-mail.
 }
 \section{Function version}{
- 0.4.1 (2016-07-21 10:36:31)
+ 0.4.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##run the examples given in the appendix of Huntley and Lamothe, 2001
@@ -155,10 +160,12 @@ results <- calc_FadingCorr(
 ##access the last output
 get_RLum(results)
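+
+##the correction can be restricted to a narrower age search interval (in ka)
+##passed to uniroot(); the values below are purely illustrative
+##results <- calc_FadingCorr(age.faded = c(0.1, 0.01), g_value = c(5.0, 1.0),
+##                           tc = 2592000, interval = c(0.01, 100), n.MC = 100)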
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). calc_FadingCorr(): Apply a fading correction according to Huntley & Lamothe (2001) for a given g-value and a given tc. Function version 0.4.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading
 in K-feldspars and the measurement and correction for it in optical dating.
@@ -168,5 +175,7 @@ Canadian Journal of Earth Sciences, 38, 1093-1106.
 \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}},
 \code{\link{uniroot}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
-
diff --git a/man/calc_FastRatio.Rd b/man/calc_FastRatio.Rd
index ce34947..732b68f 100644
--- a/man/calc_FastRatio.Rd
+++ b/man/calc_FastRatio.Rd
@@ -5,9 +5,9 @@
 \title{Calculate the Fast Ratio for CW-OSL curves}
 \usage{
 calc_FastRatio(object, stimulation.power = 30.6, wavelength = 470,
-  sigmaF = 2.6e-17, sigmaM = 4.28e-18, Ch_L1 = 1, x = 1, x2 = 0.1,
-  dead.channels = c(0, 0), fitCW.sigma = FALSE, fitCW.curve = FALSE,
-  plot = TRUE, ...)
+  sigmaF = 2.6e-17, sigmaM = 4.28e-18, Ch_L1 = 1, Ch_L2 = NULL,
+  Ch_L3 = NULL, x = 1, x2 = 0.1, dead.channels = c(0, 0),
+  fitCW.sigma = FALSE, fitCW.curve = FALSE, plot = TRUE, ...)
 }
 \arguments{
 \item{object}{\code{\linkS4class{RLum.Analysis}}, 
@@ -26,6 +26,11 @@ medium component. Default value after Durcan & Duller (2011).}
 
 \item{Ch_L1}{\code{\link{numeric}} (with default): An integer specifying the channel for L1.}
 
+\item{Ch_L2}{\code{\link{numeric}} (optional): An integer specifying the channel for L2.}
+
+\item{Ch_L3}{\code{\link{numeric}} (optional): A vector of length 2 with integer
+values specifying the start and end channels for L3 (e.g., \code{c(40, 50)}).}
+
 \item{x}{\code{\link{numeric}} (with default): \% of signal remaining from the fast component.
 Used to define the location of L2 and L3 (start).}
 
@@ -68,8 +73,9 @@ required to reduce the fast and medium quartz OSL components to \code{x} and
 and end). The fast ratio is then calculated from: \eqn{(L1-L3)/(L2-L3)}.
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 # load example CW-OSL curve
 data("ExampleData.CW_OSL_Curve")
@@ -80,12 +86,12 @@ res <- calc_FastRatio(ExampleData.CW_OSL_Curve)
 # show the summary table
 get_RLum(res)
 
+} 
+
+\section{How to cite}{
+King, G., Durcan, J., Burow, C. (2017). calc_FastRatio(): Calculate the Fast Ratio for CW-OSL curves. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Georgina King, University of Cologne (Germany) \cr
-Julie A. Durcan, University of Oxford (United Kingdom) \cr
-Christoph Burow, University of Cologne (Germany) \cr
-\cr R Luminescence Package Team}
+
 \references{
 Durcan, J.A. & Duller, G.A.T., 2011. The fast ratio: A rapid measure for testing
 the dominance of the fast component in the initial OSL signal from quartz.
@@ -104,4 +110,8 @@ due to unstable signal components. Quaternary Geochronology 4, 353-362.
 \code{\link{fit_CWCurve}}, \code{\link{get_RLum}}, \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}, \code{\linkS4class{RLum.Data.Curve}}
 }
-
+\author{
+Georgina King, University of Cologne (Germany) \cr
+Julie A. Durcan, University of Oxford (United Kingdom) \cr
+Christoph Burow, University of Cologne (Germany) \cr
+\cr R Luminescence Package Team}
diff --git a/man/calc_FiniteMixture.Rd b/man/calc_FiniteMixture.Rd
index 2f5bb59..16a62c7 100644
--- a/man/calc_FiniteMixture.Rd
+++ b/man/calc_FiniteMixture.Rd
@@ -105,8 +105,9 @@ per cent) calculated by the FFM. The last plot shows the achieved BIC scores
 and maximum log-likelihood estimates for each iteration of k.
 }
 \section{Function version}{
- 0.4 (2016-05-02 09:36:06)
+ 0.4 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data
@@ -134,11 +135,12 @@ FMM
 ## fitted components
 get_RLum(object = FMM, data.object = "components")
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_FiniteMixture(): Apply the finite mixture model (FMM) after Galbraith (2005) to a given De distribution. Function version 0.4. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany) \cr Based on a
-rewritten S script of Rex Galbraith, 2006. \cr
-\cr R Luminescence Package Team}
+
 \references{
 Galbraith, R.F. & Green, P.F., 1990. Estimating the component
 ages in a finite mixture. Nuclear Tracks and Radiation Measurements 17,
@@ -167,4 +169,7 @@ reproducible distribution?. Ancient TL 26, 3-10.
 \code{\link{calc_CentralDose}}, \code{\link{calc_CommonDose}},
 \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2006. \cr
+\cr R Luminescence Package Team}
diff --git a/man/calc_FuchsLang2001.Rd b/man/calc_FuchsLang2001.Rd
index bafbd28..fa509ee 100644
--- a/man/calc_FuchsLang2001.Rd
+++ b/man/calc_FuchsLang2001.Rd
@@ -59,8 +59,9 @@ Please consider the requirements and the constraints of this method
 (see Fuchs & Lang, 2001)
 }
 \section{Function version}{
- 0.4.1 (2016-05-02 09:36:06)
+ 0.4.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -70,11 +71,12 @@ data(ExampleData.DeValues, envir = environment())
 ##calculate De according to Fuchs & Lang (2001)
 temp<- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Burow, C. (2017). calc_FuchsLang2001(): Apply the model after Fuchs & Lang (2001) to a given De distribution.. Function version 0.4.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France) Christoph Burow, University of Cologne (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Fuchs, M. & Lang, A., 2001. OSL dating of coarse-grain fluvial
 quartz using single-aliqout protocols on sediments from NE Peloponnese,
@@ -89,5 +91,8 @@ Quaternary Science Reviews 22, 1161-1167.
 \code{\link{calc_FiniteMixture}}, \code{\link{calc_CentralDose}},
 \code{\link{calc_CommonDose}}, \code{\linkS4class{RLum.Results}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France) Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
 \keyword{dplot}
-
diff --git a/man/calc_HomogeneityTest.Rd b/man/calc_HomogeneityTest.Rd
index 99f1690..97c6a94 100644
--- a/man/calc_HomogeneityTest.Rd
+++ b/man/calc_HomogeneityTest.Rd
@@ -35,8 +35,9 @@ A simple homogeneity test for De estimates
 For details see Galbraith (2003).
 }
 \section{Function version}{
- 0.2 (2016-05-02 09:36:06)
+ 0.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data
@@ -45,10 +46,12 @@ data(ExampleData.DeValues, envir = environment())
 ## apply the homogeneity test
 calc_HomogeneityTest(ExampleData.DeValues$BT998)
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_HomogeneityTest(): Apply a simple homogeneity test after Galbraith (2003). Function version 0.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Galbraith, R.F., 2003. A simple homogeneity test for estimates
 of dose obtained using OSL. Ancient TL 21, 75-77.
@@ -56,4 +59,6 @@ of dose obtained using OSL. Ancient TL 21, 75-77.
 \seealso{
 \code{\link{pchisq}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
diff --git a/man/calc_IEU.Rd b/man/calc_IEU.Rd
index 9b4aeac..a1199e1 100644
--- a/man/calc_IEU.Rd
+++ b/man/calc_IEU.Rd
@@ -49,8 +49,9 @@ This function uses the equations of Thomsen et al. (2007).  The parameters a
 and b are estimated from dose-recovery experiments.
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load data
@@ -59,12 +60,12 @@ data(ExampleData.DeValues, envir = environment())
 ## apply the IEU model
 ieu <- calc_IEU(ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1)
 
+} 
+
+\section{How to cite}{
+Smedley, R.K. (2017). calc_IEU(): Apply the internal-external-uncertainty (IEU) model after Thomsen et al. (2007) to a given De distribution. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Rachel Smedley, Geography & Earth Sciences, Aberystwyth University
-(United Kingdom) \cr Based on an excel spreadsheet and accompanying macro
-written by Kristina Thomsen.
-\cr R Luminescence Package Team}
+
 \references{
 Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model.
 Ancient TL 33, 16-21.
@@ -78,4 +79,8 @@ using single grains of quartz. Radiation Measurements 42, 370-379.
 \code{\link{calc_CentralDose}}, \code{\link{calc_FiniteMixture}},
 \code{\link{calc_FuchsLang2001}}, \code{\link{calc_MinDose}}
 }
-
+\author{
+Rachel Smedley, Geography & Earth Sciences, Aberystwyth University
+(United Kingdom) \cr Based on an excel spreadsheet and accompanying macro
+written by Kristina Thomsen.
+\cr R Luminescence Package Team}
diff --git a/man/calc_Kars2008.Rd b/man/calc_Kars2008.Rd
new file mode 100644
index 0000000..8f15683
--- /dev/null
+++ b/man/calc_Kars2008.Rd
@@ -0,0 +1,180 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/calc_Kars2008.R
+\name{calc_Kars2008}
+\alias{calc_Kars2008}
+\title{Apply the Kars et al. (2008) model}
+\usage{
+calc_Kars2008(data, rhop, ddot, readerDdot, normalise = TRUE,
+  summary = TRUE, plot = TRUE, ...)
+}
+\arguments{
+\item{data}{\code{\link{data.frame}} (\bold{required}):
+A three column data frame with numeric values on a) dose (s), b) LxTx and
+c) LxTx error. If a two column data frame is provided it is automatically
+assumed that errors on LxTx are missing. A third column will be attached
+with an arbitrary 5 \% error on the provided LxTx values.\cr
+Can also be a wide table, i.e. a \code{\link{data.frame}} with a number of columns divisible by 3
+and where each triplet has the aforementioned column structure.}
+
+\item{rhop}{\code{\link{numeric}} (\bold{required}):
+The density of recombination centres (\eqn{\rho}') and its error (see Huntley 2006),
+given as numeric vector of length two. Note that \eqn{\rho}' must \bold{not} be
+provided as the common logarithm. Example: \code{rhop = c(2.92e-06, 4.93e-07)}.}
+
+\item{ddot}{\code{\link{numeric}} (\bold{required}):
+Environmental dose rate and its error, given as a numeric vector of length two.
+Expected unit: Gy/ka. Example: \code{ddot = c(3.7, 0.4)}.}
+
+\item{readerDdot}{\code{\link{numeric}} (\bold{required}):
+Dose rate of the irradiation source of the OSL reader and its error,
+given as a numeric vector of length two.
+Expected unit: Gy/s. Example: \code{readerDdot = c(0.08, 0.01)}.}
+
+\item{normalise}{\code{\link{logical}} (with default):
+If \code{TRUE} (the default) all measured and computed LxTx values are
+normalised by the pre-exponential factor A (see details).}
+
+\item{summary}{\code{\link{logical}} (with default):
+If \code{TRUE} (the default) various parameters provided by the user
+and calculated by the model are added as text on the right-hand side of the
+plot.}
+
+\item{plot}{\code{\link{logical}} (with default): enables/disables plot output.}
+
+\item{...}{further arguments passed to \code{\link{plot}} and
+\code{\link[Luminescence]{plot_GrowthCurve}}.}
+}
+\value{
+An \code{\linkS4class{RLum.Results}} object is returned:
+
+Slot: \bold{@data}\cr
+
+\tabular{lll}{
+\bold{OBJECT} \tab \bold{TYPE} \tab \bold{COMMENT}\cr
+\code{results} \tab \code{data.frame} \tab results of the Kars et al. 2008 model \cr
+\code{data} \tab \code{data.frame} \tab original input data \cr
+\code{Ln} \tab \code{numeric} \tab Ln and its error \cr
+\code{LxTx_tables} \tab \code{list} \tab A \code{list} of \code{data.frames}
+containing data on dose, LxTx and LxTx error for each of the dose response curves.
+Note that these \bold{do not} contain the natural Ln signal, which is provided separately. \cr
+\code{fits} \tab \code{list} \tab A \code{list} of \code{nls}
+ objects produced by \code{\link[minpack.lm]{nlsLM}} when fitting the dose response curves \cr
+}
+
+Slot: \bold{@info}\cr
+
+\tabular{lll}{
+\bold{OBJECT} \tab \bold{TYPE} \tab \bold{COMMENT} \cr
+\code{call} \tab \code{call} \tab the original function call \cr
+\code{args} \tab \code{list} \tab arguments of the original function call \cr
+
+}
+}
+\description{
+A function to calculate the expected sample specific fraction of saturation
+following Kars et al. (2008) and Huntley (2006).
+}
+\details{
+This function applies the approach described in Kars et al. (2008),
+developed from the model of Huntley (2006) to calculate the expected sample
+specific fraction of saturation of a feldspar and also to calculate the fading
+corrected age using this model. \eqn{\rho}' (\code{rhop}), the density of recombination
+centres, is a crucial parameter of this model and must be determined
+separately from a fading measurement. The function
+\code{\link[Luminescence]{analyse_FadingMeasurement}}
+can be used to calculate the sample specific \eqn{\rho}' value.
+
+Firstly the unfaded D0 value is determined through applying equation 5 of
+Kars et al. (2008) to the measured LxTx data as a function of irradiation
+time, and fitting the data with a single saturating exponential of the form:
+
+\deqn{LxTx(t*) = A x \phi(t*) x (1 - exp(-(t* / D0)))}
+
+where
+
+\deqn{\phi(t*) = exp(-\rho' x ln(1.8 x s_tilde x t*)^3)}
+
+after King et al. (2016) where \code{A} is a pre-exponential factor,
+\code{t*} (s) is the irradiation time, starting at the mid-point of
+irradiation (Auclair et al. 2003) and \code{s_tilde} (3x10^15 s^-1) is the athermal
+frequency factor after Huntley (2006). \cr
+
+Using fit parameters \code{A} and \code{D0}, the function then computes a natural dose
+response curve using the environmental dose rate, \code{D_dot} (Gy/s) and equations
+[1] and [2]. Computed LxTx values are then fitted using the
+\code{\link[Luminescence]{plot_GrowthCurve}} function and the laboratory measured LnTn can then
+be interpolated onto this curve to determine the fading corrected
+De value, from which the fading corrected age is calculated. \cr
+
+The \code{calc_Kars2008} function also calculates the level of saturation (n/N)
+and the field saturation (i.e. athermal steady state, (n/N)_SS) value for
+the sample under investigation using the sample specific \eqn{\rho}',
+unfaded \code{D0} and \code{D_dot} values, following the approach of Kars et al. (2008). \cr
+
+Uncertainties are reported at 1 sigma, are assumed to be normally
+distributed and are estimated using Monte Carlo resamples (\code{n.MC = 1000})
+of \eqn{\rho}' and LxTx during dose response curve fitting, and of \eqn{\rho}'
+in the derivation of (n/N) and (n/N)_SS.
+}
+\note{
+\bold{This function has BETA status and should not be used for publication work!}
+}
+\section{Function version}{
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+## Load example data (sample UNIL/NB123, see ?ExampleData.Fading)
+data("ExampleData.Fading", envir = environment())
+
+## (1) Set all relevant parameters
+# a. fading measurement data (IR50)
+fading_data <- ExampleData.Fading$fading.data$IR50
+
+# b. Dose response curve data
+data <- ExampleData.Fading$equivalentDose.data$IR50
+
+## (2) Define required function parameters
+ddot <- c(7.00, 0.004)
+readerDdot <- c(0.134, 0.0067)
+
+# Analyse fading measurement and get an estimate of rho'.
+# Note that the RLum.Results object can be directly used for further processing.
+# The number of MC runs is reduced for this example
+rhop <- analyse_FadingMeasurement(fading_data, plot = TRUE, verbose = FALSE, n.MC = 10)
+
+## (3) Apply the Kars et al. (2008) model to the data
+kars <- calc_Kars2008(data = data,
+                      rhop = rhop,
+                      ddot = ddot,
+                      readerDdot = readerDdot,
+                      n.MC = 50
+                      )
+} 
+
+\section{How to cite}{
+King, G., Burow, C. (2017). calc_Kars2008(): Apply the Kars et al. (2008) model. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\references{
+Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar
+IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021
+
+Huntley, D.J., 2006. An explanation of the power-law decay of luminescence.
+Journal of Physics: Condensed Matter 18, 1359-1365. doi:10.1088/0953-8984/18/4/020
+
+King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016.
+Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004
+
+
+\bold{Further reading}
+
+Morthekai, P., Jain, M., Cunha, P.P., Azevedo, J.M., Singhvi, A.K., 2011. An attempt to correct
+for the fading in million year old basaltic rocks. Geochronometria 38(3), 223-230.
+}
+\author{
+Georgina King, University of Cologne (Germany), \cr
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team}
+\keyword{datagen}
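
The two equations quoted in the calc_Kars2008() details above can be evaluated in a few
lines of plain R; the sketch below uses illustrative values for rhop, A and D0 (assumptions
for demonstration, not defaults of the function):

rhop    <- 2.92e-06   # density of recombination centres rho' (illustrative)
A       <- 1          # pre-exponential factor (normalised)
D0      <- 500        # characteristic dose, here in seconds of irradiation
s_tilde <- 3e15       # athermal frequency factor (1/s) after Huntley (2006)

t_star <- seq(10, 5000, by = 10)              # irradiation time t* (s)
phi    <- exp(-rhop * log(1.8 * s_tilde * t_star)^3)
LxTx   <- A * phi * (1 - exp(-t_star / D0))   # faded dose response

plot(t_star, LxTx, type = "l", xlab = "t* (s)", ylab = "LxTx")

Larger rhop values visibly suppress the curve, which is what the (n/N) and (n/N)_SS
estimates of the function quantify.
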
diff --git a/man/calc_MaxDose.Rd b/man/calc_MaxDose.Rd
index 3cb7e89..80c6a28 100644
--- a/man/calc_MaxDose.Rd
+++ b/man/calc_MaxDose.Rd
@@ -9,13 +9,16 @@ calc_MaxDose(data, sigmab, log = TRUE, par = 3, bootstrap = FALSE,
 }
 \arguments{
 \item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
-(\bold{required}): for \code{data.frame}: two columns with De
-\code{(data[,1])} and De error \code{(values[,2])}}
+(\bold{required}): for \code{data.frame}: two columns with De \code{(data[
+,1])} and De error \code{(data[ ,2])}.}
 
-\item{sigmab}{\code{\link{numeric}} (\bold{required}): spread in De values
-given as a fraction (e.g. 0.2). This value represents the expected
-overdispersion in the data should the sample be well-bleached (Cunningham &
-Walling 2012, p. 100).}
+\item{sigmab}{\code{\link{numeric}} (\bold{required}): additional spread in De values.
+This value represents the expected overdispersion in the data should the sample be 
+well-bleached (Cunningham & Wallinga 2012, p. 100).
+\bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+sigmab must be provided in the same absolute units as the De values (seconds or Gray).
+See details (\code{\link{calc_MinDose}}).
 
 \item{log}{\code{\link{logical}} (with default): fit the (un-)logged three
 parameter minimum dose model to De data}
@@ -45,22 +48,31 @@ that calls calc_MinDose() and applies a similiar approach as described in
 Olley et al. (2006).
 }
 \details{
-\bold{Data transformation} \cr\cr To estimate the maximum dose population
+\bold{Data transformation} \cr\cr 
+To estimate the maximum dose population
 and its standard error, the three parameter minimum age model of Galbraith
 et al. (1999) is adapted. The measured De values are transformed as follows:
-\cr\cr 1. convert De values to natural logs \cr 2. multiply the logged data
-to creat a mirror image of the De distribution\cr 3. shift De values along
-x-axis by the smallest x-value found to obtain only positive values \cr 4.
-combine in quadrature the measurement error associated with each De value
-with a relative error specified by sigmab \cr 5. apply the MAM to these data
-\cr\cr When all calculations are done the results are then converted as
-follows\cr\cr 1. subtract the x-offset \cr 2. multiply the natural logs by
--1 \cr 3. take the exponent to obtain the maximum dose estimate in Gy \cr\cr
-\bold{Further documentation} \cr\cr Please see \code{\link{calc_MinDose}}.
+\cr\cr 
+1. convert De values to natural logs \cr 
+2. multiply the logged data to create a mirror image of the De distribution \cr
+3. shift De values along x-axis by the smallest x-value found to obtain only positive values \cr
+4. combine in quadrature the measurement error associated with each De value
+with a relative error specified by sigmab \cr 
+5. apply the MAM to these data \cr\cr
+
+When all calculations are done the results are then converted as
+follows\cr\cr 
+1. subtract the x-offset \cr 
+2. multiply the natural logs by -1 \cr 
+3. take the exponent to obtain the maximum dose estimate in Gy \cr\cr
+
+\bold{Further documentation} \cr\cr 
+Please see \code{\link{calc_MinDose}}.
 }
 \section{Function version}{
- 0.3 (2015-11-29 17:27:48)
+ 0.3.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data
@@ -69,37 +81,50 @@ data(ExampleData.DeValues, envir = environment())
 # apply the maximum dose model
 calc_MaxDose(ExampleData.DeValues$CA1, sigmab = 0.2, par = 3)
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_MaxDose(): Apply the maximum age model to a given De distribution. Function version 0.3.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany) \cr Based on a
-rewritten S script of Rex Galbraith, 2010 \cr
-\cr R Luminescence Package Team}
+
 \references{
 Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
 2009. A revised burial dose estimation procedure for optical dating of young
 and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+
 Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
 track ages. Nuclear Tracks Radiation Measurements 4, 459-470. \cr\cr
+
 Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
 northern Australia. Part I: experimental design and statistical models.
-Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
-Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
-R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+Archaeometry 41, 339-364. \cr\cr 
+
+Galbraith, R.F., 2005. Statistics for
+Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr
+
+Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
 calculation and display in OSL dating: An overview and some recommendations.
-Quaternary Geochronology 11, 1-27. \cr\cr Olley, J.M., Roberts, R.G.,
-Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
+Quaternary Geochronology 11, 1-27. \cr\cr 
+
+Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
 associated with human burials at Lake Mungo, Australia. Quaternary Science
-Reviews 25, 2469-2474.\cr\cr \bold{Further reading} \cr\cr Arnold, L.J. &
-Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+Reviews 25, 2469-2474.\cr\cr 
+
+\bold{Further reading} \cr\cr 
+
+Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
 (De) distributions: Implications for OSL dating of sediment mixtures.
-Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold, L.J.,
-2006. Statistical modelling of single grain quartz De distributions and an
+Quaternary Geochronology 4, 204-230. \cr\cr 
+
+Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an
 assessment of procedures for estimating burial dose. Quaternary Science
-Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
-Realizing the potential of fluvial archives using robust OSL chronologies.
-Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
-Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+Reviews 25, 2475-2502. \cr\cr 
+
+Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies.
+Quaternary Geochronology 12, 98-106. \cr\cr 
+
+Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
 of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
 \cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
 obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
@@ -109,4 +134,7 @@ obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
 \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
 \code{\link{calc_MinDose}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2010 \cr
+\cr R Luminescence Package Team}
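
The numbered transformation in the calc_MaxDose() details reads naturally as a few lines of
R. The snippet below only illustrates those steps with made-up De values and an assumed
form of the quadrature step; it is not the internal code of calc_MaxDose():

De     <- c(45, 60, 72, 80, 95)            # illustrative De values (Gy)
De_err <- c(4, 5, 6, 7, 8)                 # their absolute errors (Gy)
sigmab <- 0.2                              # expected overdispersion (fraction)

lDe  <- -log(De)                           # steps 1 + 2: log, then mirror
off  <- min(lDe)                           # step 3: offset so values are non-negative
lDe  <- lDe - off
lerr <- sqrt((De_err / De)^2 + sigmab^2)   # step 4: relative errors combined in quadrature

## step 5: the MAM-3 is then fitted to cbind(lDe, lerr); its gamma estimate is
## back-transformed by undoing the offset, flipping the sign and taking exp()
## to yield the maximum dose estimate in Gy
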
diff --git a/man/calc_MinDose.Rd b/man/calc_MinDose.Rd
index c4efe80..5113d0c 100644
--- a/man/calc_MinDose.Rd
+++ b/man/calc_MinDose.Rd
@@ -11,15 +11,18 @@ calc_MinDose(data, sigmab, log = TRUE, par = 3, bootstrap = FALSE,
 \arguments{
 \item{data}{\code{\linkS4class{RLum.Results}} or \link{data.frame}
 (\bold{required}): for \code{data.frame}: two columns with De \code{(data[
-,1])} and De error \code{(values[ ,2])}}
+,1])} and De error \code{(data[ ,2])}.}
 
-\item{sigmab}{\code{\link{numeric}} (\bold{required}): spread in De values
-given as a fraction (e.g. 0.2). This value represents the expected
-overdispersion in the data should the sample be well-bleached (Cunningham &
-Walling 2012, p. 100).}
+\item{sigmab}{\code{\link{numeric}} (\bold{required}): additional spread in De values.
+This value represents the expected overdispersion in the data should the sample be
+well-bleached (Cunningham & Wallinga 2012, p. 100).
+\bold{NOTE}: For the logged model (\code{log = TRUE}) this value must be
+a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (\code{log = FALSE}),
+sigmab must be provided in the same absolute units as the De values (seconds or Gray).
+See details.}
 
 \item{log}{\code{\link{logical}} (with default): fit the (un-)logged minimum
-dose model to De data}
+dose model to De data.}
 
 \item{par}{\code{\link{numeric}} (with default): apply the 3- or
 4-parametric minimum age model (\code{par=3} or \code{par=4}). The MAM-3 is
@@ -41,12 +44,12 @@ required (defaults to 0.95).}
 
 \item{multicore}{\code{\link{logical}} (with default): enable parallel
 computation of the bootstrap by creating a multicore SNOW cluster. Depending
-on the number of available logical CPU cores this will drastically reduce
-the computation time. Note that this option is highly experimental and not
-work for all machines. (\code{TRUE}/\code{FALSE})}
+on the number of available logical CPU cores this may drastically reduce
+the computation time. Note that this option is highly experimental and may not
+work on all machines. (\code{TRUE}/\code{FALSE})}
 
 \item{\dots}{(optional) further arguments for bootstrapping (\code{bs.M,
-bs.N, bs.h, sigmab.sd}).  See details for their usage. Further arguments are
+bs.N, bs.h, sigmab.sd}). See details for their usage. Further arguments are
 \code{verbose} to de-/activate console output (logical), \code{debug} for
 extended console output (logical) and \code{cores} (integer) to manually
 specify the number of cores to be used when \code{multicore=TRUE}.}
@@ -65,34 +68,42 @@ for all parameters} \item{BIC}{\link{numeric} BIC score}
 \item{profile}{\link{profile.mle2} the log likelihood profiles}
 \item{bootstrap}{\link{list} bootstrap results}
 
-The output should be accessed using the function
-\code{\link{get_RLum}}
+The output should be accessed using the function \code{\link{get_RLum}}
 }
 \description{
 Function to fit the (un-)logged three or four parameter minimum dose model
 (MAM-3/4) to De data.
 }
 \details{
-\bold{Parameters} \cr\cr This model has four parameters: \cr\cr
+\bold{Parameters} \cr\cr
+This model has four parameters: \cr\cr
 \tabular{rl}{ \code{gamma}: \tab minimum dose on the log scale \cr
 \code{mu}: \tab mean of the non-truncated normal distribution \cr
 \code{sigma}: \tab spread in ages above the minimum \cr \code{p0}: \tab
 proportion of grains at gamma \cr } If \code{par=3} (default) the
 3-parametric minimum age model is applied, where \code{gamma=mu}. For
 \code{par=4} the 4-parametric model is applied instead.\cr\cr
-\bold{(Un-)logged model} \cr\cr In the original version of the
-three-parameter minimum dose model, the basic data are the natural
+
+\bold{(Un-)logged model} \cr\cr
+In the original version of the minimum dose model, the basic data are the natural
 logarithms of the De estimates and relative standard errors of the De
-estimates. This model will be applied if \code{log=TRUE}. \cr\cr If
-\code{log=FALSE}, the modified un-logged model will be applied instead. This
+estimates. The value for \code{sigmab} must be provided as a ratio
+(e.g., 0.2 for 20 \%). This model will be applied if \code{log=TRUE}. \cr\cr
+
+If \code{log=FALSE}, the modified un-logged model will be applied instead. This
 has essentially the same form as the original version.  \code{gamma} and
 \code{sigma} are in Gy and \code{gamma} becomes the minimum true dose in the
-population. \cr\cr While the original (logged) version of the mimimum dose
+population. \bold{Note} that the un-logged model requires \code{sigmab} to be in the same
+absolute unit as the provided De values (seconds or Gray). \cr\cr
+
+While the original (logged) version of the minimum dose
 model may be appropriate for most samples (i.e. De distributions), the
 modified (un-logged) version is specially designed for modern-age and young
 samples containing negative, zero or near-zero De estimates (Arnold et al.
-2009, p. 323). \cr\cr \bold{Initial values & boundaries} \cr\cr The log
-likelihood calculations use the \link{nlminb} function for box-constrained
+2009, p. 323). \cr\cr
+
+\bold{Initial values & boundaries} \cr\cr
+The log likelihood calculations use the \link{nlminb} function for box-constrained
 optimisation using PORT routines.  Accordingly, initial values for the four
 parameters can be specified via \code{init.values}. If no values are
 provided for \code{init.values} reasonable starting values are estimated
@@ -104,8 +115,9 @@ longer required to be explicitly specified. If you want to override the default
 boundary values use the arguments \code{gamma.lower}, \code{gamma.upper},
 \code{sigma.lower}, \code{sigma.upper}, \code{p0.lower}, \code{p0.upper},
 \code{mu.lower} and \code{mu.upper}.  \cr\cr
-\bold{Bootstrap} \cr\cr When
-\code{bootstrap=TRUE} the function applies the bootstrapping method as
+
+\bold{Bootstrap} \cr\cr
+When \code{bootstrap=TRUE} the function applies the bootstrapping method as
 described in Wallinga & Cunningham (2012). By default, the minimum age model
 produces 1000 first level and 3000 second level bootstrap replicates
 (actually, the number of second level bootstrap replicates is three times
@@ -115,8 +127,10 @@ using the arguments \code{bs.M} (first level replicates), \code{bs.N}
 (second level replicates) and \code{sigmab.sd} (error on sigmab). With
 \code{bs.h} the bandwidth of the kernel density estimate can be specified.
 By default, \code{h} is calculated as \cr \deqn{h =
-(2*\sigma_{DE})/\sqrt{n}} \cr \bold{Multicore support} \cr\cr This function
-supports parallel computing and can be activated by \code{multicore=TRUE}.
+(2*\sigma_{DE})/\sqrt{n}} \cr
+
+\bold{Multicore support} \cr\cr
+This function supports parallel computing and can be activated by \code{multicore=TRUE}.
 By default, the number of available logical CPU cores is determined
 automatically, but can be changed with \code{cores}. The multicore support
 is only available when \code{bootstrap=TRUE} and spawns \code{n} R instances
@@ -126,7 +140,19 @@ work for your machine. Also the performance gain increases for larger number
 of bootstrap replicates. Also note that with each additional core and hence
 R instance and depending on the number of bootstrap replicates the memory
 usage can significantly increase. Make sure that memory is always availabe,
-otherwise there will be a massive perfomance hit.
+otherwise there will be a massive performance hit. \cr\cr
+
+\bold{Likelihood profiles}
+
+The likelihood profiles are generated and plotted by the \code{bbmle} package.
+The profile likelihood plots look different from ordinary profile likelihood plots because \cr\cr
+"[...] the plot method for likelihood profiles displays the square root of
+the deviance difference (twice the difference in negative log-likelihood from
+the best fit), so it will be V-shaped for cases where the quadratic approximation
+works well [...]." (Bolker 2016). \cr\cr
+For more details on the profile likelihood
+calculations and plots please see the vignettes of the \code{bbmle} package
+(also available here: \url{https://CRAN.R-project.org/package=bbmle}).
 }
 \note{
 The default starting values for \emph{gamma}, \emph{mu}, \emph{sigma}
@@ -138,10 +164,10 @@ model with \code{debug=TRUE} which provides extended console output and
 forwards all internal warning messages.
 }
 \section{Function version}{
- 0.4.3 (2016-09-09 10:32:17)
+ 0.4.4 (2017-06-29 18:40:14)
 }
-\examples{
 
+\examples{
 
 ## Load example data
 data(ExampleData.DeValues, envir = environment())
@@ -150,6 +176,7 @@ data(ExampleData.DeValues, envir = environment())
 # By default, this will apply the un-logged 3-parametric MAM.
 calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1)
 
+\dontrun{
 # (2) Re-run the model, but save results to a variable and turn
 # plotting of the log-likelihood profiles off.
 mam <- calc_MinDose(data = ExampleData.DeValues$CA1,
@@ -190,7 +217,7 @@ plot_AbanicoPlot(data = ExampleData.DeValues$CA1,
                                   rho == .(round(res$p0, 2))))
 
 
-\dontrun{
+
 # (3) Run the minimum age model with bootstrap
 # NOTE: Bootstrapping is computationally intensive
 # (3.1) run the minimum age model with default values for bootstrapping
@@ -231,37 +258,54 @@ plot(bs$poly.fits$poly.three, ask = FALSE)
 summary(bs$poly.fits$poly.three$fitted.values)
 }
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). calc_MinDose(): Apply the (un-)logged minimum age model (MAM) after Galbraith et al. (1999) to a given De distribution. Function version 0.4.4. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany) \cr Based on a
-rewritten S script of Rex Galbraith, 2010 \cr The bootstrap approach is
-based on a rewritten MATLAB script of Alastair Cunningham. \cr Alastair
-Cunningham is thanked for his help in implementing and cross-checking the
-code.
-\cr R Luminescence Package Team}
+
 \references{
 Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
 2009. A revised burial dose estimation procedure for optical dating of young
 and modern-age sediments. Quaternary Geochronology 4, 306-325. \cr\cr
+
 Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
 track ages. Nuclear Tracks Radiation Measurements 4, 459-470. \cr\cr
+
 Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
 northern Australia. Part I: experimental design and statistical models.
-Archaeometry 41, 339-364. \cr\cr Galbraith, R.F., 2005. Statistics for
-Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr Galbraith,
-R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
+Archaeometry 41, 339-364. \cr\cr
+
+Galbraith, R.F., 2005. Statistics for
+Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \cr\cr
+
+Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
 calculation and display in OSL dating: An overview and some recommendations.
-Quaternary Geochronology 11, 1-27. \cr\cr \bold{Further reading} \cr\cr
-Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain
-equivalent dose (De) distributions: Implications for OSL dating of sediment
-mixtures. Quaternary Geochronology 4, 204-230. \cr\cr Bailey, R.M. & Arnold,
-L.J., 2006. Statistical modelling of single grain quartz De distributions
-and an assessment of procedures for estimating burial dose. Quaternary
-Science Reviews 25, 2475-2502. \cr\cr Cunningham, A.C. & Wallinga, J., 2012.
-Realizing the potential of fluvial archives using robust OSL chronologies.
-Quaternary Geochronology 12, 98-106. \cr\cr Rodnight, H., Duller, G.A.T.,
-Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
+Quaternary Geochronology 11, 1-27. \cr\cr
+
+Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
+associated with human burials at Lake Mungo, Australia. Quaternary Science
+Reviews 25, 2469-2474.\cr\cr
+
+\bold{Further reading} \cr\cr
+
+Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
+(De) distributions: Implications for OSL dating of sediment mixtures.
+Quaternary Geochronology 4, 204-230. \cr\cr
+
+Bolker, B., 2016. Maximum likelihood estimation analysis with the bbmle package.
+In: Bolker, B., R Development Core Team, 2016. bbmle: Tools for General Maximum Likelihood Estimation.
+R package version 1.0.18. https://CRAN.R-project.org/package=bbmle \cr\cr
+
+Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an
+assessment of procedures for estimating burial dose. Quaternary Science
+Reviews 25, 2475-2502. \cr\cr
+
+Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies.
+Quaternary Geochronology 12, 98-106. \cr\cr
+
+Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
 of optical dating of fluvial deposits.  Quaternary Geochronology 1, 109-120.
 \cr\cr Rodnight, H., 2008. How many equivalent dose values are needed to
 obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
@@ -271,4 +315,10 @@ obtain a reproducible distribution?. Ancient TL 26, 3-10. \cr\cr
 \code{\link{calc_FiniteMixture}}, \code{\link{calc_FuchsLang2001}},
 \code{\link{calc_MaxDose}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany) \cr Based on a
+rewritten S script of Rex Galbraith, 2010 \cr The bootstrap approach is
+based on a rewritten MATLAB script of Alastair Cunningham. \cr Alastair
+Cunningham is thanked for his help in implementing and cross-checking the
+code.
+\cr R Luminescence Package Team}
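
Because the unit of sigmab now depends on log, a short usage sketch may help; the un-logged
value of 5 Gy below is a pure illustration, not a recommendation:

data(ExampleData.DeValues, envir = environment())

## logged MAM-3 (default): sigmab is a fraction, here 10 %
mam_log <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1)

## un-logged MAM-3: sigmab must then be given in the absolute unit of the
## De values, e.g. an (illustrative) 5 Gy:
# mam_unlog <- calc_MinDose(data = ExampleData.DeValues$CA1,
#                           sigmab = 5, log = FALSE)

get_RLum(mam_log)
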
diff --git a/man/calc_OSLLxTxRatio.Rd b/man/calc_OSLLxTxRatio.Rd
index 0865d0b..9e3b4fb 100644
--- a/man/calc_OSLLxTxRatio.Rd
+++ b/man/calc_OSLLxTxRatio.Rd
@@ -4,11 +4,11 @@
 \alias{calc_OSLLxTxRatio}
 \title{Calculate Lx/Tx ratio for CW-OSL curves}
 \usage{
-calc_OSLLxTxRatio(Lx.data, Tx.data, signal.integral,
+calc_OSLLxTxRatio(Lx.data, Tx.data = NULL, signal.integral,
   signal.integral.Tx = NULL, background.integral,
   background.integral.Tx = NULL,
-  background.count.distribution = "non-poisson", sigmab = NULL, sig0 = 0,
-  digits = NULL)
+  background.count.distribution = "non-poisson", use_previousBG = FALSE,
+  sigmab = NULL, sig0 = 0, digits = NULL)
 }
 \arguments{
 \item{Lx.data}{\code{\linkS4class{RLum.Data.Curve}} or \link{data.frame}
@@ -37,6 +37,10 @@ value from \code{background.integral} is used.}
 the count distribution assumed for the error calculation. Possible arguments
 \code{poisson} or \code{non-poisson}. See details for further information}
 
+\item{use_previousBG}{\code{\link{logical}} (with default): If set to \code{TRUE} the background
+of the Lx-signal is also subtracted from the Tx-signal. Please note that in this case separate
+signal integral limits for the Tx signal are not allowed and will be reset.}
+
 \item{sigmab}{\code{\link{numeric}} (optional): option to set a manual value for
 the overdispersion (for LnTx and TnTx), used for the Lx/Tx error
 calculation. The value should be provided as absolute squared count values,
@@ -53,6 +57,8 @@ digits is set to \code{NULL} nothing is rounded.}
 Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
 
 Slot \code{data} contains a \code{\link{list}} with the following structure:\cr
+
+\bold{@data}\cr
 $LxTx.table (data.frame) \cr
 .. $ LnLx \cr
 .. $ LnLx.BG \cr
@@ -67,6 +73,8 @@ $ calc.parameters (list) \cr
 .. $ sigmab.LnTx\cr
 .. $ sigmab.TnTx\cr
 .. $ k \cr
+
+\bold{@info}\cr
 $ call (original function call)\cr
 }
 \description{
@@ -124,8 +132,9 @@ The results of this function have been cross-checked with the Analyst
 own \code{sigmab} value or use \code{background.count.distribution = "poisson"}.
 }
 \section{Function version}{
- 0.6.3 (2016-09-09 10:32:17)
+ 0.7.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load data
@@ -138,11 +147,12 @@ results <- calc_OSLLxTxRatio(Lx.data, Tx.data, signal.integral = c(1:2),
 ##get results object
 get_RLum(results)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). calc_OSLLxTxRatio(): Calculate Lx/Tx ratio for CW-OSL curves. Function version 0.7.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Duller, G., 2007. Analyst.
 \url{http://www.nutech.dtu.dk/english/~/media/Andre_Universitetsenheder/Nutech/Produkter\%20og\%20services/Dosimetri/radiation_measurement_instruments/tl_osl_reader/Manuals/analyst_manual_v3_22b.ashx}\cr
@@ -158,5 +168,8 @@ background-corrected OSL count. Ancient TL, 31 (2), 1-3.
 \code{\link{Analyse_SAR.OSLdata}}, \code{\link{plot_GrowthCurve}},
 \code{\link{analyse_SAR.CWOSL}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
-
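
A minimal sketch of the new use_previousBG argument, based on the example data shipped with
the package; the integral limits are illustrative only:

data(ExampleData.LxTxOSLData, envir = environment())

## subtract the Lx background also from the Tx signal; separate Tx
## integral limits are not allowed in this mode and would be reset
res <- calc_OSLLxTxRatio(Lx.data = Lx.data,
                         Tx.data = Tx.data,
                         signal.integral = c(1:2),
                         background.integral = c(85:100),
                         use_previousBG = TRUE)
get_RLum(res)
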
diff --git a/man/calc_SourceDoseRate.Rd b/man/calc_SourceDoseRate.Rd
index 0312dd7..7887e4b 100644
--- a/man/calc_SourceDoseRate.Rd
+++ b/man/calc_SourceDoseRate.Rd
@@ -88,8 +88,9 @@ it is not recommended to use this option when multiple calibration dates (\code{
 are provided.
 }
 \section{Function version}{
- 0.3.0 (2015-11-29 17:27:48)
+ 0.3.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -127,11 +128,12 @@ xtable::xtable(get_RLum(dose.rate))
 }
 
 
+} 
+
+\section{How to cite}{
+Fuchs, M.C., Fuchs, M., Kreutzer, S. (2017). calc_SourceDoseRate(): Calculation of the source dose rate via the date of measurement. Function version 0.3.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany),
-\cr Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 NNDC, Brookhaven National Laboratory
 (\code{http://www.nndc.bnl.gov/})
@@ -139,5 +141,8 @@ NNDC, Brookhaven National Laboratory
 \seealso{
 \code{\link{Second2Gray}}, \code{\link{get_RLum}}, \code{\link{plot_RLum}}
 }
+\author{
+Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany),
+\cr Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/calc_Statistics.Rd b/man/calc_Statistics.Rd
index c8f5d24..d43f319 100644
--- a/man/calc_Statistics.Rd
+++ b/man/calc_Statistics.Rd
@@ -4,7 +4,7 @@
 \alias{calc_Statistics}
 \title{Function to calculate statistic measures}
 \usage{
-calc_Statistics(data, weight.calc = "square", digits = NULL, n.MCM = 1000,
+calc_Statistics(data, weight.calc = "square", digits = NULL, n.MCM = NULL,
   na.rm = TRUE)
 }
 \arguments{
@@ -21,7 +21,7 @@ out of \code{"reciprocal"} (weight is 1/error), \code{"square"} (weight is
 specified digits. If digits is set to \code{NULL} nothing is rounded.}
 
 \item{n.MCM}{\code{\link{numeric}} (with default): number of samples drawn
-for Monte Carlo-based statistics. Set to zero to disable this option.}
+for Monte Carlo-based statistics. \code{NULL} (the default) disables MC runs.}
 
 \item{na.rm}{\code{\link{logical}} (with default): indicating whether NA
 values should be stripped before the computation proceeds.}
@@ -30,11 +30,11 @@ values should be stripped before the computation proceeds.}
 Returns a list with weighted and unweighted statistic measures.
 }
 \description{
-This function calculates a number of descriptive statistics for De-data,
-most fundamentally using error-weighted approaches.
+This function calculates a number of descriptive statistics for estimates
+with a given standard error (SE), most fundamentally using error-weighted approaches.
 }
 \details{
-The option to use Monte Carlo Methods (\code{n.MCM > 0}) allows calculating
+The option to use Monte Carlo Methods (\code{n.MCM}) allows calculating
 all descriptive statistics based on random values. The distribution of these
 random values is based on the Normal distribution with \code{De} values as
 means and \code{De_error} values as one standard deviation. Increasing the
@@ -45,8 +45,9 @@ values. See Dietze et al. (2016, Quaternary Geochronology) and the function
 \code{\link{plot_AbanicoPlot}} for details.
 }
 \section{Function version}{
- 0.1.6 (2016-05-16 22:14:31)
+ 0.1.7 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data
@@ -70,6 +71,10 @@ str(calc_Statistics(x))
 }
 \author{
 Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
-\keyword{datagen}
+\cr R Luminescence Package Team} 
 
+\section{How to cite}{
+Dietze, M. (2017). calc_Statistics(): Function to calculate statistic measures. Function version 0.1.7. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{datagen}
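
With n.MCM now defaulting to NULL, Monte Carlo-based statistics must be requested
explicitly; a short sketch using the packaged De example data (the element name BT998 is
assumed from the package example data set):

data(ExampleData.DeValues, envir = environment())

## plain error-weighted statistics (no MC runs, the new default)
str(calc_Statistics(ExampleData.DeValues$BT998))

## additionally draw 1000 Monte Carlo samples per value
str(calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000))
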
diff --git a/man/calc_TLLxTxRatio.Rd b/man/calc_TLLxTxRatio.Rd
index c59c7c9..ec0011e 100644
--- a/man/calc_TLLxTxRatio.Rd
+++ b/man/calc_TLLxTxRatio.Rd
@@ -4,8 +4,8 @@
 \alias{calc_TLLxTxRatio}
 \title{Calculate the Lx/Tx ratio for a given set of TL curves [beta version]}
 \usage{
-calc_TLLxTxRatio(Lx.data.signal, Lx.data.background, Tx.data.signal,
-  Tx.data.background, signal.integral.min, signal.integral.max)
+calc_TLLxTxRatio(Lx.data.signal, Lx.data.background = NULL, Tx.data.signal,
+  Tx.data.background = NULL, signal.integral.min, signal.integral.max)
 }
 \arguments{
 \item{Lx.data.signal}{\code{\linkS4class{RLum.Data.Curve}} or
@@ -42,14 +42,24 @@ $ LxTx.table \cr .. $ LnLx \cr .. $ LnLx.BG \cr .. $ TnTx \cr .. $ TnTx.BG
 Calculate Lx/Tx ratio for a given set of TL curves.
 }
 \details{
--
+\bold{Uncertainty estimation}\cr
+
+The standard errors are calculated using the following generalised equation:
+
+\deqn{SE_{signal} = abs(Signal_{net} * BG_f / BG_{signal})}
+
+where \eqn{BG_f} is a term estimated by calculating the standard deviation of the sum of
+the \eqn{L_x} background counts and the sum of the \eqn{T_x} background counts. However,
+if both background sums are identical the error becomes zero.
 }
 \note{
-\bold{This function has still BETA status!}
+\bold{This function still has BETA status!} Please further note that a similar
+background for both curves results in a zero error, which is therefore set to \code{NA}.
 }
 \section{Function version}{
- 0.3.0 (2015-11-29 17:27:48)
+ 0.3.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -73,16 +83,20 @@ output <- calc_TLLxTxRatio(Lx.data.signal,
                            signal.integral.min, signal.integral.max)
 get_RLum(output)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Schmidt, C. (2017). calc_TLLxTxRatio(): Calculate the Lx/Tx ratio for a given set of TL curves [beta version]. Function version 0.3.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France), Christoph Schmidt, University of Bayreuth (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 -
 }
 \seealso{
 \code{\linkS4class{RLum.Results}}, \code{\link{analyse_SAR.TL}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Christoph Schmidt, University of Bayreuth (Germany)
+\cr R Luminescence Package Team}
 \keyword{datagen}
-
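
The generalised error equation in the calc_TLLxTxRatio() details can be evaluated directly;
the counts below are invented and only show why identical background sums lead to a zero
error (reported as NA):

Lx_net <- 12000                  # net Lx signal (counts), illustrative
Lx_BG  <- 800                    # summed Lx background counts
Tx_BG  <- 950                    # summed Tx background counts

BG_f  <- sd(c(Lx_BG, Tx_BG))     # spread of the two background sums
SE_Lx <- abs(Lx_net * BG_f / Lx_BG)

## with Lx_BG == Tx_BG the spread BG_f is 0, the error collapses to 0
## and calc_TLLxTxRatio() reports NA instead
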
diff --git a/man/calc_ThermalLifetime.Rd b/man/calc_ThermalLifetime.Rd
index 2108721..9e76661 100644
--- a/man/calc_ThermalLifetime.Rd
+++ b/man/calc_ThermalLifetime.Rd
@@ -89,8 +89,9 @@ The profiling is currently based on resampling from a normal distribution, this
 distribution assumption might be, however, not valid for given E and s paramters.
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##EXAMPLE 1
@@ -124,10 +125,12 @@ calc_ThermalLifetime(
   output_unit = "Ma"
 )
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). calc_ThermalLifetime(): Calculates the Thermal Lifetime using the Arrhenius equation. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 Furetta, C., 2010. Handbook of Thermoluminescence, Second Edition. ed.
 World Scientific.
@@ -135,5 +138,7 @@ World Scientific.
 \seealso{
 \code{\link[graphics]{matplot}}, \code{\link[stats]{rnorm}}, \code{\link{get_RLum}},
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
-
diff --git a/man/calc_gSGC.Rd b/man/calc_gSGC.Rd
index 4a1ffa6..64516b6 100644
--- a/man/calc_gSGC.Rd
+++ b/man/calc_gSGC.Rd
@@ -57,8 +57,9 @@ Solving of the equation is realised using \code{\link{uniroot}}.
 Large values for \code{n.MC} will significantly increase the computation time.
 }
 \section{Function version}{
- 0.1.1 (2016-09-09 10:32:17)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 results <- calc_gSGC(data = data.frame(
 LnTn =  2.361, LnTn.error = 0.087,
@@ -67,10 +68,12 @@ Dr1 = 34.4))
 
 get_RLum(results, data.object = "De")
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). calc_gSGC(): Calculate De value based on the gSGC by Li et al., 2015. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montagine (France)\cr
-\cr R Luminescence Package Team}
+
 \references{
 Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., 2015. Potential of establishing
 a 'global standardised growth curve' (gSGC) for optical dating of quartz from sediments.
@@ -79,5 +82,7 @@ Quaternary Geochronology 27, 94-104. doi:10.1016/j.quageo.2015.02.011
 \seealso{
 \code{\linkS4class{RLum.Results}}, \code{\link{get_RLum}}, \code{\link{uniroot}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montagine (France)\cr
+\cr R Luminescence Package Team}
 \keyword{datagen}
-
diff --git a/man/convert_BIN2CSV.Rd b/man/convert_BIN2CSV.Rd
new file mode 100644
index 0000000..beb71cd
--- /dev/null
+++ b/man/convert_BIN2CSV.Rd
@@ -0,0 +1,56 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/convert_BIN2CSV.R
+\name{convert_BIN2CSV}
+\alias{convert_BIN2CSV}
+\title{Export Risoe BIN-file(s) to CSV-files}
+\usage{
+convert_BIN2CSV(file, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}} (\bold{required}): name of the BIN-file to be converted to CSV-files}
+
+\item{\dots}{further arguments that will be passed to the function \code{\link{read_BIN2R}} and \code{\link{write_RLum2CSV}}}
+}
+\value{
+The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+}
+\description{
+This function is a wrapper function around the functions \code{\link{read_BIN2R}} and
+\code{\link{write_RLum2CSV}} and it imports a Risoe BIN-file and directly exports its content to CSV-files.
+If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}}) the input folder will
+become the output folder.
+}
+\section{Function version}{
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+##transform Risoe.BINfileData values to a list
+data(ExampleData.BINfileData, envir = environment())
+convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1), export = FALSE)
+
+\dontrun{
+##select your BIN-file
+file <- file.choose()
+
+##convert
+convert_BIN2CSV(file)
+
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+\code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_BIN2R}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). convert_BIN2CSV(): Export Risoe BIN-file(s) to CSV-files. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
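
Since the input folder silently becomes the output folder, redirecting the export is a
common need; the sketch below forwards path through ... to write_RLum2CSV() (the file name
is hypothetical, hence the commented call):

## in-memory route: export = FALSE returns a list of data.frames
data(ExampleData.BINfileData, envir = environment())
csv_list <- convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1),
                            export = FALSE)
str(csv_list, max.level = 1)

## file route (hypothetical file name): 'path' is forwarded to
## write_RLum2CSV() and redirects the CSV output to a temporary folder
# convert_BIN2CSV(file = "sample.bin", path = tempdir())
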
diff --git a/man/convert_Daybreak2CSV.Rd b/man/convert_Daybreak2CSV.Rd
new file mode 100644
index 0000000..4519cff
--- /dev/null
+++ b/man/convert_Daybreak2CSV.Rd
@@ -0,0 +1,52 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/convert_Daybreak2CSV.R
+\name{convert_Daybreak2CSV}
+\alias{convert_Daybreak2CSV}
+\title{Export measurement data produced by a Daybreak luminescence reader to CSV-files}
+\usage{
+convert_Daybreak2CSV(file, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}} (\bold{required}): name of the Daybreak-file (TXT-file, DAT-file) to be converted to CSV-files}
+
+\item{\dots}{further arguments that will be passed to the function \code{\link{read_Daybreak2R}} and \code{\link{write_RLum2CSV}}}
+}
+\value{
+The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+}
+\description{
+This function is a wrapper function around the functions \code{\link{read_Daybreak2R}} and
+\code{\link{write_RLum2CSV}} and it imports a Daybreak-file (TXT-file, DAT-file)
+and directly exports its content to CSV-files. If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}})
+the input folder will become the output folder.
+}
+\section{Function version}{
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+\dontrun{
+##select your Daybreak-file
+file <- file.choose()
+
+##convert
+convert_Daybreak2CSV(file)
+
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+\code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_Daybreak2R}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). convert_Daybreak2CSV(): Export measurement data produced by a Daybreak luminescence reader to CSV-files. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
diff --git a/man/convert_PSL2CSV.Rd b/man/convert_PSL2CSV.Rd
new file mode 100644
index 0000000..461fd42
--- /dev/null
+++ b/man/convert_PSL2CSV.Rd
@@ -0,0 +1,53 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/convert_PSL2CSV.R
+\name{convert_PSL2CSV}
+\alias{convert_PSL2CSV}
+\title{Export PSL-file(s) to CSV-files}
+\usage{
+convert_PSL2CSV(file, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}} (\bold{required}): name of the PSL-file to be converted to CSV-files}
+
+\item{\dots}{further arguments that will be passed to the function \code{\link{read_PSL2R}} and \code{\link{write_RLum2CSV}}}
+}
+\value{
+The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+}
+\description{
+This function is a wrapper function around the functions \code{\link{read_PSL2R}} and
+\code{\link{write_RLum2CSV}} and it imports a PSL-file (SUERC portable OSL reader file format)
+and directly exports its content to CSV-files.
+If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}}) the input folder will
+become the output folder.
+}
+\section{Function version}{
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+\dontrun{
+##select your PSL-file
+file <- file.choose()
+
+##convert
+convert_PSL2CSV(file)
+
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+\code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_PSL2R}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). convert_PSL2CSV(): Export PSL-file(s) to CSV-files. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
diff --git a/man/convert_XSYG2CSV.Rd b/man/convert_XSYG2CSV.Rd
new file mode 100644
index 0000000..9717d69
--- /dev/null
+++ b/man/convert_XSYG2CSV.Rd
@@ -0,0 +1,56 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/convert_XSYG2CSV.R
+\name{convert_XSYG2CSV}
+\alias{convert_XSYG2CSV}
+\title{Export XSYG-file(s) to CSV-files}
+\usage{
+convert_XSYG2CSV(file, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}} (\bold{required}): name of the XSYG-file to be converted to CSV-files}
+
+\item{\dots}{further arguments that will be passed to the function \code{\link{read_XSYG2R}} and \code{\link{write_RLum2CSV}}}
+}
+\value{
+The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE}
+a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+}
+\description{
+This function is a wrapper function around the functions \code{\link{read_XSYG2R}} and
+\code{\link{write_RLum2CSV}} and it imports an XSYG-file and directly exports its content to CSV-files.
+If nothing is set for the argument \code{path} (\code{\link{write_RLum2CSV}}) the input folder will
+become the output folder.
+}
+\section{Function version}{
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+##transform XSYG-file values to a list
+data(ExampleData.XSYG, envir = environment())
+convert_XSYG2CSV(OSL.SARMeasurement$Sequence.Object[1:10], export = FALSE)
+
+\dontrun{
+##select your XSYG-file
+file <- file.choose()
+
+##convert
+convert_XSYG2CSV(file)
+
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+\code{\link[utils]{write.table}}, \code{\link{write_RLum2CSV}}, \code{\link{read_XSYG2R}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). convert_XSYG2CSV(): Export XSYG-file(s) to CSV-files. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
diff --git a/man/extract_IrradiationTimes.Rd b/man/extract_IrradiationTimes.Rd
index 70bfcf4..3da091b 100644
--- a/man/extract_IrradiationTimes.Rd
+++ b/man/extract_IrradiationTimes.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/extract_IrradiationTimes.R
 \name{extract_IrradiationTimes}
 \alias{extract_IrradiationTimes}
-\title{Extract irradiation times from an XSYG file}
+\title{Extract Irradiation Times from an XSYG-file}
 \usage{
 extract_IrradiationTimes(object, file.BINX, recordType = c("irradiation (NA)",
   "IRSL (UVVIS)", "OSL (UVVIS)", "TL (UVVIS)"), compatibility.mode = TRUE,
@@ -50,8 +50,8 @@ following structure:\cr .. $irr.times (data.frame)\cr
 If a BINX-file path and name is set, the output will be additionally
 transferred into a new BINX-file with the function name as suffix. For the
 output the path of the input BINX-file itself is used. Note that this will
-not work if the input object is a file path to an XSYG-file. In this case
-the argument input is ignored.\cr
+not work if the input object is a file path to an XSYG-file rather than a
+link to only one file. In this case the argument \code{file.BINX} is ignored.\cr
 
 In the self call mode (input is a \code{list} of \code{\linkS4class{RLum.Analysis}} objects
 a list of \code{\linkS4class{RLum.Results}} is returned.
@@ -59,14 +59,14 @@ a list of \code{\linkS4class{RLum.Results}} is returned.
 \description{
 Extracts irradiation times, dose and times since last irradiation, from a
 Freiberg Instruments XSYG-file. These information can be further used to
-update an existing BINX-file
+update an existing BINX-file.
 }
 \details{
 The function was written to compensate missing information in the BINX-file
 output of Freiberg Instruments lexsyg readers. As all information are
 available within the XSYG-file anyway, these information can be extracted
 and used for further analysis or/and to stored in a new BINX-file, which can
-be further used by other software, e.g. Analyst (Geoff Duller). \cr
+be further used by other software, e.g., Analyst (Geoff Duller). \cr
 
 Typical application example: g-value estimation from fading measurements
 using the Analyst or any other self written script.\cr
@@ -81,14 +81,17 @@ keep the output transparent. However, for the BINX-file export this steps
 are removed as the BINX-file format description does not allow irradiations
 as separat sequences steps.\cr
 
-Know issue: The 'fading correction' menu in the Analyst will not work appear
-with the produced BIN/BINX-file due to hidden bits, which are not reproduced
-by the function \code{write_R2BIN()} or if it appears it stops with a
-floating point error. \cr
+BINX-file 'Time Since Irradiation' value differs from the table output?\cr
 
-Negative values for \code{TIMESINCELAS.STEP}? Yes, this is possible and no
-bug, as in the XSYG file multiple curves are stored for one step. Example: A
-TL step may comprise three curves: (a) counts vs. time, (b) measured
+The definition of the value 'Time Since Irradiation' differs. In the BINX-file the
+'Time Since Irradiation' is calculated as the real time since irradiation plus the 'Irradiation
+Time'. The table output returns only the real 'Time Since Irradiation', i.e. the time between the
+end of the irradiation and the next step.
+
+Negative values for \code{TIMESINCELAS.STEP}? \cr
+
+Yes, this is possible and no bug, as in the XSYG-file multiple curves are stored for one step.
+Example: A TL step may comprise three curves: (a) counts vs. time, (b) measured
 temperature vs. time and (c) predefined temperature vs. time. Three curves,
 but they are all belonging to one TL measurement step, but with regard to
 the time stamps this could produce negative values as the important function
@@ -96,8 +99,9 @@ the time stamps this could produce negative values as the important function
 towards a correct time order.
 }
 \section{Function version}{
- 0.3.0 (2016-05-03 11:10:26)
+ 0.3.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -117,19 +121,24 @@ towards a correct time order.
 #                   sep = ";",
 #                   row.names = FALSE)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). extract_IrradiationTimes(): Extract Irradiation Times from an XSYG-file. Function version 0.3.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
-Duller, G., 2007. Analyst.
+Duller, G.A.T., 2015. The Analyst software package for luminescence data: overview and
+recent improvements. Ancient TL 33, 35-42.
 }
 \seealso{
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}, \code{\linkS4class{Risoe.BINfileData}},
 \code{\link{read_XSYG2R}}, \code{\link{read_BIN2R}}, \code{\link{write_R2BIN}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{IO}
 \keyword{manip}
-
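
A sketch of the fading-measurement workflow described in the extract_IrradiationTimes()
details; both file names are hypothetical, so the calls are left commented:

## hypothetical lexsyg output of one fading measurement
# irr_times <- extract_IrradiationTimes(object = "fading_measurement.xsyg",
#                                       file.BINX = "fading_measurement.binx")

## irradiation table (dose and time since irradiation per step)
# get_RLum(irr_times)

## the updated BINX-file (function name appended as suffix) can then be
## opened in the Analyst, e.g. for g-value estimation
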
diff --git a/man/fit_CWCurve.Rd b/man/fit_CWCurve.Rd
index 3e8c1d5..c0af0e5 100644
--- a/man/fit_CWCurve.Rd
+++ b/man/fit_CWCurve.Rd
@@ -107,9 +107,9 @@ uses the \code{\link{nls}} function with the \code{port} algorithm.
 \bold{Fitting function}\cr\cr The function for the CW-OSL fitting has the
 general form: \deqn{y = I0_{1}*\lambda_{1}*exp(-\lambda_1*x) + ,\ldots, +
 I0_{i}*\lambda_{i}*exp(-\lambda_i*x) } where \eqn{0 < i < 8}\cr\cr and
-\eqn{\lambda} is the decay constant and \eqn{N0} the intial number of
+\eqn{\lambda} is the decay constant and \eqn{I0} the initial number of
 trapped electrons.\cr (for the used equation cf. Boetter-Jensen et al.,
-2003)\cr\cr \bold{Start values}\cr
+2003, Eq. 2.31)\cr\cr \bold{Start values}\cr
 
 Start values are estimated automatically by fitting a linear function to the
 logarithmized input data set. Currently, there is no option to manually
@@ -137,8 +137,9 @@ the fitting procedure has reached a global minimum rather than a local
 minimum!
 }
 \section{Function version}{
- 0.5.1 (2015-11-29 17:27:48)
+ 0.5.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -151,11 +152,12 @@ fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve,
                    n.components.max = 4,
                    log = "x")
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). fit_CWCurve(): Nonlinear Least Squares Fit for CW-OSL curves [beta version]. Function version 0.5.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Boetter-Jensen, L., McKeever, S.W.S., Wintle, A.G., 2003.
 Optically Stimulated Luminescence Dosimetry. Elsevier Science B.V.
@@ -171,6 +173,9 @@ Gentleman, K. Hornik, G. Parmigiani, eds., Springer, p. 150.
 \code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Results}},
 \code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{dplot}
 \keyword{models}
-
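
The general fitting equation quoted in fit_CWCurve.Rd can be used to simulate a curve and
feed it back into the function; the two components below (I0, lambda pairs) are arbitrary
illustration values:

x <- seq(0.1, 40, by = 0.1)                   # stimulation time (s)
y <- 2000 * 2.0 * exp(-2.0 * x) +             # fast component: I0 = 2000, lambda = 2.0
      500 * 0.1 * exp(-0.1 * x)               # slow component: I0 = 500, lambda = 0.1
y <- y * exp(rnorm(length(y), sd = 0.02))     # mild multiplicative noise, keeps counts positive

fit <- fit_CWCurve(values = data.frame(x, y),
                   n.components.max = 3,
                   log = "x")
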
diff --git a/man/fit_LMCurve.Rd b/man/fit_LMCurve.Rd
index fd60eb5..f9d74f3 100644
--- a/man/fit_LMCurve.Rd
+++ b/man/fit_LMCurve.Rd
@@ -82,10 +82,12 @@ Various types of plots are returned. For details see above.\cr
 Furthermore an \code{RLum.Results} object is returned with the following structure:\cr
 
 data:\cr
+.. $data : \code{data.frame} with fitting results\cr
 .. $fit : \code{nls} (nls object)\cr
-.. $output.table : \code{data.frame} with fitting results\cr
 .. $component.contribution.matrix : \code{list} component distribution matrix\cr
-.. $call : \code{call} the original function call
+
+info:\cr
+.. $call : \code{call} the original function call\cr
 
 Matrix structure for the distribution matrix:\cr
 
@@ -159,8 +161,9 @@ global minimum rather than a local minimum! In any case of doubt, the use of
 manual start values is highly recommended.
 }
 \section{Function version}{
- 0.3.1 (2016-05-02 09:36:06)
+ 0.3.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -185,11 +188,12 @@ fit_LMCurve(values = values.curve,
             log = "x",
             start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500)))
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). fit_LMCurve(): Nonlinear Least Squares Fit for LM-OSL curves. Function version 0.3.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Bulur, E., 1996. An Alternative Technique For Optically
 Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 5,
@@ -213,6 +217,9 @@ K. Hornik, & G. Parmigiani, eds., Springer, p. 150.
 \code{\link{fit_CWCurve}}, \code{\link{plot}}, \code{\link{nls}},
 \code{\link[minpack.lm]{nlsLM}}, \code{\link{get_RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{dplot}
 \keyword{models}
-
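Given the restructured return object described above (fitting results now in the data slot, the original call in the info slot), accessing the results could look like the sketch below. It reuses 'values.curve' from the fit_LMCurve() example above and relies on the '$' shortcut documented for RLum.Results objects in methods_RLum; treat the exact element names as assumptions based on the structure listed in this man page.

## minimal sketch, assuming 'values.curve' from the example above is available
fit <- fit_LMCurve(values = values.curve, n.components = 3, log = "x")
fit$data                             # data.frame with the fitting results
fit$fit                              # underlying nls object
fit$component.contribution.matrix    # component distribution matrix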
diff --git a/man/get_Layout.Rd b/man/get_Layout.Rd
index 5798a84..1325dd7 100644
--- a/man/get_Layout.Rd
+++ b/man/get_Layout.Rd
@@ -28,8 +28,9 @@ create either an empty or a default layout object and fill/modify the
 definitions (\code{user.layout <- get_Layout(data = "empty")}).
 }
 \section{Function version}{
- 0.1 (2016-05-17 22:39:50)
+ 0.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## read example data set
@@ -56,5 +57,9 @@ plot_AbanicoPlot(data = ExampleData.DeValues,
 }
 \author{
 Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Dietze, M. (2017). get_Layout(): Collection of layout definitions. Function version 0.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
diff --git a/man/get_Quote.Rd b/man/get_Quote.Rd
index c4d9da2..c25820f 100644
--- a/man/get_Quote.Rd
+++ b/man/get_Quote.Rd
@@ -22,8 +22,9 @@ growing library. If called without any parameters, a random quote is
 returned.
 }
 \section{Function version}{
- 0.1.1 (2016-09-09 10:32:17)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## ask for an arbitrary quote
@@ -32,5 +33,9 @@ get_Quote()
 }
 \author{
 Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Dietze, M. (2017). get_Quote(): Function to return essential quotes. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
diff --git a/man/get_RLum.Rd b/man/get_RLum.Rd
index a86b0e3..c9893b6 100644
--- a/man/get_RLum.Rd
+++ b/man/get_RLum.Rd
@@ -15,13 +15,13 @@ get_RLum(object, ...)
 class \code{RLum} or an object of type \code{\link{list}} containing only objects of type
 \code{\linkS4class{RLum}}}
 
-\item{null.rm}{\code{\link{logical}} (with default): option to get rid of empty and NULL objects}
-
 \item{\dots}{further arguments that will be passed to the object specific methods. For
 further details on the supported arguments please see the class
 documentation: \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Spectrum}}, \code{\linkS4class{RLum.Data.Image}},
 \code{\linkS4class{RLum.Analysis}} and \code{\linkS4class{RLum.Results}}}
+
+\item{null.rm}{\code{\link{logical}} (with default): option to get rid of empty and NULL objects}
 }
 \value{
 Return is the same as input objects as provided in the list.
@@ -39,9 +39,11 @@ in the documentations of the corresponding \code{\linkS4class{RLum}} class.
 \itemize{
 \item \code{list}: Returns a list of \code{\linkS4class{RLum}} objects that had been passed to \code{\link{get_RLum}}
 }}
+
 \section{Function version}{
- 0.3.0 (2016-05-02 09:40:57)
+ 0.3.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -58,10 +60,6 @@ temp.get <- get_RLum(object = temp1)
 
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
 \seealso{
 \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Image}},
@@ -69,5 +67,13 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}
 }
-\keyword{utilities}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). get_RLum(): General accessor function for RLum S4 class objects. Function version 0.3.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
+\keyword{utilities}
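Since the list method documented above simply maps get_RLum() over the list elements and 'null.rm' drops empty or NULL entries, a minimal sketch could look like the following; it reuses the ExampleData.DeValues data set and calc_CentralDose(), as done elsewhere in the examples of this update.

## minimal sketch: get_RLum() applied to a list of RLum.Results objects,
## dropping the NULL element via 'null.rm'
data(ExampleData.DeValues, envir = environment())
temp <- calc_CentralDose(ExampleData.DeValues$CA1)
get_RLum(list(temp, NULL), null.rm = TRUE)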
diff --git a/man/get_Risoe.BINfileData.Rd b/man/get_Risoe.BINfileData.Rd
index 360f8c1..65f796a 100644
--- a/man/get_Risoe.BINfileData.Rd
+++ b/man/get_Risoe.BINfileData.Rd
@@ -26,14 +26,19 @@ corresponding get function will be selected. Allowed arguments can be found
 in the documentations of the corresponding \code{\linkS4class{Risoe.BINfileData}} class.
 }
 \section{Function version}{
- 0.1.0 (2015-11-29 17:27:48)
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}
 }
 \author{
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 (France)
-\cr R Luminescence Package Team}
-\seealso{
-\code{\linkS4class{Risoe.BINfileData}}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). get_Risoe.BINfileData(): General accessor function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\keyword{utilities}
 
+\keyword{utilities}
diff --git a/man/get_rightAnswer.Rd b/man/get_rightAnswer.Rd
index 9c18680..23e77ab 100644
--- a/man/get_rightAnswer.Rd
+++ b/man/get_rightAnswer.Rd
@@ -16,8 +16,9 @@ Returns the right answer
 This function returns just the right answer
 }
 \section{Function version}{
- 0.1.0 (2015-11-29 17:27:48)
+ 0.1.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## you really want to know?
@@ -26,5 +27,9 @@ get_rightAnswer()
 }
 \author{
 inspired by R.G.
-\cr R Luminescence Package Team}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+R Luminescence Package Team (2017). get_rightAnswer(): Function to get the right answer. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
diff --git a/man/install_DevelopmentVersion.Rd b/man/install_DevelopmentVersion.Rd
new file mode 100644
index 0000000..519c2d0
--- /dev/null
+++ b/man/install_DevelopmentVersion.Rd
@@ -0,0 +1,44 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/install_DevelopmentVersion.R
+\name{install_DevelopmentVersion}
+\alias{install_DevelopmentVersion}
+\title{Attempts to install the development version of the 'Luminescence' package}
+\usage{
+install_DevelopmentVersion(force_install = FALSE)
+}
+\arguments{
+\item{force_install}{\code{\link{logical}} (optional):
+If \code{FALSE} (the default) the function produces and prints the required
+code to the console for the user to run manually afterwards. When \code{TRUE}
+and all requirements are fulfilled (see details) this function attempts to install
+the package itself.}
+}
+\value{
+This function requires user input at the command prompt to choose the 
+desired development branch to be installed. The required R code to install
+the package is then printed to the console.
+}
+\description{
+This function is a convenient method for installing the development
+version of the R package 'Luminescence' directly from GitHub.
+}
+\details{
+This function uses \code{\link[Luminescence]{github_branches}} to check
+which development branches of the R package 'Luminescence' are currently
+available on GitHub. The user is then prompted to choose one of the branches
+to be installed. It further checks whether the R package 'devtools' is 
+currently installed and available on the system. Finally, it prints R code
+to the console that the user can copy and paste to the R console in order
+to install the desired development version of the package.\cr\cr
+
+If \code{force_install=TRUE} the functions checks if 'devtools' is available
+and then attempts to install the chosen development branch via
+\code{\link[devtools]{install_github}}.
+}
+\examples{
+
+\dontrun{
+install_DevelopmentVersion()
+}
+
+}
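With force_install = FALSE the function only prints the installation code for the user to run manually. The snippet below sketches roughly what that manual route amounts to; the repository path and the branch name 'master' are assumptions for illustration, not output copied from the function.

## not run - rough sketch of the manual installation route printed by the function
## (repository path and branch name are assumptions)
# install.packages("devtools")
# devtools::install_github("R-Lum/Luminescence@master")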
diff --git a/man/length_RLum.Rd b/man/length_RLum.Rd
index 9ff3cf7..eeb9b11 100644
--- a/man/length_RLum.Rd
+++ b/man/length_RLum.Rd
@@ -23,12 +23,9 @@ corresponding get function will be selected. Allowed arguments can be found
 in the documentations of the corresponding \code{\linkS4class{RLum}} class.
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.0 (2017-06-29 18:40:14)
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \seealso{
 \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Image}},
@@ -36,5 +33,13 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}
 }
-\keyword{utilities}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team} 
 
+\section{How to cite}{
+Kreutzer, S. (2017). length_RLum(): General accessor function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{utilities}
diff --git a/man/merge_RLum.Analysis.Rd b/man/merge_RLum.Analysis.Rd
index f062708..65fae26 100644
--- a/man/merge_RLum.Analysis.Rd
+++ b/man/merge_RLum.Analysis.Rd
@@ -37,8 +37,9 @@ least one object of type \code{\linkS4class{RLum.Analysis}} has to be
 provided.
 }
 \section{Function version}{
- 0.2.0 (2016-05-02 09:36:06)
+ 0.2.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -51,11 +52,12 @@ curve <- get_RLum(object)[[2]]
 
 temp.merged <- merge_RLum.Analysis(list(curve, IRSAR.RF.Data, IRSAR.RF.Data))
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). merge_RLum.Analysis(): Merge function for RLum.Analysis S4 class objects. Function version 0.2.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 -
 }
@@ -65,5 +67,8 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Data.Spectrum}},
 \code{\linkS4class{RLum.Data.Image}}, \code{\linkS4class{RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{utilities}
-
diff --git a/man/merge_RLum.Data.Curve.Rd b/man/merge_RLum.Data.Curve.Rd
index ce1ee48..d701790 100644
--- a/man/merge_RLum.Data.Curve.Rd
+++ b/man/merge_RLum.Data.Curve.Rd
@@ -69,6 +69,12 @@ The min values from the count values is chosen using the function
 The max values from the count values is chosen using the function
 \code{\link[matrixStats]{rowMins}}.
 
+\code{"append"}\cr
+
+Appends count values of all curves to one combined data curve. The channel width
+is automatically re-calculated, but requires a constant channel width of the 
+original data.
+
 \code{"-"}\cr
 
 The row sums of the last objects are subtracted from the first object.
@@ -94,8 +100,9 @@ This function is fully operational via S3-generics:
 }
 
 \section{Function version}{
- 0.2.0 (2016-09-09 10:32:17)
+ 0.2.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -115,16 +122,20 @@ plot_RLum(TL.curve.3)
 TL.curve.merged <- merge_RLum.Data.Curve(list(TL.curve.3, TL.curve.1), merge.method = "/")
 plot_RLum(TL.curve.merged)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). merge_RLum.Data.Curve(): Merge function for RLum.Data.Curve S4 class objects. Function version 0.2.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 -
 }
 \seealso{
 \code{\link{merge_RLum}}, \code{\linkS4class{RLum.Data.Curve}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{utilities}
-
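The new "append" method can be exercised with the TL curves already used in the examples above; a minimal sketch, assuming both curves share a constant channel width as the documentation requires:

## minimal sketch of the new "append" merge method
TL.curve.appended <- merge_RLum.Data.Curve(
  list(TL.curve.1, TL.curve.3),
  merge.method = "append")
plot_RLum(TL.curve.appended)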
diff --git a/man/merge_RLum.Rd b/man/merge_RLum.Rd
index a425ee1..a8cb11d 100644
--- a/man/merge_RLum.Rd
+++ b/man/merge_RLum.Rd
@@ -38,8 +38,9 @@ automatically removed from the input \code{list}.
 So far not for every \code{RLum} object a merging function exists.
 }
 \section{Function version}{
- 0.1.2 (2016-05-02 09:36:06)
+ 0.1.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -58,11 +59,12 @@ temp2 <- calc_CentralDose(ExampleData.DeValues$CA1)
 temp.merged <- get_RLum(merge_RLum(objects = list(temp1, temp2)))
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). merge_RLum(): General merge function for RLum S4 class objects. Function version 0.1.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
@@ -70,5 +72,8 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Data.Image}},
 \code{\linkS4class{RLum.Data.Spectrum}}, \code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Results}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{utilities}
-
diff --git a/man/merge_RLum.Results.Rd b/man/merge_RLum.Results.Rd
index 0f6625b..b3f0c5d 100644
--- a/man/merge_RLum.Results.Rd
+++ b/man/merge_RLum.Results.Rd
@@ -18,10 +18,15 @@ rows are appended.
 The originator is taken from the first element and not reset to \code{merge_RLum}
 }
 \section{Function version}{
- 0.2.0 (2016-05-02 09:36:06)
+ 0.2.0 (2017-06-29 18:40:14)
 }
+
 \author{
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 (France)
-\cr R Luminescence Package Team}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). merge_RLum.Results(): Merge function for RLum.Results S4-class objects. Function version 0.2.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
diff --git a/man/merge_Risoe.BINfileData.Rd b/man/merge_Risoe.BINfileData.Rd
index 51221e5..3f82d33 100644
--- a/man/merge_Risoe.BINfileData.Rd
+++ b/man/merge_Risoe.BINfileData.Rd
@@ -8,11 +8,11 @@ merge_Risoe.BINfileData(input.objects, output.file,
   keep.position.number = FALSE, position.number.append.gap = 0)
 }
 \arguments{
-\item{input.objects}{\code{\link{character}} or
-\code{\linkS4class{Risoe.BINfileData}} (\bold{required}): Character vector
+\item{input.objects}{\code{\link{character}} with
+\code{\linkS4class{Risoe.BINfileData}} objects (\bold{required}): Character vector
 with path and file names (e.g. \code{input.objects = c("path/file1.bin",
 "path/file2.bin")} or \code{\linkS4class{Risoe.BINfileData}} objects (e.g.
-\code{input.objects = c(object1, object2)})}
+\code{input.objects = c(object1, object2)}). Alternatively a \code{list} is supported.}
 
 \item{output.file}{\code{\link{character}} (optional): File output path and
 name. \cr If no value is given, a \code{\linkS4class{Risoe.BINfileData}} is
@@ -63,8 +63,9 @@ With no additional summand the new position numbers would be:
 The validity of the output objects is not further checked.
 }
 \section{Function version}{
- 0.2.5 (2016-09-09 10:32:17)
+ 0.2.7 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -77,11 +78,12 @@ object2 <- CWOSL.SAR.Data
 object.new <- merge_Risoe.BINfileData(c(object1, object2))
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). merge_Risoe.BINfileData(): Merge Risoe.BINfileData objects or Risoe BIN-files. Function version 0.2.7. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Duller, G., 2007. Analyst.
 }
@@ -89,6 +91,9 @@ Duller, G., 2007. Analyst.
 \code{\linkS4class{Risoe.BINfileData}}, \code{\link{read_BIN2R}},
 \code{\link{write_R2BIN}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{IO}
 \keyword{manip}
-
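For completeness, a sketch of the file-based call documented above, writing the merged object back to disk; the file names and paths are placeholders only.

## minimal sketch: merging two BIN-files from disk into a new file
## (paths are placeholders)
merge_Risoe.BINfileData(
  input.objects = c("path/file1.bin", "path/file2.bin"),
  output.file = "path/merged.bin",
  keep.position.number = FALSE,
  position.number.append.gap = 0)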
diff --git a/man/methods_RLum.Rd b/man/methods_RLum.Rd
index 57c4b1c..74ab6aa 100644
--- a/man/methods_RLum.Rd
+++ b/man/methods_RLum.Rd
@@ -1,68 +1,70 @@
 % Generated by roxygen2: do not edit by hand
 % Please edit documentation in R/methods_RLum.R
 \name{methods_RLum}
-\alias{$.RLum.Analysis}
-\alias{$.RLum.Data.Curve}
-\alias{$.RLum.Results}
-\alias{*.RLum.Data.Curve}
-\alias{+.RLum.Data.Curve}
-\alias{-.RLum.Data.Curve}
-\alias{/.RLum.Data.Curve}
-\alias{[.RLum.Analysis}
-\alias{[.RLum.Data.Curve}
-\alias{[.RLum.Data.Image}
-\alias{[.RLum.Data.Spectrum}
-\alias{[.RLum.Results}
-\alias{[[.RLum.Analysis}
-\alias{[[.RLum.Results}
+\alias{methods_RLum}
+\alias{plot.list}
+\alias{plot.RLum.Results}
+\alias{plot.RLum.Analysis}
+\alias{plot.RLum.Data.Curve}
+\alias{plot.RLum.Data.Spectrum}
+\alias{plot.RLum.Data.Image}
+\alias{plot.Risoe.BINfileData}
+\alias{hist.RLum.Results}
+\alias{hist.RLum.Data.Image}
+\alias{hist.RLum.Data.Curve}
+\alias{hist.RLum.Analysis}
+\alias{summary.RLum.Results}
+\alias{summary.RLum.Analysis}
+\alias{summary.RLum.Data.Image}
+\alias{summary.RLum.Data.Curve}
+\alias{subset.Risoe.BINfileData}
+\alias{subset.RLum.Analysis}
+\alias{bin.RLum.Data.Curve}
+\alias{length.RLum.Results}
+\alias{length.RLum.Analysis}
+\alias{length.RLum.Data.Curve}
+\alias{length.Risoe.BINfileData}
+\alias{dim.RLum.Data.Curve}
+\alias{dim.RLum.Data.Spectrum}
+\alias{rep.RLum}
+\alias{names.RLum.Data.Curve}
+\alias{names.RLum.Data.Spectrum}
+\alias{names.RLum.Data.Image}
+\alias{names.RLum.Analysis}
+\alias{names.RLum.Results}
+\alias{names.Risoe.BINfileData}
+\alias{row.names.RLum.Data.Spectrum}
 \alias{as.data.frame.RLum.Data.Curve}
 \alias{as.data.frame.RLum.Data.Spectrum}
-\alias{as.list.RLum.Analysis}
-\alias{as.list.RLum.Data.Curve}
 \alias{as.list.RLum.Results}
+\alias{as.list.RLum.Data.Curve}
+\alias{as.list.RLum.Analysis}
 \alias{as.matrix.RLum.Data.Curve}
 \alias{as.matrix.RLum.Data.Spectrum}
-\alias{bin.RLum.Data.Curve}
-\alias{dim.RLum.Data.Curve}
-\alias{dim.RLum.Data.Spectrum}
-\alias{hist.RLum.Analysis}
-\alias{hist.RLum.Data.Curve}
-\alias{hist.RLum.Data.Image}
-\alias{hist.RLum.Results}
 \alias{is.RLum}
-\alias{is.RLum.Analysis}
 \alias{is.RLum.Data}
 \alias{is.RLum.Data.Curve}
-\alias{is.RLum.Data.Image}
 \alias{is.RLum.Data.Spectrum}
+\alias{is.RLum.Data.Image}
+\alias{is.RLum.Analysis}
 \alias{is.RLum.Results}
-\alias{length.RLum.Analysis}
-\alias{length.RLum.Data.Curve}
-\alias{length.RLum.Results}
-\alias{length.Risoe.BINfileData}
 \alias{merge.RLum}
-\alias{methods_RLum}
-\alias{names.RLum.Analysis}
-\alias{names.RLum.Data.Curve}
-\alias{names.RLum.Data.Image}
-\alias{names.RLum.Data.Spectrum}
-\alias{names.RLum.Results}
-\alias{names.Risoe.BINfileData}
-\alias{plot.RLum.Analysis}
-\alias{plot.RLum.Data.Curve}
-\alias{plot.RLum.Data.Image}
-\alias{plot.RLum.Data.Spectrum}
-\alias{plot.RLum.Results}
-\alias{plot.Risoe.BINfileData}
-\alias{plot.list}
-\alias{rep.RLum}
-\alias{row.names.RLum.Data.Spectrum}
-\alias{subset.Risoe.BINfileData}
-\alias{summary.RLum.Analysis}
-\alias{summary.RLum.Data.Curve}
-\alias{summary.RLum.Data.Image}
-\alias{summary.RLum.Results}
 \alias{unlist.RLum.Analysis}
+\alias{+.RLum.Data.Curve}
+\alias{-.RLum.Data.Curve}
+\alias{*.RLum.Data.Curve}
+\alias{/.RLum.Data.Curve}
+\alias{[.RLum.Data.Curve}
+\alias{[.RLum.Data.Spectrum}
+\alias{[.RLum.Data.Image}
+\alias{[.RLum.Analysis}
+\alias{[.RLum.Results}
+\alias{[<-.RLum.Data.Curve}
+\alias{[[.RLum.Analysis}
+\alias{[[.RLum.Results}
+\alias{$.RLum.Data.Curve}
+\alias{$.RLum.Analysis}
+\alias{$.RLum.Results}
 \title{methods_RLum}
 \usage{
 \method{plot}{list}(x, y, ...)
@@ -97,6 +99,8 @@
 
 \method{subset}{Risoe.BINfileData}(x, subset, records.rm = TRUE, ...)
 
+\method{subset}{RLum.Analysis}(x, subset, ...)
+
 bin.RLum.Data.Curve(x, ...)
 
 \method{length}{RLum.Results}(x, ...)
@@ -179,6 +183,8 @@ is.RLum.Results(x, ...)
 
 \method{[}{RLum.Results}(x, i, drop = TRUE)
 
+\method{[}{RLum.Data.Curve}(x, i, j) <- value
+
 \method{[[}{RLum.Analysis}(x, i)
 
 \method{[[}{RLum.Results}(x, i)
@@ -216,7 +222,12 @@ converting column names (to syntactic names: see make.names) is optional (see \c
 
 \item{drop}{\code{\link{logical}} (with default): keep object structure or drop it}
 
-\item{i}{\code{\link{character}} (optional): name of the wanted record type or data object}
+\item{i}{\code{\link{character}} (optional): name of the wanted record type or data object or row in the \code{RLum.Data.Curve} object}
+
+\item{j}{\code{\link{integer}} (optional): column of the data matrix in the \code{RLum.Data.Curve} object}
+
+\item{value}{\code{\link{numeric}} \bold{(required)}: numeric value that replaces the value in the
+\code{RLum.Data.Curve} object}
 }
 \description{
 Methods for S3-generics implemented for the package 'Luminescence'.
@@ -255,4 +266,3 @@ curve1 * curve2
 IRSAR.RF.Data$RF
 
 }
-
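The newly added replacement method `[<-` for RLum.Data.Curve objects takes a row index i, a column index j of the data matrix and a numeric replacement value. A minimal sketch, assuming 'curve1' is the RLum.Data.Curve object used in the arithmetic examples above:

## minimal sketch of the new replacement method for RLum.Data.Curve objects
curve1[1, 2] <- 0    # set the count value of the first channel to zero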
diff --git a/man/model_LuminescenceSignals.Rd b/man/model_LuminescenceSignals.Rd
index 9facba8..b12dfd5 100644
--- a/man/model_LuminescenceSignals.Rd
+++ b/man/model_LuminescenceSignals.Rd
@@ -6,11 +6,12 @@
 \usage{
 model_LuminescenceSignals(model, sequence, lab.dose_rate = 1,
   simulate_sample_history = FALSE, plot = TRUE, verbose = TRUE,
-  show.structure = FALSE, ...)
+  show_structure = FALSE, own_parameters = NULL,
+  own_state_parameters = NULL, own_start_temperature = NULL, ...)
 }
 \arguments{
 \item{model}{\code{\link{character}} (\bold{required}): set model to be used. Available models are:
-"Bailey2001", "Bailey2002", "Bailey2004", "Pagonis2007", "Pagonis2008"}
+"Bailey2001", "Bailey2002", "Bailey2004", "Pagonis2007", "Pagonis2008" and "Friedrich2017".}
 
 \item{sequence}{\code{\link{list}} (\bold{required}): set sequence to model as \code{\link{list}} or as *.seq file from the
 Riso sequence editor. To simulate SAR measurements there is an extra option to set the sequence list (cf. details).}
@@ -18,16 +19,46 @@ Riso sequence editor. To simulate SAR measurements there is an extra option to s
 \item{lab.dose_rate}{\code{\link{numeric}} (with default): laboratory dose rate in XXX
 Gy/s for calculating seconds into Gray in the *.seq file.}
 
-\item{simulate_sample_history}{\code{\link{logical}} (with default): FALSE (with default): simulation begins at laboratory conditions, TRUE: simulations begins at crystallization (all levels 0)
-process}
+\item{simulate_sample_history}{\code{\link{logical}} (with default): \code{FALSE} (the default): the simulation begins at laboratory conditions,
+\code{TRUE}: the simulation begins at the crystallization process (all levels 0)}
 
 \item{plot}{\code{\link{logical}} (with default): Enables or disables plot output}
 
 \item{verbose}{\code{\link{logical}} (with default): Verbose mode on/off}
 
-\item{show.structure}{\code{\link{logical}} (with default): Shows the structure of the result.
+\item{show_structure}{\code{\link{logical}} (with default): Shows the structure of the result.
 Recommended to show record.id to analyse concentrations.}
 
+\item{own_parameters}{\code{\link{list}} (with default): This argument allows the user to submit own parameter sets. The \code{\link{list}}
+has to contain the following items:
+\itemize{
+ \item{N: Concentration of electron- and hole traps [cm^(-3)]}
+ \item{E: Electron/Hole trap depth [eV]}
+ \item{s: Frequency factor [s^(-1)]}
+ \item{A: Conduction band to electron trap and valence band to hole trap transition probability [s^(-1) * cm^(3)]. 
+ \bold{CAUTION: Not every publication uses 
+ the same definition of parameter A and B! See vignette "RLumModel - Usage with own parameter sets" for further details}}
+ \item{B: Conduction band to hole centre transition probability [s^(-1) * cm^(3)].}
+ \item{Th: Photo-eviction constant or photoionisation cross section, respectively}
+ \item{E_th: Thermal assistance energy [eV]}
+ \item{k_B: Boltzmann constant 8.617e-05 [eV/K]}
+ \item{W: activation energy 0.64 [eV] (for UV)}
+ \item{K: 2.8e7 (dimensionless constant)}
+ \item{model: "customized"}
+ \item{R (optional): Ionisation rate (pair production rate) equivalent to 1 Gy/s [s^(-1) * cm^(-3)]}
+ }
+
+For further details see Bailey 2001, Wintle 1975, vignette "RLumModel - Using own parameter sets" 
+and example 3.}
+
+\item{own_state_parameters}{\code{\link{numeric}} (with default): Some publications (e.g. Pagonis 2009)
+offer state parameters. With this argument the user can submit these state parameters. \bold{Note:}
+You have to submit the state parameters for the conduction band and the valence band, too. For further details
+see vignette "RLumModel - Using own parameter sets" and example 3.}
+
+\item{own_start_temperature}{\code{\link{numeric}} (with default): Parameter to control the start temperature (in deg. C) of
+a simulation. This parameter takes effect only when 'model = "customized"' is chosen.
+
 \item{...}{further arguments and graphical parameters passed to
 \code{\link{plot.default}}. See details for further information.}
 }
@@ -37,10 +68,15 @@ Wrapper for the function \code{\link[RLumModel]{model_LuminescenceSignals}} from
 see the manual of this package.
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:36:06)
+ 0.1.3 (2017-06-29 18:40:14)
 }
+
 \author{
 Johannes Friedrich, University of Bayreuth (Germany),\cr
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), \cr
-\cr R Luminescence Package Team}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Friedrich, J., Kreutzer, S. (2017). model_LuminescenceSignals(): Model Luminescence Signals (wrapper). Function version 0.1.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
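The own_parameters list documented above has a fixed set of items; the sketch below only shows its shape. Every numeric value is a placeholder and does not correspond to any published parameter set, and the 'sequence' object in the commented call is assumed to exist.

## minimal sketch of a customised parameter set (all numbers are placeholders,
## see the vignette "RLumModel - Using own parameter sets" for real values)
own_parameters <- list(
  N = c(1.5e7, 1e7),      # trap/centre concentrations [cm^(-3)]
  E = c(0.97, 1.55),      # trap depths [eV]
  s = c(5e12, 5e14),      # frequency factors [s^(-1)]
  A = c(1e-8, 1e-9),      # transition probabilities into the traps/centres
  B = c(0, 5e-7),         # conduction band to hole centre transition probabilities
  Th = c(0.75, 0),        # photoionisation cross sections
  E_th = c(0.1, 0),       # thermal assistance energies [eV]
  k_B = 8.617e-5,         # Boltzmann constant [eV/K]
  W = 0.64,               # activation energy [eV]
  K = 2.8e7,              # dimensionless constant
  model = "customized")

## passed on via the wrapper documented above (a 'sequence' object is assumed)
# model_LuminescenceSignals(model = "customized", sequence = sequence,
#                           own_parameters = own_parameters)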
 
diff --git a/man/names_RLum.Rd b/man/names_RLum.Rd
index cc63562..6d0a1ad 100644
--- a/man/names_RLum.Rd
+++ b/man/names_RLum.Rd
@@ -23,12 +23,9 @@ corresponding 'names' function will be selected. Allowed arguments can be found
 in the documentations of the corresponding \code{\linkS4class{RLum}} class.
 }
 \section{Function version}{
- 0.1.0 (2015-11-29 17:27:48)
+ 0.1.0 (2017-06-29 18:40:14)
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \seealso{
 \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Image}},
@@ -36,5 +33,13 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}
 }
-\keyword{utilities}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team} 
 
+\section{How to cite}{
+Kreutzer, S. (2017). names_RLum(): S4-names function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{utilities}
diff --git a/man/plot_AbanicoPlot.Rd b/man/plot_AbanicoPlot.Rd
index b9a17f0..cbc4741 100644
--- a/man/plot_AbanicoPlot.Rd
+++ b/man/plot_AbanicoPlot.Rd
@@ -10,7 +10,7 @@ plot_AbanicoPlot(data, na.rm = TRUE, log.z = TRUE, z.0 = "mean.weighted",
   rug = FALSE, kde = TRUE, hist = FALSE, dots = FALSE,
   boxplot = FALSE, y.axis = TRUE, error.bars = FALSE, bar, bar.col,
   polygon.col, line, line.col, line.lty, line.label, grid.col, frame = 1,
-  bw = "SJ", output = FALSE, interactive = FALSE, ...)
+  bw = "SJ", output = TRUE, interactive = FALSE, ...)
 }
 \arguments{
 \item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
@@ -134,7 +134,7 @@ a numeric value for manual setting.}
 
 \item{output}{\code{\link{logical}}: Optional output of numerical plot
 parameters. These can be useful to reproduce similar plots. Default is
-\code{FALSE}.}
+\code{TRUE}.}
 
 \item{interactive}{\code{\link{logical}} (with default): create an interactive
 abanico plot (requires the 'plotly' package)}
@@ -221,8 +221,9 @@ z-scale to specify where ticks are to be drawn by using the parameter
 \code{zlim}-definition.
 }
 \section{Function version}{
- 0.1.10 (2016-09-09 10:32:17)
+ 0.1.10 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data and recalculate to Gray
@@ -402,12 +403,12 @@ points(x = AP$data[[1]]$precision[!in_2sigma],
        y = AP$data[[1]]$std.estimate.plot[!in_2sigma],
        pch = 1)
 
+} 
+
+\section{How to cite}{
+Dietze, M., Kreutzer, S. (2017). plot_AbanicoPlot(): Function to create an Abanico Plot. Function version 0.1.10. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
-IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Inspired by a plot
-introduced by Galbraith & Green (1990)
-\cr R Luminescence Package Team}
+
 \references{
 Galbraith, R. & Green, P., 1990. Estimating the component ages
 in a finite mixture. International Journal of Radiation Applications and
@@ -422,4 +423,8 @@ Quaternary Geochronology. doi:10.1016/j.quageo.2015.09.003
 \code{\link{plot_RadialPlot}}, \code{\link{plot_KDE}},
 \code{\link{plot_Histogram}}
 }
-
+\author{
+Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Inspired by a plot
+introduced by Galbraith & Green (1990)
+\cr R Luminescence Package Team}
diff --git a/man/plot_DRTResults.Rd b/man/plot_DRTResults.Rd
index 41ade5b..d921189 100644
--- a/man/plot_DRTResults.Rd
+++ b/man/plot_DRTResults.Rd
 Further data and plot arguments can be added by using the appropriate R
 commands.
 }
 \section{Function version}{
- 0.1.10 (2016-09-09 10:32:17)
+ 0.1.10 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -155,11 +156,12 @@ plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
                 preheat = c(200, 200, 200, 240, 240),
                 boxplot = TRUE)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Dietze, M. (2017). plot_DRTResults(): Visualise dose recovery test results. Function version 0.1.10. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France), Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Wintle, A.G., Murray, A.S., 2006. A review of quartz optically
 stimulated luminescence characteristics and their relevance in
@@ -169,5 +171,8 @@ single-aliquot regeneration dating protocols. Radiation Measurements, 41,
 \seealso{
 \code{\link{plot}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
 \keyword{dplot}
-
diff --git a/man/plot_DetPlot.Rd b/man/plot_DetPlot.Rd
index 91712eb..4a06adf 100644
--- a/man/plot_DetPlot.Rd
+++ b/man/plot_DetPlot.Rd
@@ -98,8 +98,9 @@ every sequence should be checked carefully before running long calculations usin
 hundreds of channels.
 }
 \section{Function version}{
- 0.1.0 (2016-05-19 23:48:19)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 \dontrun{
@@ -120,10 +121,12 @@ plot_DetPlot(object,
 )
 }
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_DetPlot(): Create De(t) plot. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 Bailey, R.M., Singarayer, J.S., Ward, S., Stokes, S., 2003. Identification of partial resetting
 using De as a function of illumination time. Radiation Measurements 37, 511-518.
@@ -132,4 +135,6 @@ doi:10.1016/S1350-4487(03)00063-5
 \seealso{
 \code{\link{plot}}, \code{\link{analyse_SAR.CWOSL}}, \code{\link{analyse_pIRIRSequence}}
 }
-
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
diff --git a/man/plot_FilterCombinations.Rd b/man/plot_FilterCombinations.Rd
index 761f4d8..928824d 100644
--- a/man/plot_FilterCombinations.Rd
+++ b/man/plot_FilterCombinations.Rd
@@ -5,7 +5,7 @@
 \title{Plot filter combinations along with the (optional) net transmission window}
 \usage{
 plot_FilterCombinations(filters, wavelength_range = 200:1000,
-  show_net_transmission = TRUE, plot = TRUE, ...)
+  show_net_transmission = TRUE, interactive = FALSE, plot = TRUE, ...)
 }
 \arguments{
 \item{filters}{\code{\link{list}} (\bold{required}): a named list of filter data for each filter to be shown.
@@ -17,6 +17,8 @@ The filter data itself should be either provided as \code{\link{data.frame}} or
 \item{show_net_transmission}{\code{\link{logical}} (with default): show net transmission window
 as polygon.}
 
+\item{interactive}{\code{\link{logical}} (with default): enable/disable interactive plot}
+
 \item{plot}{\code{\link{logical}} (with default): enables or disables the plot output}
 
 \item{\dots}{further arguments that can be passed to control the plot output. Supported are \code{main},
@@ -30,6 +32,7 @@ Returns an S4 object of type \code{\linkS4class{RLum.Results}}.
 \tabular{lll}{
 \bold{Object} \tab \bold{Type} \bold{Description} \cr
  net_transmission_window \tab \code{matrix} \tab the resulting net transmission window \cr
+ OD_total \tab \code{matrix} \tab the total optical density\cr
  filter_matrix \tab \code{matrix} \tab the filter matrix used for plotting
 
 }
 wavelengths are automatically interpolated for the given filter data using the f
 With that a standardised output is reached and a net transmission window can be shown.\cr
 }
 \details{
+\bold{Calculations}\cr
+
+\bold{Net transmission window}\cr
+The net transmission window of two filters is approximated by
+
+\deqn{T_{final} = T_{1} * T_{2}}
+
+\bold{Optical density}\cr
+
+\deqn{OD = -log(T)}
+
+\bold{Total optical density}\cr
+
+\deqn{OD_{total} = OD_{1} + OD_{2}}
+
+Please consider using your own calculations for more precise values.
+
 \bold{How to provide input data?}\cr
 
 CASE 1\cr
@@ -87,6 +107,8 @@ The following further non-common plotting parameters can be passed to the functi
 \code{legend.pos} \tab \code{character} \tab change legend position (\code{\link[graphics]{legend}}) \cr
 \code{legend.text} \tab \code{character} \tab same as the argument \code{legend} in (\code{\link[graphics]{legend}}) \cr
 \code{net_transmission.col} \tab \code{col} \tab colour of net transmission window polygon \cr
+\code{net_transmission.col_lines} \tab \code{col} \tab colour of net transmission window polygon lines \cr
+\code{net_transmission.density} \tab \code{numeric} \tab specify line density in the transmission polygon \cr
 \code{grid} \tab \code{list} \tab full list of arguments that can be passd to the function \code{\link[graphics]{grid}}
 }
 
@@ -95,8 +117,9 @@ can be fully customised by disabling the standard legend and use the function \c
 instead.
 }
 \section{Function version}{
- 0.1.0 (2016-08-26 10:45:14)
+ 0.3.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## (For legal reasons no real filter data are provided)
@@ -114,14 +137,28 @@ results <- plot_FilterCombinations(
 filters = list(filter_1 = filter1, Rectangle = list(filter2, d = 2, P = 0.6)))
 results
 
+## Example 3 show optical density
+plot(results$OD_total)
+
+\dontrun{
+##Example 4
+##show the filters using the interactive mode
+plot_FilterCombinations(filters = list(filter1, filter2), interactive = TRUE)
+
+}
+
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montagine (France)\cr
-\cr R Luminescence Package Team}
 \seealso{
 \code{\linkS4class{RLum.Results}}, \code{\link{approx}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_FilterCombinations(): Plot filter combinations along with the (optional) net transmission window. Function version 0.3.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
 \keyword{aplot}
 \keyword{datagen}
-
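The calculations added to the details section above are simple enough to check by hand. The sketch below uses two made-up transmission values and the base-10 convention for optical density; the \deqn above writes log without an explicit base, so treat the base as an assumption.

## minimal sketch of the documented approximations (transmission values are made up)
T1 <- 0.90
T2 <- 0.85
T_final <- T1 * T2          # net transmission window
OD1 <- -log10(T1)           # optical density of filter 1
OD2 <- -log10(T2)           # optical density of filter 2
OD_total <- OD1 + OD2       # total optical density
T_final
OD_total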
diff --git a/man/plot_GrowthCurve.Rd b/man/plot_GrowthCurve.Rd
index 4db3306..5dac4a6 100644
--- a/man/plot_GrowthCurve.Rd
+++ b/man/plot_GrowthCurve.Rd
@@ -4,11 +4,11 @@
 \alias{plot_GrowthCurve}
 \title{Fit and plot a growth curve for luminescence data (Lx/Tx against dose)}
 \usage{
-plot_GrowthCurve(sample, na.rm = TRUE, fit.method = "EXP",
-  fit.force_through_origin = FALSE, fit.weights = TRUE,
-  fit.includingRepeatedRegPoints = TRUE, fit.NumberRegPoints = NULL,
-  fit.NumberRegPointsReal = NULL, fit.bounds = TRUE,
-  NumberIterations.MC = 100, output.plot = TRUE,
+plot_GrowthCurve(sample, na.rm = TRUE, mode = "interpolation",
+  fit.method = "EXP", fit.force_through_origin = FALSE,
+  fit.weights = TRUE, fit.includingRepeatedRegPoints = TRUE,
+  fit.NumberRegPoints = NULL, fit.NumberRegPointsReal = NULL,
+  fit.bounds = TRUE, NumberIterations.MC = 100, output.plot = TRUE,
   output.plotExtended = TRUE, output.plotExtended.single = FALSE,
   cex.global = 1, txtProgressBar = TRUE, verbose = TRUE, ...)
 }
@@ -21,6 +21,12 @@ fits at least three dose points (including the natural) should be provided.}
 \item{na.rm}{\code{\link{logical}} (with default): excludes \code{NA} values
 from the data set prior to any further operations.}
 
+\item{mode}{\code{\link{character}} (with default): selects calculation mode of the function.
+(A) \code{"interpolation"} (default) calculates the De by interpolation,
+(B) \code{"extrapolation"} calculates the De by extrapolation and
+(C) \code{"alternate"} calculates no De and just fits the data points. Please note that
+for option \code{"regenrative"} the first point is considered as natural dose}
+
 \item{fit.method}{\code{\link{character}} (with default): function used for
 fitting. Possible options are: \code{LIN}, \code{QDR}, \code{EXP}, \code{EXP OR LIN},
 \code{EXP+LIN} or \code{EXP+EXP}. See details.}
@@ -95,7 +101,8 @@ the slot \code{data} contains the following elements:\cr
 }
 \description{
 A dose response curve is produced for luminescence measurements using a
-regenerative protocol.
+regenerative or additive protocol. The function supports interpolation and
+extrapolation to calculate the equivalent dose.
 }
 \details{
 \bold{Fitting methods} \cr\cr For all options (except for the \code{LIN}, \code{QDR} and
@@ -152,8 +159,9 @@ To avoid plotting the subtitle information, provide an empty user mtext \code{mt
 To plot any other subtitle text, use \code{mtext}.
 }
 \section{Function version}{
- 1.8.16 (2016-09-09 10:32:17)
+ 1.9.5 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##(1) plot growth curve for a dummy data.set and show De value
@@ -182,13 +190,25 @@ plot(
  type = "l"
 )
 
+##(5) plot using the 'extrapolation' mode
+LxTxData[1,2:3] <- c(0.5, 0.001)
+print(plot_GrowthCurve(LxTxData,mode = "extrapolation"))
+
+##(6) plot using the 'alternate' mode
+LxTxData[1,2:3] <- c(0.5, 0.001)
+print(plot_GrowthCurve(LxTxData,mode = "alternate"))
+
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France), \cr Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
 \seealso{
 \code{\link{nls}}, \code{\linkS4class{RLum.Results}},
 \code{\link{get_RLum}}, \code{\link[minpack.lm]{nlsLM}}, \code{\link{lm}}, \code{uniroot}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), \cr Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S., Dietze, M. (2017). plot_GrowthCurve(): Fit and plot a growth curve for luminescence data (Lx/Tx against dose). Function version 1.9.5. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
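The new 'extrapolation' mode also works without opening a plot device; a minimal sketch, assuming the LxTxData example data set used in the examples above is available via the package data:

## minimal sketch: extrapolation mode without plot output
data(ExampleData.LxTxData, envir = environment())
LxTxData[1, 2:3] <- c(0.5, 0.001)
temp <- plot_GrowthCurve(LxTxData, mode = "extrapolation", output.plot = FALSE)
get_RLum(temp)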
 
diff --git a/man/plot_Histogram.Rd b/man/plot_Histogram.Rd
index b01ad38..5899789 100644
--- a/man/plot_Histogram.Rd
+++ b/man/plot_Histogram.Rd
@@ -83,8 +83,9 @@ error), \code{"seabs.weighted"} (error-weighted absolute standard error),
 The input data is not restricted to a special type.
 }
 \section{Function version}{
- 0.4.4 (2016-07-16 11:28:11)
+ 0.4.4 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load data
@@ -113,11 +114,15 @@ plot_Histogram(ExampleData.DeValues,
 
 
 }
+\seealso{
+\code{\link{hist}}, \code{\link{plot}}
+}
 \author{
 Michael Dietze, GFZ Potsdam (Germany), \cr Sebastian Kreutzer,
 IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
-\seealso{
-\code{\link{hist}}, \code{\link{plot}}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Dietze, M., Kreutzer, S. (2017). plot_Histogram(): Plot a histogram with separate error plot. Function version 0.4.4. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
 
diff --git a/man/plot_KDE.Rd b/man/plot_KDE.Rd
index 5dabf9f..11fabd9 100644
--- a/man/plot_KDE.Rd
+++ b/man/plot_KDE.Rd
@@ -6,7 +6,7 @@
 \usage{
 plot_KDE(data, na.rm = TRUE, values.cumulative = TRUE, order = TRUE,
   boxplot = TRUE, rug = TRUE, summary, summary.pos,
-  summary.method = "MCM", bw = "nrd0", output = FALSE, ...)
+  summary.method = "MCM", bw = "nrd0", output = TRUE, ...)
 }
 \arguments{
 \item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
@@ -16,7 +16,7 @@ data sets, these must be provided as \code{list} (e.g. \code{list(dataset1,
 dataset2)}).}
 
 \item{na.rm}{\code{\link{logical}} (with default): exclude NA values
-from the data set prior to any further operations.}
+from the data set prior to any further operation.}
 
 \item{values.cumulative}{\code{\link{logical}} (with default): show
 cumulative individual data.}
@@ -51,7 +51,7 @@ value for manual setting.}
 
 \item{output}{\code{\link{logical}}: Optional output of numerical plot
 parameters. These can be useful to reproduce similar plots. Default is
-\code{FALSE}.}
+\code{TRUE}.}
 
 \item{\dots}{further arguments and graphical parameters passed to
 \code{\link{plot}}.}
@@ -102,8 +102,9 @@ The plot output is no 'probability density' plot (cf. the discussion
 of Berger and Galbraith in Ancient TL; see references)!
 }
 \section{Function version}{
- 3.5.3 (2016-09-09 10:32:17)
+ 3.5.5 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## read example data set
@@ -159,11 +160,15 @@ KDE_out <- plot_KDE(data = ExampleData.DeValues,
 output = TRUE)
 
 }
+\seealso{
+\code{\link{density}}, \code{\link{plot}}
+}
 \author{
 Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
 IRAMAT-CRP2A, Universite Bordeaux Montaigne
-\cr R Luminescence Package Team}
-\seealso{
-\code{\link{density}}, \code{\link{plot}}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Dietze, M., Kreutzer, S. (2017). plot_KDE(): Plot kernel density estimate with statistics. Function version 3.5.5. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
 
diff --git a/man/plot_NRt.Rd b/man/plot_NRt.Rd
index 9ccdade..fc97fff 100644
--- a/man/plot_NRt.Rd
+++ b/man/plot_NRt.Rd
@@ -121,10 +121,12 @@ for (i in 1:length(aliquot)) {
 par(mfrow = c(1, 1))
 
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). plot_NRt(): Visualise natural/regenerated signal ratios. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany)
-}
+
 \references{
 Steffen, D., Preusser, F., Schlunegger, F., 2009. OSL quartz underestimation due to
 unstable signal components. Quaternary Geochronology, 4, 353-362.
@@ -132,4 +134,6 @@ unstable signal components. Quaternary Geochronology, 4, 353-362.
 \seealso{
 \code{\link{plot}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany)
+}
diff --git a/man/plot_RLum.Analysis.Rd b/man/plot_RLum.Analysis.Rd
index b51fa6c..32709b9 100644
--- a/man/plot_RLum.Analysis.Rd
+++ b/man/plot_RLum.Analysis.Rd
@@ -4,7 +4,7 @@
 \alias{plot_RLum.Analysis}
 \title{Plot function for an RLum.Analysis S4 class object}
 \usage{
-plot_RLum.Analysis(object, subset, nrows, ncols, abline = NULL,
+plot_RLum.Analysis(object, subset = NULL, nrows, ncols, abline = NULL,
   combine = FALSE, curve.transformation, plot.single = FALSE, ...)
 }
 \arguments{
@@ -75,8 +75,9 @@ Only plotting of \code{RLum.Data.Curve} and \code{RLum.Data.Spectrum}
 objects are currently supported.\cr
 }
 \section{Function version}{
- 0.3.6 (2016-09-09 10:32:17)
+ 0.3.8 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load data
@@ -85,20 +86,32 @@ data(ExampleData.BINfileData, envir = environment())
 ##convert values for position 1
 temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
 
-##plot (combine) TL curves in one plot
+##(1) plot (combine) TL curves in one plot
+plot_RLum.Analysis(
+temp,
+subset = list(recordType = "TL"),
+combine = TRUE,
+norm = TRUE,
+abline = list(v = c(110))
+)
+
+##(2) same as example (1) but using
+## the argument smooth = TRUE
 plot_RLum.Analysis(
 temp,
 subset = list(recordType = "TL"),
 combine = TRUE,
 norm = TRUE,
+smooth = TRUE,
 abline = list(v = c(110))
 )
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_RLum.Analysis(): Plot function for an RLum.Analysis S4 class object. Function version 0.3.8. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
@@ -106,5 +119,8 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\link{plot}}, \code{\link{plot_RLum}},
 \code{\link{plot_RLum.Data.Curve}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{aplot}
-
diff --git a/man/plot_RLum.Data.Curve.Rd b/man/plot_RLum.Data.Curve.Rd
index 3a24486..42c32b1 100644
--- a/man/plot_RLum.Data.Curve.Rd
+++ b/man/plot_RLum.Data.Curve.Rd
@@ -39,8 +39,9 @@ according to \code{\link{plot}}.
 Not all arguments of \code{\link{plot}} will be passed!
 }
 \section{Function version}{
- 0.2.0 (2016-05-02 09:36:06)
+ 0.2.3 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -56,16 +57,20 @@ temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
 plot_RLum.Data.Curve(temp)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_RLum.Data.Curve(): Plot function for an RLum.Data.Curve S4 class object. Function version 0.2.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
 \seealso{
 \code{\link{plot}}, \code{\link{plot_RLum}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{aplot}
-
diff --git a/man/plot_RLum.Data.Image.Rd b/man/plot_RLum.Data.Image.Rd
index e44a99f..f6e679f 100644
--- a/man/plot_RLum.Data.Image.Rd
+++ b/man/plot_RLum.Data.Image.Rd
@@ -76,8 +76,9 @@ function is not optimized to handle image data > ca. 200 MByte and thus
 plotting of such data is extremely slow.
 }
 \section{Function version}{
- 0.1 (2015-11-29 17:27:48)
+ 0.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -87,11 +88,12 @@ data(ExampleData.RLum.Data.Image, envir = environment())
 ##plot data
 plot_RLum.Data.Image(ExampleData.RLum.Data.Image)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_RLum.Data.Image(): Plot function for an RLum.Data.Image S4 class object. Function version 0.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 -
 }
@@ -99,5 +101,8 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Data.Image}}, \code{\link{plot}},
 \code{\link{plot_RLum}}, \code{\link[raster]{raster}},
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{aplot}
-
diff --git a/man/plot_RLum.Data.Spectrum.Rd b/man/plot_RLum.Data.Spectrum.Rd
index 8e296ff..5763ddf 100644
--- a/man/plot_RLum.Data.Spectrum.Rd
+++ b/man/plot_RLum.Data.Spectrum.Rd
@@ -144,15 +144,17 @@ summed up. To select a transect use the \code{xlim} argument, e.g.
 \bold{Further arguments that will be passed (depending on the plot type)}
 
 \code{xlab}, \code{ylab}, \code{zlab}, \code{xlim}, \code{ylim},
-\code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type}, \code{col},
-\code{border}, \code{box} \code{lwd}, \code{bty} \cr
+\code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type} ("single", "multiple.lines",
+"interactive"), \code{col},
+\code{border}, \code{box}, \code{lwd}, \code{bty}, \code{showscale} ("interactive") \cr
 }
 \note{
 Not all additional arguments (\code{...}) will be passed similarly!
 }
 \section{Function version}{
- 0.5.0 (2016-09-09 10:32:17)
+ 0.5.3 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -184,22 +186,37 @@ plot_RLum.Data.Spectrum(TL.Spectrum,
                         bin.cols = 1)
 
 \dontrun{
- ##(4) interactive plot using the package plotly
+ ##(4) interactive plot using the package plotly ("surface")
  plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
  xlim = c(310,750), ylim = c(0,300), bin.rows=10,
  bin.cols = 1)
 
- ##(5) alternative using the package fields
+ ##(5) interactive plot using the package plotly ("contour")
+ plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+ xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+ bin.cols = 1,
+ type = "contour",
+ showscale = TRUE)
+
+ ##(6) interactive plot using the package plotly ("heatmap")
+ plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+ xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+ bin.cols = 1,
+ type = "heatmap",
+ showscale = TRUE)
+
+ ##(7) alternative using the package fields
  fields::image.plot(get_RLum(TL.Spectrum))
  contour(get_RLum(TL.Spectrum), add = TRUE)
 
 }
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_RLum.Data.Spectrum(): Plot function for an RLum.Data.Spectrum S4 class object. Function version 0.5.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Blasse, G., Grabmaier, B.C., 1994. Luminescent Materials.
 Springer.
@@ -209,5 +226,8 @@ Springer.
 \code{\link{plot_RLum}}, \code{\link{persp}}, \code{\link[plotly]{plot_ly}},
 \code{\link{contour}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{aplot}
-
diff --git a/man/plot_RLum.Rd b/man/plot_RLum.Rd
index 5edbaf4..303dbc7 100644
--- a/man/plot_RLum.Rd
+++ b/man/plot_RLum.Rd
@@ -45,8 +45,9 @@ found in the documentations of each plot function.  \tabular{lll}{
 The provided plot output depends on the input object.
 }
 \section{Function version}{
- 0.4.2 (2016-09-09 10:32:17)
+ 0.4.3 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -60,11 +61,12 @@ temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
 plot_RLum(temp)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_RLum(): General plot function for RLum S4 class objects. Function version 0.4.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
@@ -76,5 +78,8 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Analysis}}, \code{\link{plot_RLum.Results}},
 \code{\linkS4class{RLum.Results}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{dplot}
-
diff --git a/man/plot_RLum.Results.Rd b/man/plot_RLum.Results.Rd
index 1bed719..2a0a3d3 100644
--- a/man/plot_RLum.Results.Rd
+++ b/man/plot_RLum.Results.Rd
@@ -33,8 +33,9 @@ Not all arguments available for \code{\link{plot}} will be passed!
 Only plotting of \code{RLum.Results} objects are supported.
 }
 \section{Function version}{
- 0.2.1 (2016-05-16 22:24:15)
+ 0.2.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -54,16 +55,20 @@ grains<- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = F
 plot_RLum.Results(grains)
 
 
+} 
+
+\section{How to cite}{
+Burow, C., Kreutzer, S. (2017). plot_RLum.Results(): Plot function for an RLum.Results S4 class object. Function version 0.2.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, IRAMAT-CRP2A,
-Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
 \seealso{
 \code{\link{plot}}, \code{\link{plot_RLum}},
 }
+\author{
+Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, IRAMAT-CRP2A,
+Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{aplot}
-
diff --git a/man/plot_RadialPlot.Rd b/man/plot_RadialPlot.Rd
index b6d5f03..f0870af 100644
--- a/man/plot_RadialPlot.Rd
+++ b/man/plot_RadialPlot.Rd
@@ -4,10 +4,10 @@
 \alias{plot_RadialPlot}
 \title{Function to create a Radial Plot}
 \usage{
-plot_RadialPlot(data, na.rm = TRUE, negatives = "remove", log.z = TRUE,
-  central.value, centrality = "mean.weighted", mtext, summary, summary.pos,
-  legend, legend.pos, stats, rug = FALSE, plot.ratio, bar.col,
-  y.ticks = TRUE, grid.col, line, line.col, line.label, output = FALSE, ...)
+plot_RadialPlot(data, na.rm = TRUE, log.z = TRUE, central.value,
+  centrality = "mean.weighted", mtext, summary, summary.pos, legend,
+  legend.pos, stats, rug = FALSE, plot.ratio, bar.col, y.ticks = TRUE,
+  grid.col, line, line.col, line.label, output = FALSE, ...)
 }
 \arguments{
 \item{data}{\code{\link{data.frame}} or \code{\linkS4class{RLum.Results}}
@@ -18,10 +18,6 @@ data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.}
 \item{na.rm}{\code{\link{logical}} (with default): excludes \code{NA}
 values from the data set prior to any further operations.}
 
-\item{negatives}{\code{\link{character}} (with default): rule for negative
-values. Default is \code{"remove"} (i.e. negative values are removed from
-the data set).}
-
 \item{log.z}{\code{\link{logical}} (with default): Option to display the
 z-axis in logarithmic scale. Default is \code{TRUE}.}
 
@@ -131,8 +127,9 @@ error), \code{"seabs.weighted"} (error-weighted absolute standard error),
 \code{"kurtosis"} (kurtosis) and \code{"skewness"} (skewness).
 }
 \section{Function version}{
- 0.5.3 (2016-05-19 23:47:38)
+ 0.5.4 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## load example data
@@ -224,12 +221,12 @@ plot_RadialPlot(data = data.3,
                 summary.pos = "sub",
                 legend = c("Sample 1", "Sample 2"))
 
+} 
+
+\section{How to cite}{
+Dietze, M., Kreutzer, S. (2017). plot_RadialPlot(): Function to create a Radial Plot. Function version 0.5.4. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
-IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Based on a rewritten
-S script of Rex Galbraith, 2010
-\cr R Luminescence Package Team}
+
 \references{
 Galbraith, R.F., 1988. Graphical Display of Estimates Having
 Differing Standard Errors. Technometrics, 30 (3), 271-281.
@@ -260,4 +257,8 @@ recommendations. Quaternary Geochronology, 11, 1-27.
 \code{\link{plot}}, \code{\link{plot_KDE}},
 \code{\link{plot_Histogram}}
 }
-
+\author{
+Michael Dietze, GFZ Potsdam (Germany),\cr Sebastian Kreutzer,
+IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)\cr Based on a rewritten
+S script of Rex Galbraith, 2010
+\cr R Luminescence Package Team}
diff --git a/man/plot_Risoe.BINfileData.Rd b/man/plot_Risoe.BINfileData.Rd
index 8468ba0..86e3e06 100644
--- a/man/plot_Risoe.BINfileData.Rd
+++ b/man/plot_Risoe.BINfileData.Rd
@@ -79,8 +79,9 @@ The function has been successfully tested for the Sequence Editor file
 output version 3 and 4.
 }
 \section{Function version}{
- 0.4.1 (2015-11-29 17:27:48)
+ 0.4.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -99,11 +100,12 @@ data(ExampleData.BINfileData, envir = environment())
 #mtext(side = 4, BINfile, outer = TRUE, col = "blue", cex = .7)
 #dev.off()
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Dietze, M. (2017). plot_Risoe.BINfileData(): Plot single luminescence curves from a BIN file object. Function version 0.4.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France),\cr Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 Duller, G., 2007. Analyst. pp. 1-45.
 }
@@ -112,5 +114,8 @@ Duller, G., 2007. Analyst. pp. 1-45.
 \code{\link{CW2pLM}}, \code{\link{CW2pLMi}}, \code{\link{CW2pPMi}},
 \code{\link{CW2pHMi}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France),\cr Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
 \keyword{dplot}
-
diff --git a/man/plot_ViolinPlot.Rd b/man/plot_ViolinPlot.Rd
index b9399ca..f5b6f01 100644
--- a/man/plot_ViolinPlot.Rd
+++ b/man/plot_ViolinPlot.Rd
@@ -58,8 +58,9 @@ two other R packages exist providing a possibility to produces this kind of plot
 'vioplot' and 'violinmplot' (see References for details).
 }
 \section{Function version}{
- 0.1.2 (2016-05-17 13:27:04)
+ 0.1.3 (2017-06-29 18:40:14)
 }
+
 \examples{
 ## read example data set
 data(ExampleData.DeValues, envir = environment())
@@ -68,10 +69,12 @@ ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)
 ## create plot straightforward
 plot_ViolinPlot(data = ExampleData.DeValues)
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). plot_ViolinPlot(): Create a violin plot. Function version 0.1.3. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 Daniel Adler (2005). vioplot: A violin plot is a combination of a box plot and a kernel density plot.
 R package version 0.2 http://CRAN.R-project.org/package=vioplot
@@ -87,4 +90,6 @@ Wickham. H (2009). ggplot2: elegant graphics for data analysis. Springer New Yor
 \code{\link[stats]{density}}, \code{\link{plot}}, \code{\link{boxplot}}, \code{\link{rug}},
 \code{\link{calc_Statistics}}
 }
-
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
diff --git a/man/read_BIN2R.Rd b/man/read_BIN2R.Rd
index c37a535..15f361f 100644
--- a/man/read_BIN2R.Rd
+++ b/man/read_BIN2R.Rd
@@ -7,13 +7,13 @@
 read_BIN2R(file, show.raw.values = FALSE, position = NULL,
   n.records = NULL, zero_data.rm = TRUE, duplicated.rm = FALSE,
   fastForward = FALSE, show.record.number = FALSE, txtProgressBar = TRUE,
-  forced.VersionNumber = NULL, pattern = NULL, verbose = TRUE, ...)
+  forced.VersionNumber = NULL, ignore.RECTYPE = FALSE, pattern = NULL,
+  verbose = TRUE, ...)
 }
 \arguments{
 \item{file}{\code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
-BIN/BINX file. If input is a \code{list} it should comprise only \code{character}s representing
-each valid path and BIN/BINX-file names.
-Alternatively the input character can be just a directory (path), in this case the
+BIN/BINX file (URLs are supported). If input is a \code{list} it should comprise only \code{character}s representing
+each valid path and BIN/BINX-file names. Alternatively the input character can be just a directory (path), in this case
 the function tries to detect and import all BIN/BINX files found in the directory.}
 
 \item{show.raw.values}{\link{logical} (with default): shows raw values from
@@ -51,6 +51,10 @@ version number check in the function by own values for cases where the
 BIN-file version is not supported. Can be provided as \code{list} if \code{file} is a \code{list}.\cr
 Note: The usage is at own risk, only supported BIN-file versions have been tested.}
 
+\item{ignore.RECTYPE}{\code{\link{logical}} (with default): this argument allows ignoring the values
+in the byte 'RECTYPE' (BIN-file version 08), in case they are not documented or are faultily set.
+If set, all records are treated like records of 'RECTYPE' 0 or 1.}
+
 \item{pattern}{\code{\link{character}} (optional): argument that is used if only a path is provided.
 The argument will than be passed to the function \code{\link{list.files}} used internally to
 construct a \code{list} of wanted files}
@@ -89,8 +93,9 @@ version number depends on the used Sequence Editor.\cr\cr
 import.}
 }
 \section{Function version}{
- 0.15.0 (2016-06-13 21:17:19)
+ 0.15.6 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -100,19 +105,23 @@ import.}
 #temp <- read_BIN2R(FILE)
 #temp
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Fuchs, M.C., Fuchs, M. (2017). read_BIN2R(): Import Risoe BIN-file into R. Function version 0.15.6. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France), Margret C. Fuchs, HZDR Freiberg, (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016.
-\url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+\url{http://www.nutech.dtu.dk/english/products-and-services/radiation-instruments/tl_osl_reader/manuals}
 }
 \seealso{
 \code{\link{write_R2BIN}}, \code{\linkS4class{Risoe.BINfileData}},
 \code{\link[base]{readBin}}, \code{\link{merge_Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}}
 \code{\link[utils]{txtProgressBar}}, \code{\link{list.files}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France), Margret C. Fuchs, HZDR Freiberg, (Germany)
+\cr R Luminescence Package Team}
 \keyword{IO}
-
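
The hunk above documents the new ignore.RECTYPE argument and the URL/directory input for read_BIN2R(). A minimal R sketch of how these documented arguments combine; the file path is a placeholder and ignore.RECTYPE = TRUE is only needed for BIN-file version 08 records with undocumented RECTYPE bytes:

library(Luminescence)

## placeholder path; replace with a local BIN/BINX file
file <- "~/measurements/sample.binx"

## import, skipping undocumented RECTYPE bytes (BIN-file version 08)
temp <- read_BIN2R(file, ignore.RECTYPE = TRUE)

## a directory plus a file pattern is also accepted, as documented above
## temp <- read_BIN2R(dirname(file), pattern = "\\.binx$")
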
diff --git a/man/read_Daybreak2R.Rd b/man/read_Daybreak2R.Rd
index 14f0739..69ccfe4 100644
--- a/man/read_Daybreak2R.Rd
+++ b/man/read_Daybreak2R.Rd
@@ -2,9 +2,9 @@
 % Please edit documentation in R/read_Daybreak2R.R
 \name{read_Daybreak2R}
 \alias{read_Daybreak2R}
-\title{Import Daybreak ASCII dato into R}
+\title{Import measurement data produced by a Daybreak TL/OSL reader into R}
 \usage{
-read_Daybreak2R(file, verbose = TRUE, txtProgressBar = TRUE)
+read_Daybreak2R(file, raw = FALSE, verbose = TRUE, txtProgressBar = TRUE)
 }
 \arguments{
 \item{file}{\code{\link{character}} or \code{\link{list}} (\bold{required}): path and file name of the
@@ -12,6 +12,10 @@ file to be imported. Alternatively a list of file names can be provided or just
 containing measurement data. Please note that the specific, common, file extension (txt) is likely
 leading to function failures during import when just a path is provided.}
 
+\item{raw}{\code{\link{logical}} (with default): if the input is a DAT-file (binary) a
+\code{\link[data.table]{data.table}} instead of the \code{\linkS4class{RLum.Analysis}} object
+can be returned for debugging purposes.}
+
 \item{verbose}{\code{\link{logical}} (with default): enables or disables terminal feedback}
 
 \item{txtProgressBar}{\code{\link{logical}} (with default): enables or disables
@@ -21,28 +25,42 @@ leading to function failures during import when just a path is provided.}
 A list of \code{\linkS4class{RLum.Analysis}} objects (each per position) is provided.
 }
 \description{
-Import a *.txt (ASCII) file produced by a Daybreak reader into R.
+Import a TXT-file (ASCII file) or a DAT-file (binary file) produced by a Daybreak reader into R.
+The import of the DAT-files is limited to the file format described for the software TLAPLLIC v.3.2
+used for a Daybreak, model 1100.
 }
 \note{
-\bold{[BETA VERSION]} This function version still needs to be properly tested.
+\bold{[BETA VERSION]} This function still needs to be tested properly. In particular,
+the function has undergone only very rough tests using a few files.
 }
 \section{Function version}{
- 0.2.1 (2016-05-02 09:36:06)
+ 0.3.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
-## This function has no example yet.
+\dontrun{
+file <- file.choose()
+temp <- read_Daybreak2R(file)
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)\cr Based on a suggestion by Willian Amidon and Andrew Louis Gorin.
-\cr R Luminescence Package Team}
+
+} 
+
+\section{How to cite}{
+Kreutzer, S., Zink, A. (2017). read_Daybreak2R(): Import measurement data produced by a Daybreak TL/OSL reader into R. Function version 0.3.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
 \references{
 -
 }
 \seealso{
-\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}}
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data.Curve}},
+\code{\link[data.table]{data.table}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), \cr
+Antoine Zink, C2RMF, Palais du Louvre, Paris (France)\cr
+\cr The ASCII-file import is based on a suggestion by William Amidon and Andrew Louis Gorin
+\cr R Luminescence Package Team}
 \keyword{IO}
-
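
The rewritten man page above adds DAT-file (binary) support and a raw argument to read_Daybreak2R(). A short sketch following the file.choose() pattern used in the example above; whether raw = TRUE is useful depends on the input being a binary DAT-file:

library(Luminescence)

## pick a Daybreak TXT- or DAT-file interactively
file <- file.choose()

## default import: a list of RLum.Analysis objects (one per position)
temp <- read_Daybreak2R(file)

## for binary DAT-files the underlying data.table can be returned for
## debugging, as documented for the new 'raw' argument above
## temp_raw <- read_Daybreak2R(file, raw = TRUE)
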
diff --git a/man/read_PSL2R.Rd b/man/read_PSL2R.Rd
new file mode 100644
index 0000000..eb911f2
--- /dev/null
+++ b/man/read_PSL2R.Rd
@@ -0,0 +1,80 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/read_PSL2R.R
+\name{read_PSL2R}
+\alias{read_PSL2R}
+\title{Import PSL files to R}
+\usage{
+read_PSL2R(file, drop_bg = FALSE, as_decay_curve = TRUE, smooth = FALSE,
+  merge = FALSE, ...)
+}
+\arguments{
+\item{file}{\code{\link{character}} (\bold{required}): path and file name of the
+PSL file. If input is a \code{vector} it should comprise only \code{character}s representing
+valid paths and PSL file names.
+Alternatively the input character can be just a directory (path). In this case the
+the function tries to detect and import all PSL files found in the directory.}
+
+\item{drop_bg}{\code{\link{logical}} (with default): \code{TRUE} to automatically 
+remove all non-OSL/IRSL curves.}
+
+\item{as_decay_curve}{\code{\link{logical}} (with default): Portable OSL Reader curves
+are often given as cumulative light sum curves. Use \code{TRUE} (default) to convert
+the curves to the more usual decay form.}
+
+\item{smooth}{\code{\link{logical}} (with default): \code{TRUE} to apply 
+Tukey's Running Median Smoothing for OSL and IRSL decay curves. Smoothing is
+encouraged if you see random signal drops within the decay curves related 
+to hardware errors.}
+
+\item{merge}{\code{\link{logical}} (with default): \code{TRUE} to merge all 
+\code{RLum.Analysis} objects. Only applicable if multiple files are imported.}
+
+\item{...}{currently not used.}
+}
+\value{
+Returns an S4 \code{\linkS4class{RLum.Analysis}} object containing
+\code{\linkS4class{RLum.Data.Curve}} objects for each curve.
+}
+\description{
+Imports PSL files produced by a SUERC portable OSL reader into R \bold{(BETA)}.
+}
+\details{
+This function provides an import routine for the SUERC portable OSL Reader PSL format.
+PSL files are just plain text and can be viewed with any text editor. Due to the 
+formatting of PSL files, this import function relies heavily on regular expressions to find and 
+extract all relevant information. See \bold{note}.
+}
+\note{
+Because this function relies heavily on regular expressions to parse 
+PSL files it is currently only in beta status. If the routine fails to import
+a specific PSL file, please report to <christoph.burow at uni-koeln.de> so the
+function can be updated.
+}
+\section{Function version}{
+ 0.0.1 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+# (1) Import PSL file to R
+
+\dontrun{
+FILE <- file.choose()
+temp <- read_PSL2R(FILE)
+temp
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}},
+\code{\linkS4class{RLum.Data.Curve}}
+}
+\author{
+Christoph Burow, University of Cologne (Germany)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Burow, C. (2017). read_PSL2R(): Import PSL files to R. Function version 0.0.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
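
The new read_PSL2R() man page above documents the drop_bg, as_decay_curve, smooth and merge arguments. A hedged sketch combining them; the directory path is a placeholder and the benefit of smoothing depends on the hardware artefacts described above:

library(Luminescence)

## placeholder directory containing PSL files; adjust to your own data
psl_path <- "~/data/portable_OSL"

## import all PSL files, keep only OSL/IRSL curves, convert the cumulative
## light sums to decay form, smooth them and merge everything into one
## RLum.Analysis object (arguments as documented above)
temp <- read_PSL2R(psl_path,
                   drop_bg = TRUE,
                   as_decay_curve = TRUE,
                   smooth = TRUE,
                   merge = TRUE)
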
diff --git a/man/read_SPE2R.Rd b/man/read_SPE2R.Rd
index 58f5024..9fe72e7 100644
--- a/man/read_SPE2R.Rd
+++ b/man/read_SPE2R.Rd
@@ -67,8 +67,9 @@ The function has been successfully tested for SPE format versions 2.x.
 supported.}
 }
 \section{Function version}{
- 0.1.0 (2016-05-02 09:42:32)
+ 0.1.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -95,15 +96,15 @@ supported.}
 #             sep = ";", row.names = FALSE)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). read_SPE2R(): Import Princeton Instruments (TM) SPE-file into R. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Princeton Instruments, 2014. Princeton Instruments SPE 3.0 File
-Format Specification, Version 1.A,
-\url{ftp://ftp.princetoninstruments.com/Public/Manuals/Princeton\%20Instruments/SPE\%203.0\%20File\%20Format\%20Specification.pdf}
+Format Specification, Version 1.A (for the document URL please use an internet search engine)
 
 Hall, C., 2012: readSPE.m.
 \url{http://www.mathworks.com/matlabcentral/fileexchange/35940-readspe/content/readSPE.m}
@@ -112,5 +113,8 @@ Hall, C., 2012: readSPE.m.
 \code{\link{readBin}}, \code{\linkS4class{RLum.Data.Spectrum}},
 \code{\link[raster]{raster}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{IO}
-
diff --git a/man/read_XSYG2R.Rd b/man/read_XSYG2R.Rd
index 1fbc6ee..0766817 100644
--- a/man/read_XSYG2R.Rd
+++ b/man/read_XSYG2R.Rd
@@ -110,8 +110,9 @@ be done using the functions provided with the package \code{\link{xml}}.\cr
 the XSYG file are skipped.
 }
 \section{Function version}{
- 0.5.7 (2016-09-05 20:21:40)
+ 0.5.8 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 
@@ -140,11 +141,12 @@ OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]]
 structure_RLum(OSL.SARMeasurement$Sequence.Object)
 
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). read_XSYG2R(): Import XSYG files to R. Function version 0.5.8. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 Grehl, S., Kreutzer, S., Hoehne, M., 2013. Documentation of the
 XSYG file format. Unpublished Technical Note. Freiberg, Germany \cr\cr
@@ -155,5 +157,8 @@ XSYG file format. Unpublished Technical Note. Freiberg, Germany \cr\cr
 \code{\link{xml}}, \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Data.Curve}}, \code{\link{approx}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{IO}
-
diff --git a/man/replicate_RLum.Rd b/man/replicate_RLum.Rd
index 4c68459..e5d1bfa 100644
--- a/man/replicate_RLum.Rd
+++ b/man/replicate_RLum.Rd
@@ -19,14 +19,19 @@ Returns a \code{\link{list}} of the object to be repeated
 Function replicates RLum S4 class objects and returns a list for these objects
 }
 \section{Function version}{
- 0.1.0 (2015-11-29 17:27:48)
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\seealso{
+\code{\linkS4class{RLum}},
 }
 \author{
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 (France)
-\cr R Luminescence Package Team}
-\seealso{
-\code{\linkS4class{RLum}},
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). replicate_RLum(): General replication function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\keyword{utilities}
 
+\keyword{utilities}
diff --git a/man/report_RLum.Rd b/man/report_RLum.Rd
index 542f5ee..cc0b394 100644
--- a/man/report_RLum.Rd
+++ b/man/report_RLum.Rd
@@ -115,8 +115,9 @@ CSS styling can be turned of using \code{css = FALSE}.
 This function requires the R packages 'rmarkdown', 'pander' and 'rstudioapi'.
 }
 \section{Function version}{
- 0.1.0 (2016-09-09 10:32:17)
+ 0.1.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 \dontrun{
@@ -177,12 +178,16 @@ x <- list(x = 1:10,
 report_RLum(object = x, file = "~/arbitray_list")
 }
 }
-\author{
-Christoph Burow, University of Cologne (Germany) \cr
-\cr R Luminescence Package Team}
 \seealso{
 \code{\link[rmarkdown]{render}}, \code{\link[pander]{pander_return}},
 \code{\link[pander]{openFileInOS}}, \code{\link[rstudioapi]{viewer}},
 \code{\link{browseURL}}
 }
+\author{
+Christoph Burow, University of Cologne (Germany) \cr
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Burow, C. (2017). report_RLum(): Create a HTML report for (RLum) objects. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
 
diff --git a/man/sTeve.Rd b/man/sTeve.Rd
index 10fae34..c4ca3f4 100644
--- a/man/sTeve.Rd
+++ b/man/sTeve.Rd
@@ -33,15 +33,19 @@ This function should not be taken too seriously.
 
 ##no example available
 
+} 
+
+\section{How to cite}{
+R Luminescence Team (2017). sTeve(): sTeve - sophisticated tool for efficient data validation and evaluation. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-R Luminescence Team, 2012-2013
-}
+
 \references{
 #
 }
 \seealso{
 \link{plot_KDE}
 }
+\author{
+R Luminescence Team, 2012-2013
+}
 \keyword{manip}
-
diff --git a/man/set_RLum.Rd b/man/set_RLum.Rd
index 1f3fd70..61a2240 100644
--- a/man/set_RLum.Rd
+++ b/man/set_RLum.Rd
@@ -38,8 +38,9 @@ corresponding \code{\linkS4class{RLum}} class: \code{\linkS4class{RLum.Data.Curv
 \code{\linkS4class{RLum.Analysis}} and \code{\linkS4class{RLum.Results}}
 }
 \section{Function version}{
- 0.3.0 (2016-05-02 09:43:47)
+ 0.3.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##produce empty objects from each class
@@ -60,10 +61,6 @@ data = matrix(c(1:100,exp(-c(1:100))),ncol = 2))
 plot_RLum(object)
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
 \seealso{
 \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Image}},
@@ -71,5 +68,13 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}
 }
-\keyword{utilities}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team} 
 
+\section{How to cite}{
+Kreutzer, S. (2017). set_RLum(): General set function for RLum S4 class objects. Function version 0.3.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{utilities}
diff --git a/man/set_Risoe.BINfileData.Rd b/man/set_Risoe.BINfileData.Rd
index 88ac443..29c7c4f 100644
--- a/man/set_Risoe.BINfileData.Rd
+++ b/man/set_Risoe.BINfileData.Rd
@@ -4,7 +4,8 @@
 \alias{set_Risoe.BINfileData}
 \title{General accessor function for RLum S4 class objects}
 \usage{
-set_Risoe.BINfileData(METADATA, DATA, .RESERVED)
+set_Risoe.BINfileData(METADATA = data.frame(), DATA = list(),
+  .RESERVED = list())
 }
 \arguments{
 \item{METADATA}{x}
@@ -26,14 +27,19 @@ corresponding get function will be selected. Allowed arguments can be found
 in the documentations of the corresponding \code{\linkS4class{Risoe.BINfileData}} class.
 }
 \section{Function version}{
- 0.1 (2015-11-29 17:27:48)
+ 0.1 (2017-06-29 18:40:14)
+}
+
+\seealso{
+\code{\linkS4class{Risoe.BINfileData}}
 }
 \author{
 Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 (France)
-\cr R Luminescence Package Team}
-\seealso{
-\code{\linkS4class{Risoe.BINfileData}}
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). set_Risoe.BINfileData(): General accessor function for RLum S4 class objects. Function version 0.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\keyword{utilities}
 
+\keyword{utilities}
diff --git a/man/smooth_RLum.Rd b/man/smooth_RLum.Rd
new file mode 100644
index 0000000..6a3a42e
--- /dev/null
+++ b/man/smooth_RLum.Rd
@@ -0,0 +1,74 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/smooth_RLum.R
+\docType{methods}
+\name{smooth_RLum}
+\alias{smooth_RLum}
+\alias{smooth_RLum,list-method}
+\title{Smoothing of data}
+\usage{
+smooth_RLum(object, ...)
+
+\S4method{smooth_RLum}{list}(object, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} (\bold{required}): S4 object of
+class \code{RLum}}
+
+\item{...}{further arguments passed to the specific class method}
+}
+\value{
+An object of the same type as the input object is provided
+}
+\description{
+Function calls the object-specific smooth functions for provided RLum S4-class objects.
+}
+\details{
+The function provides a generalised access point for specific
+\code{\linkS4class{RLum}} objects.\cr Depending on the input object, the
+corresponding function will be selected. Allowed arguments can be found
+in the documentations of the corresponding \code{\linkS4class{RLum}} class. The smoothing
+is based on an internal function called \code{.smoothing}.
+}
+\section{Methods (by class)}{
+\itemize{
+\item \code{list}: Returns a list of \code{\linkS4class{RLum}} objects that had been passed to \code{\link{smooth_RLum}}
+}}
+
+\note{
+Currently only \code{RLum} objects of class \code{RLum.Data.Curve} and \code{RLum.Analysis} (with curve data) are supported!
+}
+\section{Function version}{
+ 0.1.0 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+##load example data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+
+##create RLum.Data.Curve object from this example
+curve <-
+  set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = "OSL",
+      data = as.matrix(ExampleData.CW_OSL_Curve)
+  )
+
+##plot data without and with smoothing
+plot_RLum(curve)
+plot_RLum(smooth_RLum(curve))
+
+}
+\seealso{
+\code{\linkS4class{RLum.Data.Curve}}, \code{\linkS4class{RLum.Analysis}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). smooth_RLum(): Smoothing of data. Function version 0.1.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{utilities}
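
The man page above also documents a list method for smooth_RLum(). A minimal sketch of that method, reusing the example data and the set_RLum() call already shown in the examples above:

library(Luminescence)

## build an RLum.Data.Curve object from the shipped example data
data(ExampleData.CW_OSL_Curve, envir = environment())
curve <- set_RLum(class = "RLum.Data.Curve",
                  recordType = "OSL",
                  data = as.matrix(ExampleData.CW_OSL_Curve))

## the list method returns a list of smoothed RLum objects
smoothed <- smooth_RLum(list(curve, curve))
length(smoothed)
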
diff --git a/man/structure_RLum.Rd b/man/structure_RLum.Rd
index bab1922..a060f99 100644
--- a/man/structure_RLum.Rd
+++ b/man/structure_RLum.Rd
@@ -26,8 +26,9 @@ corresponding structure function will be selected. Allowed arguments can be foun
 in the documentations of the corresponding \code{\linkS4class{RLum}} class.
 }
 \section{Function version}{
- 0.2.0 (2016-05-02 09:36:06)
+ 0.2.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##load example data
@@ -37,10 +38,6 @@ data(ExampleData.XSYG, envir = environment())
 structure_RLum(OSL.SARMeasurement$Sequence.Object)
 
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
 \seealso{
 \code{\linkS4class{RLum.Data.Curve}},
 \code{\linkS4class{RLum.Data.Image}},
@@ -48,5 +45,13 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
 \code{\linkS4class{RLum.Analysis}},
 \code{\linkS4class{RLum.Results}}
 }
-\keyword{utilities}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team} 
 
+\section{How to cite}{
+Kreutzer, S. (2017). structure_RLum(): General structure function for RLum S4 class objects. Function version 0.2.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{utilities}
diff --git a/man/template_DRAC.Rd b/man/template_DRAC.Rd
index 703912f..da97c49 100644
--- a/man/template_DRAC.Rd
+++ b/man/template_DRAC.Rd
@@ -37,7 +37,7 @@ print(input[[4]])
 input$`Project ID` <- "DRAC-Example"
 input$`Sample ID` <- "Quartz"
 input$`Conversion factors` <- "AdamiecAitken1998"
-input$`ExternalU (ppm)` <- 3.4
+input$`External U (ppm)` <- 3.4
 input$`errExternal U (ppm)` <- 0.51
 input$`External Th (ppm)` <- 14.47
 input$`errExternal Th (ppm)` <- 1.69
@@ -65,10 +65,12 @@ input$`errDe (Gy)` <- 0.2
 output <- use_DRAC(input)
 }
 
+} 
+
+\section{How to cite}{
+Burow, C. (2017). template_DRAC(): Create a DRAC input data template (v1.1). In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Christoph Burow, University of Cologne (Germany)
-}
+
 \references{
 Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
 Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
@@ -76,4 +78,6 @@ Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
 \seealso{
 \code{\link{as.data.frame}} \code{\link{list}}
 }
-
+\author{
+Christoph Burow, University of Cologne (Germany)
+}
diff --git a/man/tune_Data.Rd b/man/tune_Data.Rd
index e8a3f8a..1c13252 100644
--- a/man/tune_Data.Rd
+++ b/man/tune_Data.Rd
@@ -27,8 +27,9 @@ The error can be reduced and sample size increased for specific purpose.
 You should not use this function to improve your poor data set!
 }
 \section{Function version}{
- 0.5.0 (2015-11-29 17:27:48)
+ 0.5.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 ## load example data set
 data(ExampleData.DeValues, envir = environment())
@@ -47,15 +48,19 @@ plot_AbanicoPlot(data = tune_Data(x, decrease.error = 0.1),
 #                summary = c("n", "mean"))
 
 
+} 
+
+\section{How to cite}{
+Dietze, M. (2017). tune_Data(): Tune data for experimental purpose. Function version 0.5.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Michael Dietze, GFZ Potsdam (Germany)
-\cr R Luminescence Package Team}
+
 \references{
 #
 }
 \seealso{
 #
 }
+\author{
+Michael Dietze, GFZ Potsdam (Germany)
+\cr R Luminescence Package Team}
 \keyword{manip}
-
diff --git a/man/use_DRAC.Rd b/man/use_DRAC.Rd
index c78881e..6e24400 100644
--- a/man/use_DRAC.Rd
+++ b/man/use_DRAC.Rd
@@ -43,13 +43,14 @@ pre-formatted XLS/XLSX file is passed to the DRAC website and the
 results are re-imported into R.
 }
 \section{Function version}{
- 0.1.0 (2015-12-05 15:52:49)
+ 0.1.1 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ## (1) Method using the DRAC spreadsheet
 
-file <-  "/PATH/TO/DRAC_Input_and_Output_Template.xlsx"
+file <-  "/PATH/TO/DRAC_Input_Template.csv"
 
 # send the actual IO template spreadsheet to DRAC
 \dontrun{
@@ -67,7 +68,7 @@ input <- template_DRAC()
 input$`Project ID` <- "DRAC-Example"
 input$`Sample ID` <- "Quartz"
 input$`Conversion factors` <- "AdamiecAitken1998"
-input$`ExternalU (ppm)` <- 3.4
+input$`External U (ppm)` <- 3.4
 input$`errExternal U (ppm)` <- 0.51
 input$`External Th (ppm)` <- 14.47
 input$`errExternal Th (ppm)` <- 1.69
@@ -95,13 +96,17 @@ input$`errDe (Gy)` <- 0.2
 output <- use_DRAC(input)
 }
 
+} 
+
+\section{How to cite}{
+Kreutzer, S., Dietze, M., Burow, C. (2017). use_DRAC(): Use DRAC to calculate dose rate data. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Michael Dietze,
-GFZ Potsdam (Germany), Christoph Burow, University of Cologne (Germany)\cr
-\cr R Luminescence Package Team}
+
 \references{
 Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
 Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
 }
-
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France), Michael Dietze,
+GFZ Potsdam (Germany), Christoph Burow, University of Cologne (Germany)\cr
+\cr R Luminescence Package Team}
diff --git a/man/verify_SingleGrainData.Rd b/man/verify_SingleGrainData.Rd
index 38a965f..c1dac73 100644
--- a/man/verify_SingleGrainData.Rd
+++ b/man/verify_SingleGrainData.Rd
@@ -2,7 +2,7 @@
 % Please edit documentation in R/verify_SingleGrainData.R
 \name{verify_SingleGrainData}
 \alias{verify_SingleGrainData}
-\title{Verify single grain data sets and check for invalid grains, i.e. zero light level grains}
+\title{Verify single grain data sets and check for invalid grains, i.e. zero-light level grains}
 \usage{
 verify_SingleGrainData(object, threshold = 10, cleanup = FALSE,
   cleanup_level = "aliquot", verbose = TRUE, plot = FALSE)
@@ -24,9 +24,9 @@ is selected every single curve marked as \code{invalid} is removed. If \code{"al
 curves of one aliquot (grain or disc) can be marked as invalid, but will not be removed. An aliquot
 will be only removed if all curves of this aliquot are marked as invalid.}
 
-\item{verbose}{\code{\link{logical}} (with default): enables or disables terminal feedback}
+\item{verbose}{\code{\link{logical}} (with default): enables or disables the terminal feedback}
 
-\item{plot}{\code{\link{logical}} (with default): enables or disables graphical feedback}
+\item{plot}{\code{\link{logical}} (with default): enables or disables the graphical feedback}
 }
 \value{
 The function returns
@@ -50,17 +50,17 @@ The original function call\cr
 
 \bold{Output variation}\cr
 
-For \code{cleanup = TRUE} the same object as the input, but with cleaned up (invalid curves removed).
+For \code{cleanup = TRUE} the same object as the input is returned, but cleaned up (invalid curves were removed).
 This means: Either an \code{\linkS4class{Risoe.BINfileData}} or an \code{\linkS4class{RLum.Analysis}}
 object is returned in such cases. An \code{\linkS4class{Risoe.BINfileData}} object can be exported
 to a BIN-file by using the function \code{\link{write_R2BIN}}.
 }
 \description{
-This function tries to identify automatically zero light level curves (grains) from single grain data
+This function tries to identify automatically zero-light level curves (grains) from single grain data
 measurements. \cr
 }
 \details{
-\bold{How the method works?}\cr
+\bold{How does the method work?}\cr
 
 The function compares the expected values (\eqn{E(X)}) and the variance (\eqn{Var(X)})
 of the count values for each curve. Assuming that the background roughly follows a poisson
@@ -72,11 +72,11 @@ Thus the function checks for:
 
 \deqn{abs(E(x) - Var(x)) >= \Theta}
 
-With \eqn{\Theta} an arbitray, user defined, threshold. Values above indicating curves
+Here \eqn{\Theta} is an arbitrary, user-defined threshold. Values above the threshold indicate curves
 comprising a signal.\cr
 
 Note: the absolute difference of \eqn{E(X)} and \eqn{Var(x)} instead of the ratio was chosen as
-both can become 0 which would result in \code{Inf} values.
+both terms can become 0, which would result in 0 or \code{Inf} if the ratio were calculated.
 }
 \note{
 This function can work with \code{\linkS4class{Risoe.BINfileData}} objects or
@@ -89,8 +89,9 @@ within a SAR cycle are removed as well. Therefore it is strongly recommended to
 \code{cleanup = TRUE} carefully.
 }
 \section{Function version}{
- 0.2.0 (2016-06-20 19:34:56)
+ 0.2.0 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##01 - basic example I
@@ -120,10 +121,12 @@ object <- verify_SingleGrainData(object, cleanup = TRUE)
 write_R2BIN(object, paste0(dirname(file),"/", basename(file), "_CLEANED.BIN"))
 }
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). verify_SingleGrainData(): Verify single grain data sets and check for invalid grains, i.e. zero-light level grains. Function version 0.2.0. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-\cr R Luminescence Package Team}
+
 \references{
 -
 }
@@ -131,6 +134,8 @@ Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
 \code{\linkS4class{Risoe.BINfileData}}, \code{\linkS4class{RLum.Analysis}},
 \code{\link{write_R2BIN}}, \code{\link{read_BIN2R}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team}
 \keyword{datagen}
 \keyword{manip}
-
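
The details section above checks abs(E(X) - Var(X)) >= Theta for every curve, assuming a roughly Poisson-distributed background. A standalone R sketch of that check on simulated counts; this only illustrates the statistic and is not the package's internal implementation:

## simulate a background-only curve and a curve carrying a signal
set.seed(1)
background <- rpois(100, lambda = 5)
signal     <- rpois(100, lambda = 5) + 200 * exp(-(1:100) / 20)

## the statistic described above: |E(X) - Var(X)|
test_statistic <- function(x) abs(mean(x) - var(x))

threshold <- 10                           # default 'threshold' of the function
test_statistic(background) >= threshold   # FALSE, i.e. flagged as invalid
test_statistic(signal) >= threshold       # TRUE, i.e. a signal is present
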
diff --git a/man/write_R2BIN.Rd b/man/write_R2BIN.Rd
index 6f31687..9631de6 100644
--- a/man/write_R2BIN.Rd
+++ b/man/write_R2BIN.Rd
@@ -60,8 +60,9 @@ ROI definitions (introduced in BIN-file version 8) are not supported! There are
 ignored by the function \code{\link{read_BIN2R}}.
 }
 \section{Function version}{
- 0.4.0 (2016-06-13 21:17:19)
+ 0.4.2 (2017-06-29 18:40:14)
 }
+
 \examples{
 
 ##uncomment for usage
@@ -69,18 +70,22 @@ ignored by the function \code{\link{read_BIN2R}}.
 ##data(ExampleData.BINfileData, envir = environment())
 ##write_R2BIN(CWOSL.SAR.Data, file="[your path]/output.bin")
 
+} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). write_R2BIN(): Export Risoe.BINfileData into Risoe BIN-file. Function version 0.4.2. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
 }
-\author{
-Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
-(France)
-\cr R Luminescence Package Team}
+
 \references{
 DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016.
-\url{http://www.nutech.dtu.dk/english/Products-and-Services/Dosimetry/Radiation-Measurement-Instruments/TL_OSL_reader/Manuals}
+\url{http://www.nutech.dtu.dk/english/products-and-services/radiation-instruments/tl_osl_reader/manuals}
 }
 \seealso{
 \code{\link{read_BIN2R}}, \code{\linkS4class{Risoe.BINfileData}},
 \code{\link{writeBin}}
 }
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne
+(France)
+\cr R Luminescence Package Team}
 \keyword{IO}
-
diff --git a/man/write_RLum2CSV.Rd b/man/write_RLum2CSV.Rd
new file mode 100644
index 0000000..94029c3
--- /dev/null
+++ b/man/write_RLum2CSV.Rd
@@ -0,0 +1,81 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/write_RLum2CSV.R
+\name{write_RLum2CSV}
+\alias{write_RLum2CSV}
+\title{Export RLum-objects to CSV}
+\usage{
+write_RLum2CSV(object, path = NULL, prefix = "", export = TRUE, ...)
+}
+\arguments{
+\item{object}{\code{\linkS4class{RLum}} or a \code{\link{list}} of \code{RLum} objects (\bold{required}): objects to be written}
+
+\item{path}{\code{\link{character}} (optional): character string naming folder for the output to be written. If nothing
+is provided, \code{path} will be set to the working directory. Note: this argument is ignored if
+the argument \code{export} is set to \code{FALSE}.}
+
+\item{prefix}{\code{\link{character}} (with default): optional prefix to name the files. This prefix
+is valid for all written files}
+
+\item{export}{\code{\link{logical}} (with default): enable or disable the file export. If set to \code{FALSE}
+nothing is written to the file connection, but a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+is returned instead}
+
+\item{\dots}{further arguments that will be passed to the function \code{\link[utils]{write.table}}. All arguments
+except the argument \code{file} are supported}
+}
+\value{
+The function returns either a CSV-file (or many of them) or for the option \code{export == FALSE}
+a list comprising objects of type \code{\link{data.frame}} and \code{\link{matrix}}
+}
+\description{
+This function exports \code{\linkS4class{RLum}}-objects to CSV-files using the R function
+\code{\link[utils]{write.table}}. All \code{\linkS4class{RLum}}-objects are supported, but the
+export is lossy, i.e. the pure numerical values are exported only. Information that cannot
+be coerced to a \code{\link{data.frame}} or a \code{\link{matrix}} is discarded, as well as
+metadata.
+}
+\details{
+However, in combination with the implemented import functions, nearly every supported
+import data format can be exported to CSV-files, which gives a great deal of freedom in terms of
+compatibility with other tools.\cr
+
+\bold{Input is a list of objects}\cr
+
+If the input is a \code{\link{list}} of objects all explicit function arguments can be provided
+as \code{\link{list}}.
+}
+\section{Function version}{
+ 0.1.1 (2017-06-29 18:40:14)
+}
+
+\examples{
+
+##transform values to a list
+data(ExampleData.BINfileData, envir = environment())
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]
+write_RLum2CSV(object, export = FALSE)
+
+\dontrun{
+
+##export data to CSV-files in the working directory;
+##BE CAREFUL, this example creates many files on your file system
+data(ExampleData.BINfileData, envir = environment())
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]
+write_RLum2CSV(object)
+
+}
+
+}
+\seealso{
+\code{\linkS4class{RLum.Analysis}}, \code{\linkS4class{RLum.Data}}, \code{\linkS4class{RLum.Results}},
+\code{\link[utils]{write.table}}
+}
+\author{
+Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+\cr R Luminescence Package Team} 
+
+\section{How to cite}{
+Kreutzer, S. (2017). write_RLum2CSV(): Export RLum-objects to CSV. Function version 0.1.1. In: Kreutzer, S., Dietze, M., Burow, C., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J. (2017). Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.7.5. https://CRAN.R-project.org/package=Luminescence
+}
+
+\keyword{IO}
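
The new write_RLum2CSV() man page above documents path, prefix, export and the pass-through of write.table() arguments. A sketch that writes into a temporary directory instead of the working directory; tempdir() and the prefix are only illustrative choices:

library(Luminescence)

## same example object as used in the man page above
data(ExampleData.BINfileData, envir = environment())
object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]

## export to CSV files in a temporary directory; 'sep' is passed
## through to utils::write.table() as documented above
write_RLum2CSV(object,
               path = tempdir(),
               prefix = "CWOSL_",
               sep = ";")
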
diff --git a/src/Luminescence_init.c b/src/Luminescence_init.c
new file mode 100644
index 0000000..4bb4136
--- /dev/null
+++ b/src/Luminescence_init.c
@@ -0,0 +1,26 @@
+#include <R.h>
+#include <Rinternals.h>
+#include <stdlib.h> // for NULL
+#include <R_ext/Rdynload.h>
+
+/* FIXME:
+  Check these declarations against the C/Fortran source code.
+*/
+
+  /* .Call calls */
+extern SEXP Luminescence_analyse_IRSARRF_SRS(SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP Luminescence_create_RLumDataCurve_matrix(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP);
+extern SEXP Luminescence_create_UID();
+
+static const R_CallMethodDef CallEntries[] = {
+  {"Luminescence_analyse_IRSARRF_SRS",         (DL_FUNC) &Luminescence_analyse_IRSARRF_SRS,          5},
+  {"Luminescence_create_RLumDataCurve_matrix", (DL_FUNC) &Luminescence_create_RLumDataCurve_matrix, 10},
+  {"Luminescence_create_UID",                  (DL_FUNC) &Luminescence_create_UID,                   0},
+  {NULL, NULL, 0}
+};
+
+void R_init_Luminescence(DllInfo *dll)
+{
+  R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
+  R_useDynamicSymbols(dll, FALSE);
+}
diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp
index cc9352a..16f01a7 100644
--- a/src/RcppExports.cpp
+++ b/src/RcppExports.cpp
@@ -7,15 +7,37 @@
 using namespace Rcpp;
 
 // analyse_IRSARRF_SRS
-RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited, NumericVector values_natural_limited, int n_MC);
-RcppExport SEXP Luminescence_analyse_IRSARRF_SRS(SEXP values_regenerated_limitedSEXP, SEXP values_natural_limitedSEXP, SEXP n_MCSEXP) {
+RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited, NumericVector values_natural_limited, NumericVector vslide_range, int n_MC, bool trace);
+RcppExport SEXP Luminescence_analyse_IRSARRF_SRS(SEXP values_regenerated_limitedSEXP, SEXP values_natural_limitedSEXP, SEXP vslide_rangeSEXP, SEXP n_MCSEXP, SEXP traceSEXP) {
 BEGIN_RCPP
     Rcpp::RObject rcpp_result_gen;
     Rcpp::RNGScope rcpp_rngScope_gen;
     Rcpp::traits::input_parameter< NumericVector >::type values_regenerated_limited(values_regenerated_limitedSEXP);
     Rcpp::traits::input_parameter< NumericVector >::type values_natural_limited(values_natural_limitedSEXP);
+    Rcpp::traits::input_parameter< NumericVector >::type vslide_range(vslide_rangeSEXP);
     Rcpp::traits::input_parameter< int >::type n_MC(n_MCSEXP);
-    rcpp_result_gen = Rcpp::wrap(analyse_IRSARRF_SRS(values_regenerated_limited, values_natural_limited, n_MC));
+    Rcpp::traits::input_parameter< bool >::type trace(traceSEXP);
+    rcpp_result_gen = Rcpp::wrap(analyse_IRSARRF_SRS(values_regenerated_limited, values_natural_limited, vslide_range, n_MC, trace));
+    return rcpp_result_gen;
+END_RCPP
+}
+// create_RLumDataCurve_matrix
+NumericMatrix create_RLumDataCurve_matrix(NumericVector DATA, int VERSION, int NPOINTS, String LTYPE, int LOW, int HIGH, int AN_TEMP, int TOLDELAY, int TOLON, int TOLOFF);
+RcppExport SEXP Luminescence_create_RLumDataCurve_matrix(SEXP DATASEXP, SEXP VERSIONSEXP, SEXP NPOINTSSEXP, SEXP LTYPESEXP, SEXP LOWSEXP, SEXP HIGHSEXP, SEXP AN_TEMPSEXP, SEXP TOLDELAYSEXP, SEXP TOLONSEXP, SEXP TOLOFFSEXP) {
+BEGIN_RCPP
+    Rcpp::RObject rcpp_result_gen;
+    Rcpp::RNGScope rcpp_rngScope_gen;
+    Rcpp::traits::input_parameter< NumericVector >::type DATA(DATASEXP);
+    Rcpp::traits::input_parameter< int >::type VERSION(VERSIONSEXP);
+    Rcpp::traits::input_parameter< int >::type NPOINTS(NPOINTSSEXP);
+    Rcpp::traits::input_parameter< String >::type LTYPE(LTYPESEXP);
+    Rcpp::traits::input_parameter< int >::type LOW(LOWSEXP);
+    Rcpp::traits::input_parameter< int >::type HIGH(HIGHSEXP);
+    Rcpp::traits::input_parameter< int >::type AN_TEMP(AN_TEMPSEXP);
+    Rcpp::traits::input_parameter< int >::type TOLDELAY(TOLDELAYSEXP);
+    Rcpp::traits::input_parameter< int >::type TOLON(TOLONSEXP);
+    Rcpp::traits::input_parameter< int >::type TOLOFF(TOLOFFSEXP);
+    rcpp_result_gen = Rcpp::wrap(create_RLumDataCurve_matrix(DATA, VERSION, NPOINTS, LTYPE, LOW, HIGH, AN_TEMP, TOLDELAY, TOLON, TOLOFF));
     return rcpp_result_gen;
 END_RCPP
 }
diff --git a/src/analyse_IRSARRF_SRS.cpp b/src/analyse_IRSARRF_SRS.cpp
index d507fc4..ae4c7f9 100644
--- a/src/analyse_IRSARRF_SRS.cpp
+++ b/src/analyse_IRSARRF_SRS.cpp
@@ -1,41 +1,144 @@
 //analyse_IRSARRF_SRS.cpp
 //author: Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
-//version: 0.2.0 [2015-10-10]
+//version: 0.3.5 [2017-02-06]
 //Function calculates the squared residuals for the R function analyse_IRSAR.RF()
-//including MC runs for the obtained minimum
+//including MC runs for the obtained minimum. The function allows a horizontal and
+//a vertical sliding of the curve
 //
+#include <RcppArmadillo.h>
 #include <RcppArmadilloExtensions/sample.h>
-// [[Rcpp::depends(RcppArmadillo)]]
 
+// [[Rcpp::depends(RcppArmadillo)]]
 using namespace Rcpp;
 
 // [[Rcpp::export(".analyse_IRSARRF_SRS")]]
 RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited,
                                     NumericVector values_natural_limited,
-                                    int n_MC
+                                    NumericVector vslide_range,
+                                    int n_MC,
+                                    bool trace = false
                                     ){
 
+  //check for the vslide_range()
+  if(vslide_range.length() > 1e+08){
+    stop("[:::.analyse_IRSAR_SRS()] 'vslide_range' exceeded maximum size (1e+08)!");
+  }
 
   //pre-define variables
   NumericVector residuals = values_natural_limited.length();
   NumericVector results = values_regenerated_limited.size() - values_natural_limited.size();
   NumericVector results_vector_min_MC = n_MC;
 
+   //variables for the algorithm
+   int v_length;
+   int v_index;
+   NumericVector v_leftright(2); //the virtual vector
+   NumericVector t_leftright(2); //the test points
+   NumericVector c_leftright(2); //the calculation
+
+   //(1) calculate sum of the squared residuals
+   // this will be used to find the best fit of the curves (which is the minimum)
+
+   //initialise values
+   v_length = vslide_range.length();
+   v_index = 0;
+
+   v_leftright[0] = 0;
+   v_leftright[1] = vslide_range.length() - 1;
+
+   if(v_length == 1){
+     t_leftright[0] = 0;
+     t_leftright[1] = 0;
+
+   }else{
+     t_leftright[0] = v_length/3;
+     t_leftright[1] = 2 * v_length/3;
+
+   }
+
+   //***TRACE****
+   if(trace == true){
+      Rcout << "\n\n [:::.analyse_IRSAR_SRS()]";
+      Rcout << "\n\n--- Initialisation --- \n ";
+      Rcout << "\n >> v_leftright: " << v_leftright;
+      Rcout << "\n >> t_leftright: " << t_leftright;
+      Rcout << "\n\n --- Optimisation --- \n ";
+      Rcout << "\n ---------------------------------------------------------------------------------------------------------";
+      Rcout << "\n v_length \t\t v_leftright \t\t  c_leftright  \t\t\t\t absolute offset";
+      Rcout << "\n ---------------------------------------------------------------------------------------------------------";
+
+   }
+
+   //start loop
+   do {
+
+    for (int t=0;t<t_leftright.length(); t++){
+
+      //HORIZONTAL SLIDING CORE -------------------------------------------------------------(start)
+      //slide the curves against each other
+      for (int i=0; i<results.length(); ++i){
+
+        //calculate squared residuals along one curve
+        for (int j=0; j<values_natural_limited.length(); ++j){
+          residuals[j] = pow((values_regenerated_limited[j+i] - (values_natural_limited[j] + vslide_range[t_leftright[t]])),2);
+
+        }
+
+       //sum results and fill the results vector
+       results[i] = sum(residuals);
 
-  //(1) calculate sum of the squared residuals
-  // this will be used to find the best fit of the curves (which is the minimum)
-  for (int i=0; i<results.length(); ++i){
+      }
+
+      //HORIZONTAL SLIDING CORE ---------------------------------------------------------------(end)
+      c_leftright[t] = min(results);
 
-    //squared residuals
-    for (int j=0; j<values_natural_limited.length(); ++j){
-      residuals[j] = pow((values_regenerated_limited[j+i] - values_natural_limited[j]),2);
 
     }
+      //compare results and re-initialise variables
 
-    //sum up the residuals
-    results[i] = sum(residuals);
+      if(c_leftright[0] < c_leftright[1]){
+        v_index = v_leftright[0]; //set index to left test index
 
-  }
+        //update vector window (the left remains the same)
+        v_leftright[1] = t_leftright[1];
+
+        //update window length
+        v_length = v_leftright[1] - v_leftright[0];
+
+      }else if (c_leftright[0] > c_leftright[1]){
+        v_index = v_leftright[1]; //set index to right test index
+
+        //update vector window (the right remains the same this time)
+        v_leftright[0] = t_leftright[0];
+
+        //update window length
+        v_length = v_leftright[1] - v_leftright[0];
+
+      }else{
+        v_length = 1;
+
+      }
+
+      //update test point index
+      t_leftright[0] = v_leftright[0] + v_length/3;
+      t_leftright[1] = v_leftright[0] + (2 * (v_length/3));
+
+      //***TRACE****
+      if(trace == true){
+        Rcout << "\n " << v_length << " \t\t\t " << v_leftright << " \t\t " << c_leftright << " \t\t\t " << vslide_range[v_index];
+
+      }
+
+   } while (v_length > 1);
+
+   //***TRACE****
+   if(trace == true){
+    Rcout << "\n ---------------------------------------------------------------------------------------------------------";
+    Rcout << "\n >> SRS minimum: \t\t " << c_leftright[0];
+    Rcout << "\n >> Vertical offset index: \t " << v_index + 1;
+    Rcout << "\n >> Vertical offset absolute: \t " << vslide_range[v_index] << "\n\n";
+
+   }
 
   //(2) error calculation
   //use this values to bootstrap and find minimum values and to account for the variation
@@ -43,7 +146,10 @@ RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited,
   //
   //using the obtained sliding vector and the function RcppArmadillo::sample() (which equals the
   //function sample() in R, but faster)
-  //http://gallery.rcpp.org/articles/using-the-Rcpp-based-sample-implementation/
+  //http://gallery.rcpp.org/articles/using-the-Rcpp-based-sample-implementation
+
+
+ //this follows the way described in Frouin et al., 2017 ... still ...
   for (int i=0; i<results_vector_min_MC.length(); ++i){
     results_vector_min_MC[i] = min(
       RcppArmadillo::sample(
@@ -55,12 +161,19 @@ RcppExport SEXP analyse_IRSARRF_SRS(NumericVector values_regenerated_limited,
     );
   }
 
-  //build list with two elements
+  //build list with four elements
   //sliding_vector: the original results_vector (this can be used to reproduced the results in R)
+  //sliding_vector_min_index: the index of the minimum, it is later also calculated in R, however, sometimes we may need it directly
   //sliding_vector_min_MC: minimum values based on bootstrapping
+  //vslide_index: this is the index where the minimum was identified for the vertical sliding
+  //vslide_minimum: returns the identified minimum value; this helps to re-run the function, as the
+  //algorithm might have got trapped in a local minimum
   List results_list;
     results_list["sliding_vector"] = results;
+    results_list["sliding_vector_min_index"] = (int)which_min(results) + 1;
     results_list["sliding_vector_min_MC"] = results_vector_min_MC;
+    results_list["vslide_index"] = v_index + 1;
+    results_list["vslide_minimum"] = c_leftright[0]; //left and right should be similar
 
   return results_list;
 }
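
The rewritten C++ routine above finds the vertical offset by repeatedly trisecting vslide_range and keeping the sub-interval whose test point gives the smaller sum of squared residuals. A standalone R sketch of that interval-shrinking idea on a simple convex objective; objective() and the range are illustrative and not part of the package code:

## toy objective: squared residuals of two identical curves shifted by 3
objective <- function(offset) sum((rep(3, 100) - rep(offset, 100))^2)

vslide_range <- seq(-10, 10, by = 0.01)
left <- 1
right <- length(vslide_range)

## trisection: shrink the index window around the minimum
while (right - left > 2) {
  t1 <- left + (right - left) %/% 3
  t2 <- right - (right - left) %/% 3
  if (objective(vslide_range[t1]) < objective(vslide_range[t2])) {
    right <- t2
  } else {
    left <- t1
  }
}

## best offset within the remaining window (approximately 3)
candidates <- left:right
vslide_range[candidates[which.min(sapply(vslide_range[candidates], objective))]]
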
diff --git a/src/create_RLumDataCurve_matrix.cpp b/src/create_RLumDataCurve_matrix.cpp
new file mode 100644
index 0000000..f14f525
--- /dev/null
+++ b/src/create_RLumDataCurve_matrix.cpp
@@ -0,0 +1,115 @@
+//create_RLumDataCurve_matrix.cpp
+//author: Sebastian Kreutzer, IRAMAT-CRP2A, Universite Bordeaux Montaigne (France)
+//version: 0.1.0 [2016-06-28]
+//Function to create the RLum.Data.Curve() matrix ... faster than in R itself
+#include <Rcpp.h>
+using namespace Rcpp;
+
+// -----------------------------------------------------------------------------------------------
+// Define our own function to create a sequence for the x-axis
+// .. but we do not export it to avoid side effects, as this function is not the same as the
+// .. base R function seq()
+// ..no export
+NumericVector seq(int from, int to, double length_out) {
+
+  //set variables
+  NumericVector sequence = length_out;
+  double by = (to - from) / (length_out  - 1);
+
+  //loop and create sequence
+  for (int i=0; i < length_out; ++i){
+    if(i == 0){
+      sequence[i] = from;
+
+    }else{
+      sequence[i] = sequence[i-1] + by;
+
+    }
+
+  }
+  return sequence;
+}
+
+// -----------------------------------------------------------------------------------------------
+// The function we want to export
+// [[Rcpp::export(".create_RLumDataCurve_matrix")]]
+NumericMatrix create_RLumDataCurve_matrix(
+  NumericVector DATA,
+  int VERSION,
+  int NPOINTS,
+  String LTYPE,
+  int LOW,
+  int HIGH,
+  int AN_TEMP,
+  int TOLDELAY,
+  int TOLON,
+  int TOLOFF
+
+){
+
+  //generate X vectors
+  if(NPOINTS > 0){
+
+    //set needed vectors and predefine matrix
+    NumericVector X = NPOINTS;
+    NumericMatrix curve_matrix(NPOINTS, 2);
+
+    //fill x column for the case we have a TL curve
+    if(LTYPE == "TL" && VERSION >= 4){
+
+      //the heating curve consists of three vectors that need to
+      //be combined
+      //
+      //(A) - the start ramping
+      NumericVector heat_ramp_start = seq(LOW,AN_TEMP,TOLDELAY);
+      //
+      //(B) - the plateau
+      //B is simply TOLON
+      //
+      //(C) - the end ramping
+      NumericVector heat_ramp_end = seq(AN_TEMP, HIGH, TOLOFF);
+
+      //set index counters
+      int c = 0;
+
+      //fill vector for temperature
+      for(int i = 0; i < X.length(); i++){
+        if(i < heat_ramp_start.length()){
+          X[i] = heat_ramp_start[i];
+
+        }else if(i >= heat_ramp_start.length() && i < heat_ramp_start.length() + TOLON){
+          X[i] = AN_TEMP;
+
+        }else if(i >= heat_ramp_start.length() + TOLON){
+          X[i] = heat_ramp_end[c];
+          c++;
+
+        }
+
+      }
+
+
+    }else{
+      X = seq(LOW, HIGH, NPOINTS);
+
+    }
+
+    //set final matrix
+    curve_matrix.column(0) = X;
+    curve_matrix.column(1) = DATA;
+
+    return(curve_matrix);
+
+  }else{
+
+    //set final matrix
+    NumericMatrix curve_matrix(1, 2);
+    curve_matrix(0,0) = NumericVector::get_na();
+    curve_matrix(0,1) = NumericVector::get_na();
+
+    return(curve_matrix);
+
+  }
+
+}
+
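
For orientation, the x-axis logic of create_RLumDataCurve_matrix() boils down to this: TL curves recorded with file format version >= 4 get a temperature axis made of a ramp from LOW to AN_TEMP over TOLDELAY channels, a plateau of TOLON channels at AN_TEMP, and a final ramp from AN_TEMP to HIGH over TOLOFF channels; all other record types get NPOINTS evenly spaced values between LOW and HIGH. A minimal R sketch of that construction follows; the helper name build_x_axis() and the metadata values in the usage line are purely illustrative assumptions, not taken from the package.

  ## minimal R sketch of the x-axis construction, illustrative only
  build_x_axis <- function(NPOINTS, LTYPE, VERSION, LOW, HIGH,
                           AN_TEMP, TOLDELAY, TOLON, TOLOFF) {
    if (LTYPE == "TL" && VERSION >= 4) {
      x <- c(seq(LOW, AN_TEMP, length.out = TOLDELAY),  # (A) start ramp
             rep(AN_TEMP, TOLON),                       # (B) plateau
             seq(AN_TEMP, HIGH, length.out = TOLOFF))   # (C) end ramp
      head(x, NPOINTS)  # the C++ loop writes exactly NPOINTS channels
    } else {
      seq(LOW, HIGH, length.out = NPOINTS)
    }
  }

  ## hypothetical metadata values, only to show the ramp/plateau/ramp shape
  plot(build_x_axis(250, "TL", 4, 20, 400, 400, 200, 20, 30), type = "l")
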
diff --git a/tests/testthat.R b/tests/testthat.R
new file mode 100644
index 0000000..b8d60c8
--- /dev/null
+++ b/tests/testthat.R
@@ -0,0 +1,4 @@
+library(testthat)
+library(Luminescence)
+
+test_check("Luminescence")
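
With tests/testthat.R in place, the suite runs automatically during R CMD check. A minimal sketch of running the same tests interactively, assuming a source checkout of the package as the working directory:

  ## run the new unit tests interactively
  library(testthat)
  library(Luminescence)
  test_dir("tests/testthat")

  ## or, with devtools installed, from the package root:
  # devtools::test()
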
diff --git a/tests/testthat/test_Analyse_SAROSLdata.R b/tests/testthat/test_Analyse_SAROSLdata.R
new file mode 100644
index 0000000..c50601b
--- /dev/null
+++ b/tests/testthat/test_Analyse_SAROSLdata.R
@@ -0,0 +1,17 @@
+context("Test old Analyse_SAROSLdata()")
+
+test_that("full example test", {
+  testthat::skip_on_cran()
+  data(ExampleData.BINfileData, envir = environment())
+  output <- Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data,
+                                signal.integral = c(1:5),
+                                background.integral = c(900:1000),
+                                position = c(1:1),
+                                output.plot = FALSE)
+
+  ##checks
+  expect_is(output, "list")
+  expect_length(output, 3)
+
+
+})
diff --git a/tests/testthat/test_CW2pX.R b/tests/testthat/test_CW2pX.R
new file mode 100644
index 0000000..b7f9e93
--- /dev/null
+++ b/tests/testthat/test_CW2pX.R
@@ -0,0 +1,71 @@
+context("CW2X Conversion Tests")
+
+##load data
+data(ExampleData.CW_OSL_Curve, envir = environment())
+values <- CW_Curve.BosWallinga2012
+
+test_that("Check the example and the numerical values", {
+  testthat::skip_on_cran()
+  values_pLM <- CW2pLM(values)
+  values_pLMi <- CW2pLMi(values, P = 1/20)
+  values_pLMi_alt <- CW2pLMi(values)
+  values_pHMi <- CW2pHMi(values, delta = 40)
+  values_pHMi_alt <- CW2pHMi(values)
+  values_pHMi_alt1 <- CW2pHMi(values, delta = 2)
+  values_pPMi <- CW2pPMi(values, P = 1/10)
+
+    ##check conversion sum values
+    expect_equal(round(sum(values_pLM), digits = 0),90089)
+    expect_equal(round(sum(values_pLMi[,1:2]), digits = 0),197522)
+    expect_equal(round(sum(values_pLMi_alt[,1:2]), digits = 0),197522)
+    expect_equal(round(sum(values_pHMi[,1:2]), digits = 0),217431)
+    expect_equal(round(sum(values_pHMi_alt[,1:2]), digits = 0),217519)
+    expect_equal(round(sum(values_pHMi_alt1[,1:2]), digits = 0), 221083)
+    expect_equal(round(sum(values_pPMi[,1:2]), digits = 0),196150)
+
+
+})
+
+test_that("Test RLum.Types", {
+  testthat::skip_on_cran()
+  ##load CW-OSL curve data
+  data(ExampleData.CW_OSL_Curve, envir = environment())
+  object <-
+    set_RLum(
+      class = "RLum.Data.Curve",
+      data = as.matrix(ExampleData.CW_OSL_Curve),
+      curveType = "measured",
+      recordType = "OSL"
+    )
+
+
+  ##transform values
+  expect_is(CW2pLM(object), class = "RLum.Data.Curve")
+  expect_is(CW2pLMi(object), class = "RLum.Data.Curve")
+  expect_is(CW2pHMi(object), class = "RLum.Data.Curve")
+  expect_is(CW2pPMi(object), class = "RLum.Data.Curve")
+
+  ##test error handling
+  expect_error(CW2pLMi(values, P = 0), regexp = "[CW2pLMi] P has to be > 0!", fixed = TRUE)
+  expect_warning(CW2pLMi(values, P = 10))
+  expect_error(object = CW2pLM(values = matrix(0, 2)))
+  expect_error(object = CW2pLMi(values = matrix(0, 2)))
+  expect_error(object = CW2pHMi(values = matrix(0, 2)))
+  expect_error(object = CW2pPMi(values = matrix(0, 2)))
+
+  object@recordType <- "RF"
+  expect_error(object = CW2pLM(values = object),
+               regexp = "[CW2pLM()] recordType RF is not allowed for the transformation!",
+               fixed = TRUE)
+  expect_error(object = CW2pLMi(values = object),
+               regexp = "[CW2pLMi()] recordType RF is not allowed for the transformation!",
+               fixed = TRUE)
+  expect_error(object = CW2pHMi(values = object),
+               regexp = "[CW2pHMi()] recordType RF is not allowed for the transformation!",
+               fixed = TRUE)
+  expect_error(object = CW2pPMi(values = object),
+               regexp = "[CW2pPMi()] recordType RF is not allowed for the transformation!",
+               fixed = TRUE)
+
+
+})
diff --git a/tests/testthat/test_PSL2RisoeBINfiledata.R b/tests/testthat/test_PSL2RisoeBINfiledata.R
new file mode 100644
index 0000000..6632019
--- /dev/null
+++ b/tests/testthat/test_PSL2RisoeBINfiledata.R
@@ -0,0 +1,14 @@
+context("Test PSL2Risoe.BINfileData")
+
+
+test_that("simple test", {
+  testthat::skip_on_cran()
+  data("ExampleData.portableOSL", envir = environment())
+  merged <- merge_RLum(ExampleData.portableOSL)
+  bin <- PSL2Risoe.BINfileData(merged)
+
+  ##checks
+  expect_is(bin, "Risoe.BINfileData")
+  expect_equal(length(bin), 70)
+
+})
diff --git a/tests/testthat/test_RisoeBINfileData-class.R b/tests/testthat/test_RisoeBINfileData-class.R
new file mode 100644
index 0000000..b3dd5fa
--- /dev/null
+++ b/tests/testthat/test_RisoeBINfileData-class.R
@@ -0,0 +1,16 @@
+context("RisoeBINfileData Class Tests")
+
+test_that("Check the example and the numerical values", {
+  testthat::skip_on_cran()
+  ##construct empty object
+  temp <-
+    set_Risoe.BINfileData(METADATA = data.frame(), DATA = list(), .RESERVED = list())
+
+  ##get function and check whether we get NULL
+  expect_null(get_Risoe.BINfileData(temp))
+
+
+  ##check object
+  expect_is(temp, class = "Risoe.BINfileData")
+
+})
diff --git a/tests/testthat/test_Second2Gray.R b/tests/testthat/test_Second2Gray.R
new file mode 100644
index 0000000..7956239
--- /dev/null
+++ b/tests/testthat/test_Second2Gray.R
@@ -0,0 +1,21 @@
+context("Second2Gray")
+
+data(ExampleData.DeValues, envir = environment())
+results <- Second2Gray(ExampleData.DeValues$BT998, c(0.2,0.01))
+results_alt1 <- Second2Gray(ExampleData.DeValues$BT998, c(0.2,0.01), error.propagation = "gaussian")
+results_alt2 <- Second2Gray(ExampleData.DeValues$BT998, c(0.2,0.01), error.propagation = "absolute")
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_is(results, class = "data.frame", info = NULL, label = NULL)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+  expect_equal(sum(results[[1]]), 14754.09)
+  expect_equal(sum(results[[2]]), 507.692)
+  expect_equal(sum(results_alt1[[2]]), 895.911)
+  expect_equal(sum(results_alt2[[2]]), 1245.398)
+
+})
diff --git a/tests/testthat/test_analyse_IRSARRF.R b/tests/testthat/test_analyse_IRSARRF.R
new file mode 100644
index 0000000..52d14bc
--- /dev/null
+++ b/tests/testthat/test_analyse_IRSARRF.R
@@ -0,0 +1,54 @@
+context("analyse_IRSAR.RF")
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+
+  set.seed(1)
+  data(ExampleData.RLum.Analysis, envir = environment())
+  results_fit <- analyse_IRSAR.RF(object = IRSAR.RF.Data, plot = FALSE, method = "FIT")
+  results_slide <- analyse_IRSAR.RF(object = IRSAR.RF.Data, plot = FALSE, method = "SLIDE", n.MC = NULL)
+  results_slide_alt <-
+    analyse_IRSAR.RF(
+      object = IRSAR.RF.Data,
+      plot = FALSE,
+      method = "SLIDE",
+      n.MC = 10,
+      method.control = list(vslide_range = 'auto', trace_vslide = TRUE),
+      txtProgressBar = FALSE
+    )
+
+
+  expect_equal(is(results_fit), c("RLum.Results", "RLum"))
+  expect_equal(length(results_fit), 5)
+  expect_equal(length(results_slide), 5)
+  expect_is(results_fit$fit, class = "nls", info = NULL, label = NULL)
+  expect_is(results_slide$fit, class = "nls", info = NULL, label = NULL)
+  expect_length(results_slide$slide, 10)
+
+
+  expect_equal(results_fit$data$DE, 623.25)
+  expect_equal(results_fit$data$DE.LOWER, 600.63)
+  expect_equal(results_slide$data$DE, 610.17)
+  expect_equal(round(results_slide_alt$data$DE, digits = 0), 384)
+
+
+})
+
+test_that("test controlled chrash conditions", {
+  testthat::skip_on_cran()
+
+  ##the sliding range should not exceed a certain value ... test it
+  data(ExampleData.RLum.Analysis, envir = environment())
+  expect_error(
+    analyse_IRSAR.RF(
+      object = IRSAR.RF.Data,
+      plot = FALSE,
+      method = "SLIDE",
+      n.MC = 10,
+      method.control = list(vslide_range = c(0,1e+08)),
+      txtProgressBar = FALSE
+    ), regexp = "[:::.analyse_IRSAR_SRS()] 'vslide_range' exceeded maximum size (1e+08)!", fixed = TRUE)
+
+
+
+})
diff --git a/tests/testthat/test_analyse_SARCWOSL.R b/tests/testthat/test_analyse_SARCWOSL.R
new file mode 100644
index 0000000..c6c8a86
--- /dev/null
+++ b/tests/testthat/test_analyse_SARCWOSL.R
@@ -0,0 +1,58 @@
+context("analyse_SAR.CWOSL")
+
+set.seed(1)
+data(ExampleData.BINfileData, envir = environment())
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+
+##perform SAR analysis and set rejection criteria
+results <- analyse_SAR.CWOSL(
+object = object,
+signal.integral.min = 1,
+signal.integral.max = 2,
+background.integral.min = 900,
+background.integral.max = 1000,
+log = "x",
+fit.method = "EXP",
+rejection.criteria = list(
+  recycling.ratio = 10,
+  recuperation.rate = 10,
+  testdose.error = 10,
+  palaeodose.error = 10,
+  exceed.max.regpoint = TRUE),
+plot = FALSE,
+verbose = FALSE
+)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+
+    expect_is(results, "RLum.Results")
+    expect_equal(length(results), 4)
+    expect_is(results$data, "data.frame")
+    expect_is(results$LnLxTnTx.table, "data.frame")
+    expect_is(results$rejection.criteria, "data.frame")
+    expect_is(results$Formula, "expression")
+
+})
+
+test_that("check De values", {
+  testthat::skip_on_cran()
+
+   expect_equal(object = round(sum(results$data[1:2]), digits = 2), 1717.47)
+
+})
+
+test_that("check LxTx table", {
+  testthat::skip_on_cran()
+
+   expect_equal(object = round(sum(results$LnLxTnTx.table$LxTx), digits = 5),  20.92051)
+   expect_equal(object = round(sum(results$LnLxTnTx.table$LxTx.Error), digits = 2), 0.34)
+
+})
+
+test_that("check rejection criteria", {
+  testthat::skip_on_cran()
+
+  expect_equal(object = round(sum(results$rejection.criteria$Value), digits = 3),  1669.348)
+
+})
diff --git a/tests/testthat/test_analyse_SARTL.R b/tests/testthat/test_analyse_SARTL.R
new file mode 100644
index 0000000..5d2d9a4
--- /dev/null
+++ b/tests/testthat/test_analyse_SARTL.R
@@ -0,0 +1,24 @@
+context("analyse_SAR.TL")
+
+##Full check
+test_that("Test examples", {
+  skip_on_cran()
+
+
+  ##load data
+  data(ExampleData.BINfileData, envir = environment())
+
+  ##transform the values from the first position in a RLum.Analysis object
+  object <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos=3)
+
+  ##perform analysis
+  ##TODO ... there is a warning
+  expect_is(analyse_SAR.TL(object,
+                 signal.integral.min = 210,
+                 signal.integral.max = 220,
+                 log = "y",
+                 fit.method = "EXP OR LIN",
+                 sequence.structure = c("SIGNAL", "BACKGROUND")), "RLum.Results")
+
+})
+
diff --git a/tests/testthat/test_analyse_baSAR.R b/tests/testthat/test_analyse_baSAR.R
new file mode 100644
index 0000000..0612e88
--- /dev/null
+++ b/tests/testthat/test_analyse_baSAR.R
@@ -0,0 +1,51 @@
+context("analyse_baSAR")
+
+##Full check
+test_that("Full check of analyse_baSAR function", {
+  skip_on_cran()
+
+    set.seed(1)
+    ##(1) load package test data set
+    data(ExampleData.BINfileData, envir = environment())
+
+    ##(2) selecting relevant curves, and limit dataset
+    CWOSL.SAR.Data <- subset(CWOSL.SAR.Data,
+                             subset = POSITION %in% c(1:3) & LTYPE == "OSL")
+
+
+    ##(3) run analysis
+    ##please note that the parameters selected here are
+    ##chosen for performance, not for reliability
+    results <- analyse_baSAR(
+      object = CWOSL.SAR.Data,
+      source_doserate = c(0.04, 0.001),
+      signal.integral = c(1:2),
+      background.integral = c(80:100),
+      fit.method = "EXP",
+      method_control = list(inits = list(
+        list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 1),
+        list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 2),
+        list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 3)
+      )),
+      plot = FALSE,
+      verbose = FALSE,
+      n.MCMC = 1000,
+      txtProgressBar = FALSE
+
+    )
+
+
+    expect_is(
+      results,
+      class = "RLum.Results",
+      info = NULL,
+      label = NULL
+    )
+    expect_is(results$summary, "data.frame")
+    expect_is(results$mcmc, "mcmc.list")
+    expect_is(results$models, "list")
+
+    expect_equal(round(sum(results$summary[, c(6:9)]), 2), 504.69)
+
+})
+
diff --git a/tests/testthat/test_analyse_pIRIRSequence.R b/tests/testthat/test_analyse_pIRIRSequence.R
new file mode 100644
index 0000000..a1067fd
--- /dev/null
+++ b/tests/testthat/test_analyse_pIRIRSequence.R
@@ -0,0 +1,52 @@
+context("analyse_pIRIRSequence")
+
+set.seed(1)
+data(ExampleData.BINfileData, envir = environment())
+object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1)
+object <- get_RLum(object, record.id = c(-29, -30))
+sequence.structure  <- c(1, 2, 2, 3, 4, 4)
+sequence.structure <-
+  as.vector(sapply(seq(0, length(object) - 1, by = 4),
+                   function(x) {
+                     sequence.structure + x
+                   }))
+
+object <-  sapply(1:length(sequence.structure), function(x) {
+  object[[sequence.structure[x]]]
+
+})
+
+object <-
+  set_RLum(class = "RLum.Analysis",
+           records = object,
+           protocol = "pIRIR")
+results <- analyse_pIRIRSequence(
+  object,
+  signal.integral.min = 1,
+  signal.integral.max = 2,
+  background.integral.min = 900,
+  background.integral.max = 1000,
+  fit.method = "EXP",
+  sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"),
+  main = "Pseudo pIRIR data set based on quartz OSL",
+  plot = FALSE,
+  plot.single = TRUE,
+  verbose = FALSE
+)
+
+test_that("check class and length of output", {
+    testthat::skip_on_cran()
+    expect_is(results, "RLum.Results")
+    expect_equal(length(results), 4)
+    expect_is(results$LnLxTnTx.table, "data.frame")
+    expect_is(results$rejection.criteria, "data.frame")
+
+
+})
+
+test_that("check output", {
+   testthat::skip_on_cran()
+   expect_equal(round(sum(results$data[1:2, 1:4]), 2),7582.62)
+   expect_equal(round(sum(results$rejection.criteria$Value), 2),3338.69)
+
+})
diff --git a/tests/testthat/test_analyse_portableOSL.R b/tests/testthat/test_analyse_portableOSL.R
new file mode 100644
index 0000000..d529b14
--- /dev/null
+++ b/tests/testthat/test_analyse_portableOSL.R
@@ -0,0 +1,27 @@
+context("analyse_portableOSL")
+
+data("ExampleData.portableOSL", envir = environment())
+merged <- merge_RLum(ExampleData.portableOSL)
+results <-
+  analyse_portableOSL(
+    merged,
+    signal.integral = 1:5,
+    invert = FALSE,
+    normalise = TRUE,
+    plot = FALSE
+  )
+
+test_that("check class and length of output", {
+    testthat::skip_on_cran()
+    expect_is(results, "RLum.Results")
+    expect_equal(length(results), 3)
+    expect_is(results$summary, "data.frame")
+    expect_is(results$data, "RLum.Analysis")
+
+})
+
+test_that("check output", {
+  testthat::skip_on_cran()
+  expect_equal(round(sum(results$summary), digits = 2), 70.44)
+
+})
diff --git a/tests/testthat/test_bin_RLumData.R b/tests/testthat/test_bin_RLumData.R
new file mode 100644
index 0000000..263f43f
--- /dev/null
+++ b/tests/testthat/test_bin_RLumData.R
@@ -0,0 +1,27 @@
+context("bin_RLum.Data")
+
+data(ExampleData.CW_OSL_Curve, envir = environment())
+curve <-
+  set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = "OSL",
+      data = as.matrix(ExampleData.CW_OSL_Curve)
+  )
+
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+
+  expect_is(bin_RLum.Data(curve), class = "RLum.Data.Curve", info = NULL, label = NULL)
+  expect_length(bin_RLum.Data(curve)[,1], 500)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  expect_equal(sum(bin_RLum.Data(curve)[,2]), 119200)
+  expect_equal(sum(bin_RLum.Data(curve, bin = 5)[1,2]), 41146)
+
+})
diff --git a/tests/testthat/test_calc_AliquotSize.R b/tests/testthat/test_calc_AliquotSize.R
new file mode 100644
index 0000000..458ea0f
--- /dev/null
+++ b/tests/testthat/test_calc_AliquotSize.R
@@ -0,0 +1,42 @@
+context("calc_AliquotSize")
+
+set.seed(1)
+temp <- calc_AliquotSize(
+  grain.size = c(100,150),
+  sample.diameter = 1,
+  MC.iter = 100,
+  plot = FALSE,
+  verbose = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 2)
+  expect_is(temp$summary, "data.frame")
+  expect_is(temp$MC, "list")
+
+})
+
+test_that("check summary output", {
+  testthat::skip_on_cran()
+  result <- get_RLum(temp)
+
+  expect_equal(result$grain.size, 125)
+  expect_equal(result$sample.diameter, 1)
+  expect_equal(result$packing.density, 0.65)
+  expect_equal(result$n.grains, 42)
+  expect_equal(result$grains.counted, NA)
+})
+
+test_that("check MC run", {
+  testthat::skip_on_cran()
+  expect_equal(round(temp$MC$statistics$n), 100)
+  expect_equal(round(temp$MC$statistics$mean), 43)
+  expect_equal(round(temp$MC$statistics$median), 39)
+  expect_equal(round(temp$MC$statistics$sd.abs), 20)
+  expect_equal(round(temp$MC$statistics$sd.rel), 45)
+  expect_equal(round(temp$MC$statistics$se.abs), 2)
+  expect_equal(round(temp$MC$statistics$se.rel), 5)
+  expect_length(temp$MC$kde$x, 10000)
+  expect_length(temp$MC$kde$y, 10000)
+})
diff --git a/tests/testthat/test_calc_AverageDose.R b/tests/testthat/test_calc_AverageDose.R
new file mode 100644
index 0000000..e7947d5
--- /dev/null
+++ b/tests/testthat/test_calc_AverageDose.R
@@ -0,0 +1,24 @@
+context("calc_AverageDose")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,],
+                       sigma_m = 0.1,
+                       plot = FALSE,
+                       verbose = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 3)
+
+})
+
+test_that("check summary output", {
+  testthat::skip_on_cran()
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$AVERAGE_DOSE, digits = 4), 65.3597)
+  expect_equal(round(results$SIGMA_D, digits = 4), 0.3092)
+  expect_equal(round(results$L_MAX, digits = 5), -19.25096)
+})
+
diff --git a/tests/testthat/test_calc_CentralDose.R b/tests/testthat/test_calc_CentralDose.R
new file mode 100644
index 0000000..052433f
--- /dev/null
+++ b/tests/testthat/test_calc_CentralDose.R
@@ -0,0 +1,27 @@
+context("calc_CentralDose")
+
+data(ExampleData.DeValues, envir = environment())
+
+temp <- calc_CentralDose(
+  ExampleData.DeValues$CA1,
+  plot = FALSE,
+  verbose = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 5)
+
+})
+
+test_that("check summary output", {
+  testthat::skip_on_cran()
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$de, digits = 5), 65.70929)
+  expect_equal(round(results$de_err, digits = 6), 3.053443)
+  expect_equal(round(results$OD, digits = 5), 34.69061)
+  expect_equal(round(results$OD_err, digits = 6), 3.458774)
+  expect_equal(round(results$Lmax, digits = 5), 31.85046)
+})
+
diff --git a/tests/testthat/test_calc_CommonDose.R b/tests/testthat/test_calc_CommonDose.R
new file mode 100644
index 0000000..f9c6212
--- /dev/null
+++ b/tests/testthat/test_calc_CommonDose.R
@@ -0,0 +1,26 @@
+context("calc_CommonDose")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_CommonDose(ExampleData.DeValues$CA1, plot = FALSE, verbose = FALSE)
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 4)
+
+})
+
+test_that("check values from output", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$de, digits = 5), 62.15999)
+  expect_equal(round(results$de_err, digits = 7), 0.7815117)
+
+  expect_true(temp@data$args$log)
+  expect_equal(temp@data$args$sigmab, 0)
+
+
+})
diff --git a/tests/testthat/test_calc_CosmicDoseRate.R b/tests/testthat/test_calc_CosmicDoseRate.R
new file mode 100644
index 0000000..03c69af
--- /dev/null
+++ b/tests/testthat/test_calc_CosmicDoseRate.R
@@ -0,0 +1,57 @@
+context("calc_CosmicDoseRate")
+
+temp <- calc_CosmicDoseRate(depth = 2.78, density = 1.7,
+                            latitude = 38.06451, longitude = 1.49646,
+                            altitude = 364, error = 10)
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 3)
+
+})
+
+test_that("check values from output example 1", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(results$depth, 2.78)
+  expect_equal(results$density, 1.7)
+  expect_equal(results$latitude, 38.06451)
+  expect_equal(results$longitude, 1.49646)
+  expect_equal(results$altitude, 364)
+  expect_equal(round(results$total_absorber.gcm2, digits = 0), 473)
+  expect_equal(round(results$d0, digits = 3), 0.152)
+  expect_equal(round(results$geom_lat, digits =  1), 41.1)
+  expect_equal(round(results$dc, digits = 3), 0.161)
+
+
+
+})
+
+
+test_that("check values from output example 2b", {
+  testthat::skip_on_cran()
+  temp <- calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7),
+                              latitude = 12.04332, longitude = 4.43243,
+                              altitude = 364, corr.fieldChanges = TRUE,
+                              est.age = 67, error = 15)
+
+
+  results <- get_RLum(temp)
+
+  expect_equal(results$depth.1, 5)
+  expect_equal(results$depth.2, 2.78)
+  expect_equal(results$density.1, 2.65)
+  expect_equal(results$density.2, 1.7)
+  expect_equal(results$latitude, 12.04332)
+  expect_equal(results$longitude, 4.43243)
+  expect_equal(results$altitude, 364)
+  expect_equal(round(results$total_absorber.gcm2, digits = 0), 1798)
+  expect_equal(round(results$d0, digits = 4), 0.0705)
+  expect_equal(round(results$geom_lat, digits =  1), 15.1)
+  expect_equal(round(results$dc, digits = 3), 0.072)
+
+})
diff --git a/tests/testthat/test_calc_FadingCorr.R b/tests/testthat/test_calc_FadingCorr.R
new file mode 100644
index 0000000..23cf40f
--- /dev/null
+++ b/tests/testthat/test_calc_FadingCorr.R
@@ -0,0 +1,47 @@
+context("calc_FadingCorr")
+
+set.seed(1)
+temp <- calc_FadingCorr(
+  age.faded = c(0.1,0),
+  g_value = c(5.0, 1.0),
+  tc = 2592000,
+  tc.g_value = 172800,
+  n.MC = 100, verbose = FALSE)
+
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 2)
+
+  ##check the verbose mode
+  expect_is(calc_FadingCorr(
+    age.faded = c(0.1,0),
+    g_value = c(5.0, 1.0),
+    tc = 2592000,
+    tc.g_value = 172800,
+    n.MC = 1, verbose = TRUE), class = "RLum.Results")
+
+})
+
+test_that("check values from output example 1", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(results$AGE, 0.1168)
+  expect_equal(results$AGE.ERROR, 0.0035)
+  expect_equal(results$AGE_FADED, 0.1)
+  expect_equal(results$AGE_FADED.ERROR, 0)
+  expect_equal(results$G_VALUE, 5.312393)
+  expect_equal(round(results$G_VALUE.ERROR, 5), 1.01190)
+  expect_equal(results$KAPPA, 0.02307143)
+  expect_equal(results$KAPPA.ERROR, 0.00439463)
+  expect_equal(results$TC, 8.213721e-05)
+  expect_equal(results$TC.G_VALUE, 5.475814e-06)
+  expect_equal(results$n.MC, 100)
+  expect_equal(results$OBSERVATIONS, 100)
+  expect_equal(results$SEED, NA)
+
+})
diff --git a/tests/testthat/test_calc_FastRatio.R b/tests/testthat/test_calc_FastRatio.R
new file mode 100644
index 0000000..e0c77dd
--- /dev/null
+++ b/tests/testthat/test_calc_FastRatio.R
@@ -0,0 +1,42 @@
+context("calc_FastRatio")
+
+data("ExampleData.CW_OSL_Curve")
+temp <- calc_FastRatio(ExampleData.CW_OSL_Curve, plot = FALSE, verbose = FALSE)
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 5)
+
+})
+
+test_that("check values from output", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$fast.ratio, digits = 3), 405.122)
+  expect_equal(round(results$fast.ratio.se, digits = 4), 119.7442)
+  expect_equal(round(results$fast.ratio.rse, digits = 5), 29.55756)
+  expect_equal(results$channels, 1000)
+  expect_equal(round(results$channel.width, digits = 2), 0.04)
+  expect_equal(results$dead.channels.start, 0)
+  expect_equal(results$dead.channels.end, 0)
+  expect_equal(results$sigmaF, 2.6e-17)
+  expect_equal(results$sigmaM, 4.28e-18)
+  expect_equal(results$stimulation.power, 30.6)
+  expect_equal(results$wavelength, 470)
+  expect_equal(results$t_L1, 0)
+  expect_equal(round(results$t_L2, digits = 6), 2.446413)
+  expect_equal(round(results$t_L3_start, digits = 5), 14.86139)
+  expect_equal(round(results$t_L3_end, digits = 5), 22.29208)
+  expect_equal(results$Ch_L1, 1)
+  expect_equal(results$Ch_L2, 62)
+  expect_equal(results$Ch_L3_start, 373)
+  expect_equal(results$Ch_L3_end, 558)
+  expect_equal(results$Cts_L1, 11111)
+  expect_equal(results$Cts_L2, 65)
+  expect_equal(round(results$Cts_L3, digits = 5), 37.66667)
+
+})
diff --git a/tests/testthat/test_calc_FiniteMixture.R b/tests/testthat/test_calc_FiniteMixture.R
new file mode 100755
index 0000000..db5d2b8
--- /dev/null
+++ b/tests/testthat/test_calc_FiniteMixture.R
@@ -0,0 +1,31 @@
+context("calc_FiniteMixture")
+
+data(ExampleData.DeValues, envir = environment())
+
+temp <- calc_FiniteMixture(
+  ExampleData.DeValues$CA1,
+  sigmab = 0.2,
+  n.components = 2,
+  grain.probability = TRUE,
+  verbose = FALSE)
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 10)
+
+})
+
+test_that("check values from output example 1", {
+  testthat::skip_on_cran()
+  results <- get_RLum(temp)
+
+  expect_equal(results$de[1], 31.5299)
+  expect_equal(results$de[2], 72.0333)
+  expect_equal(results$de_err[1], 3.6387)
+  expect_equal(results$de_err[2], 2.4082)
+  expect_equal(results$proportion[1], 0.1096)
+  expect_equal(results$proportion[2], 0.8904)
+
+})
diff --git a/tests/testthat/test_calc_FuchsLang2001.R b/tests/testthat/test_calc_FuchsLang2001.R
new file mode 100755
index 0000000..a5dd4db
--- /dev/null
+++ b/tests/testthat/test_calc_FuchsLang2001.R
@@ -0,0 +1,28 @@
+context("calc_FuchsLang2001")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_FuchsLang2001(ExampleData.DeValues$BT998,
+                           cvThreshold = 5,
+                           plot = FALSE,
+                           verbose = FALSE)
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 5)
+
+})
+
+test_that("check values from output example 1", {
+
+  testthat::skip_on_cran()
+  results <- get_RLum(temp)
+
+  expect_equal(results$de, 2866.11)
+  expect_equal(results$de_err, 157.35)
+  expect_equal(results$de_weighted, 2846.66)
+  expect_equal(results$de_weighted_err, 20.58)
+  expect_equal(results$n.usedDeValues, 22)
+
+})
diff --git a/tests/testthat/test_calc_HomogeneityTest.R b/tests/testthat/test_calc_HomogeneityTest.R
new file mode 100755
index 0000000..f9dbcf1
--- /dev/null
+++ b/tests/testthat/test_calc_HomogeneityTest.R
@@ -0,0 +1,25 @@
+context("calc_HomogeneityTest")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_HomogeneityTest(ExampleData.DeValues$BT998,
+                           verbose = FALSE)
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 4)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(results$n, 25)
+  expect_equal(results$g.value, 0.008687915)
+  expect_equal(results$df, 24)
+  expect_equal(results$P.value, 1)
+
+})
diff --git a/tests/testthat/test_calc_IEU.R b/tests/testthat/test_calc_IEU.R
new file mode 100755
index 0000000..35f5353
--- /dev/null
+++ b/tests/testthat/test_calc_IEU.R
@@ -0,0 +1,25 @@
+context("calc_IEU")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_IEU(ExampleData.DeValues$CA1,
+                 a = 0.2,
+                 b = 1.9,
+                 interval = 1, verbose = FALSE, plot = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 5)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(results$de, 46.67)
+  expect_equal(results$de_err, 2.55)
+  expect_equal(results$n, 24)
+
+})
diff --git a/tests/testthat/test_calc_Kars2008.R b/tests/testthat/test_calc_Kars2008.R
new file mode 100644
index 0000000..ef3594c
--- /dev/null
+++ b/tests/testthat/test_calc_Kars2008.R
@@ -0,0 +1,57 @@
+context("calc_Kars2008")
+
+set.seed(1)
+data("ExampleData.Fading", envir = environment())
+fading_data <- ExampleData.Fading$fading.data$IR50
+data <- ExampleData.Fading$equivalentDose.data$IR50
+ddot <- c(7.00, 0.004)
+readerDdot <- c(0.134, 0.0067)
+
+rhop <-
+  analyse_FadingMeasurement(fading_data,
+                            plot = FALSE,
+                            verbose = FALSE,
+                            n.MC = 10)
+kars <- calc_Kars2008(
+  data = data,
+  rhop = rhop,
+  ddot = ddot,
+  readerDdot = readerDdot,
+  n.MC = 50,
+  plot = FALSE
+)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  ##rhop
+  expect_is(rhop, class = "RLum.Results", info = NULL, label = NULL)
+    expect_is(rhop$fading_results, "data.frame")
+    expect_is(rhop$fit, "lm")
+    expect_is(rhop$rho_prime, "data.frame")
+
+  ##kars
+  expect_is(kars, class = "RLum.Results", info = NULL, label = NULL)
+    expect_is(kars$results, class = "data.frame", info = NULL, label = NULL)
+    expect_is(kars$data, class = "data.frame", info = NULL, label = NULL)
+    expect_is(kars$Ln, class = "numeric", info = NULL, label = NULL)
+    expect_is(kars$fits, class = "list", info = NULL, label = NULL)
+
+})
+
+test_that("check values from analyse_FadingMeasurement()", {
+    expect_equal(round(sum(rhop$fading_results[,1:9]),0),415)
+    expect_equal(round(sum(rhop$rho_prime),5),2e-05)
+    expect_equal(round(sum(rhop$irr.times)), 2673108)
+
+})
+
+test_that("check values from calc_Kars2008()", {
+  testthat::skip_on_cran()
+  expect_equal(round(sum(kars$results),0), 2417)
+  expect_equal(round(sum(kars$data),0), 191530)
+  expect_equal(round(sum(kars$Ln),4), 0.1585)
+  expect_equal(round(sum(residuals(kars$fits$simulated)),4),  1.2386)
+  expect_equal(round(sum(residuals(kars$fits$measured)),4),  0.1894)
+  expect_equal(round(sum(residuals(kars$fits$unfaded)),4),  1.6293)
+
+})
diff --git a/tests/testthat/test_calc_MaxDose.R b/tests/testthat/test_calc_MaxDose.R
new file mode 100755
index 0000000..f704cf7
--- /dev/null
+++ b/tests/testthat/test_calc_MaxDose.R
@@ -0,0 +1,34 @@
+context("calc_MaxDose")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_MaxDose(ExampleData.DeValues$CA1,
+                     sigmab = 0.2,
+                     par = 3,
+                     plot = FALSE,
+                     verbose = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 9)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$de, digits = 5), 76.57571)
+  expect_equal(round(results$de_err, digits = 6), 7.569908)
+  expect_equal(results$ci_level, 0.95)
+  expect_equal(round(results$ci_lower, digits = 5), 69.65358)
+  expect_equal(round(results$ci_upper, digits = 5), 99.32762)
+  expect_equal(results$par, 3)
+  expect_equal(round(results$sig, digits = 7), 0.5376628)
+  expect_equal(round(results$p0, digits = 7), 0.6482137)
+  expect_equal(results$mu, NA)
+  expect_equal(round(results$Lmax, digits = 5), -19.79245)
+  expect_equal(round(results$BIC, digits = 5), 58.86603)
+
+})
diff --git a/tests/testthat/test_calc_MinDose.R b/tests/testthat/test_calc_MinDose.R
new file mode 100755
index 0000000..1415942
--- /dev/null
+++ b/tests/testthat/test_calc_MinDose.R
@@ -0,0 +1,33 @@
+context("calc_MinDose")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_MinDose(data = ExampleData.DeValues$CA1,
+                     sigmab = 0.1,
+                     verbose = FALSE,
+                     plot = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 9)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$de, digits = 5), 34.31834)
+  expect_equal(round(results$de_err, digits = 6), 2.550964)
+  expect_equal(results$ci_level, 0.95)
+  expect_equal(round(results$ci_lower, digits = 5), 29.37526)
+  expect_equal(round(results$ci_upper, digits = 5), 39.37503)
+  expect_equal(results$par, 3)
+  expect_equal(round(results$sig, digits = 7), 0.7287325)
+  expect_equal(round(results$p0, digits = 8), 0.01053938)
+  expect_equal(results$mu, NA)
+  expect_equal(round(results$Lmax, digits = 5), -43.57969)
+  expect_equal(round(results$BIC, digits = 4), 106.4405)
+
+})
diff --git a/tests/testthat/test_calc_OSLLxTxRatio.R b/tests/testthat/test_calc_OSLLxTxRatio.R
new file mode 100755
index 0000000..12c36a7
--- /dev/null
+++ b/tests/testthat/test_calc_OSLLxTxRatio.R
@@ -0,0 +1,34 @@
+context("calc_OSLLxTxRatio")
+
+data(ExampleData.LxTxOSLData, envir = environment())
+
+temp <- calc_OSLLxTxRatio(
+  Lx.data = Lx.data,
+  Tx.data = Tx.data,
+  signal.integral = c(1:2),
+  background.integral = c(85:100))
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 2)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(results$LnLx, 81709)
+  expect_equal(results$LnLx.BG, 530)
+  expect_equal(results$TnTx, 7403)
+  expect_equal(results$TnTx.BG, 513)
+  expect_equal(results$Net_LnLx, 81179)
+  expect_equal(round(results$Net_LnLx.Error, digits = 4), 286.5461)
+  expect_equal(results$Net_TnTx, 6890)
+  expect_equal(round(results$Net_TnTx.Error, digits = 5), 88.53581)
+  expect_equal(round(results$LxTx, digits = 5), 11.78215)
+  expect_equal(round(results$LxTx.Error, digits = 7), 0.1570077)
+
+})
diff --git a/tests/testthat/test_calc_SourceDoseRate.R b/tests/testthat/test_calc_SourceDoseRate.R
new file mode 100755
index 0000000..dd81059
--- /dev/null
+++ b/tests/testthat/test_calc_SourceDoseRate.R
@@ -0,0 +1,23 @@
+context("calc_SourceDoseRate")
+
+temp <- calc_SourceDoseRate(measurement.date = "2012-01-27",
+                           calib.date = "2014-12-19",
+                           calib.dose.rate = 0.0438,
+                           calib.error = 0.0019)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 3)
+
+})
+
+test_that("check values from output example 1", {
+  testthat::skip_on_cran()
+  results <- get_RLum(temp)
+
+  expect_equal(round(results$dose.rate, digits = 8), 0.04695031)
+  expect_equal(round(results$dose.rate.error, digits = 9), 0.002036657)
+  expect_equal(results$date, as.Date("2012-01-27"))
+
+})
diff --git a/tests/testthat/test_calc_Statistics.R b/tests/testthat/test_calc_Statistics.R
new file mode 100644
index 0000000..e3f7804
--- /dev/null
+++ b/tests/testthat/test_calc_Statistics.R
@@ -0,0 +1,87 @@
+context("calc_Statistics")
+
+## load example data
+data(ExampleData.DeValues, envir = environment())
+
+## calculate statistics and show output
+set.seed(1)
+temp <- calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000)
+temp_alt1 <- calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000, digits = 2)
+temp_alt2 <- calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000, digits = NULL)
+temp_RLum <- set_RLum(class = "RLum.Results", data = list(data = ExampleData.DeValues$BT998))
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("list", "vector"))
+  expect_equal(length(temp), 3)
+
+})
+
+test_that("Test certain input scenarios", {
+  expect_is(calc_Statistics(temp_RLum), "list")
+
+  df <- ExampleData.DeValues$BT998
+  df[,2] <- 0
+  expect_warning(calc_Statistics(df))
+
+
+})
+
+
+test_that("check error messages", {
+  testthat::skip_on_cran()
+  expect_error(calc_Statistics(data = matrix(0,2)),
+               regexp = "[calc_Statistics()] Input data is neither of type 'data.frame' nor 'RLum.Results'",
+               fixed = TRUE)
+  expect_error(calc_Statistics(data = df, weight.calc = "test"))
+
+})
+
+
+test_that("check weighted values from output", {
+  testthat::skip_on_cran()
+  expect_equal(temp$weighted$n, 25)
+  expect_equal(sum(unlist(temp_alt1)),24535.72)
+  expect_equal(sum(unlist(temp_alt2)),24534.1)
+  expect_equal(round(temp$weighted$mean, digits = 3), 2896.036)
+  expect_equal(round(temp$weighted$median, digits = 2), 2884.46)
+  expect_equal(round(temp$weighted$sd.abs, digits = 4), 240.2228)
+  expect_equal(round(temp$weighted$sd.rel, digits = 6), 8.294885)
+  expect_equal(round(temp$weighted$se.abs, digits = 5), 48.04457)
+  expect_equal(round(temp$weighted$se.rel, digits = 6), 1.658977)
+  expect_equal(round(temp$weighted$skewness, digits = 6), 1.342018)
+  expect_equal(round(temp$weighted$kurtosis, digits = 6), 4.387913)
+
+
+})
+
+test_that("check unweighted values from output", {
+  testthat::skip_on_cran()
+
+  expect_equal(temp$weighted$n, 25)
+  expect_equal(round(temp$unweighted$mean, digits = 3), 2950.818)
+  expect_equal(round(temp$unweighted$median, digits = 2), 2884.46)
+  expect_equal(round(temp$unweighted$sd.abs, digits = 4), 281.6433)
+  expect_equal(round(temp$unweighted$sd.rel, digits = 6), 9.544584)
+  expect_equal(round(temp$unweighted$se.abs, digits = 5), 56.32866)
+  expect_equal(round(temp$unweighted$se.rel, digits = 6), 1.908917)
+  expect_equal(round(temp$unweighted$skewness, digits = 6), 1.342018)
+  expect_equal(round(temp$unweighted$kurtosis, digits = 6), 4.387913)
+
+
+})
+
+test_that("check MCM values from output", {
+
+  expect_equal(temp$MCM$n, 25)
+  expect_equal(round(temp$MCM$mean, digits = 3), 2950.992)
+  expect_equal(round(temp$MCM$median, digits = 3), 2885.622)
+  expect_equal(round(temp$MCM$sd.abs, digits = 4), 295.0737)
+  expect_equal(round(temp$MCM$sd.rel, digits = 6), 9.999137)
+  expect_equal(round(temp$MCM$se.abs, digits = 5), 59.01474)
+  expect_equal(round(temp$MCM$se.rel, digits = 6), 1.999827)
+  expect_equal(round(temp$MCM$skewness, digits = 3), 1286.082)
+  expect_equal(round(temp$MCM$kurtosis, digits = 3), 4757.097)
+
+
+})
diff --git a/tests/testthat/test_calc_TLLxTxRatio.R b/tests/testthat/test_calc_TLLxTxRatio.R
new file mode 100644
index 0000000..cb83316
--- /dev/null
+++ b/tests/testthat/test_calc_TLLxTxRatio.R
@@ -0,0 +1,47 @@
+context("calc_TLLxTxRatio")
+
+##load package example data
+data(ExampleData.BINfileData, envir = environment())
+
+##convert Risoe.BINfileData into a curve object
+temp <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos = 3)
+
+
+Lx.data.signal <- get_RLum(temp, record.id=1)
+Lx.data.background <- get_RLum(temp, record.id=2)
+Tx.data.signal <- get_RLum(temp, record.id=3)
+Tx.data.background <- get_RLum(temp, record.id=4)
+signal.integral.min <- 210
+signal.integral.max <- 230
+
+temp <- calc_TLLxTxRatio(Lx.data.signal,
+                           Lx.data.background,
+                           Tx.data.signal, Tx.data.background,
+                           signal.integral.min, signal.integral.max)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 1)
+
+})
+
+test_that("check values from output", {
+  testthat::skip_on_cran()
+
+  results <- get_RLum(temp)
+
+  expect_equal(length(results), 10)
+
+  expect_equal(results$LnLx, 257042)
+  expect_equal(results$LnLx.BG, 4068)
+  expect_equal(results$TnTx, 82298)
+  expect_equal(results$TnTx.BG, 2943)
+  expect_equal(results$net_LnLx, 252974)
+  expect_equal(round(results$net_LnLx.Error, digits = 2), 49468.92)
+  expect_equal(results$net_TnTx, 79355)
+  expect_equal(round(results$net_TnTx.Error,2), 21449.72)
+  expect_equal(round(results$LxTx, digits =  6), 3.187877)
+  expect_equal(round(results$LxTx.Error, digits = 6), 1.485073)
+
+})
diff --git a/tests/testthat/test_calc_ThermalLifetime.R b/tests/testthat/test_calc_ThermalLifetime.R
new file mode 100644
index 0000000..23163a6
--- /dev/null
+++ b/tests/testthat/test_calc_ThermalLifetime.R
@@ -0,0 +1,81 @@
+context("calc_ThermalLifetime")
+
+
+##EXAMPLE 1
+##calculation for two trap-depths with similar frequency factor for different temperatures
+E <- c(1.66, 1.70)
+s <- 1e+13
+T <- 10:20
+
+set.seed(1)
+temp <- calc_ThermalLifetime(
+ E = E,
+ s = s,
+ T = T,
+ output_unit = "Ma",
+ verbose = FALSE
+)
+
+
+test_that("check class and length of output example 1", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 2)
+
+})
+
+test_that("check values from output example 1", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp$lifetimes), c("array", "structure", "vector"))
+  expect_equal(dim(temp$lifetimes), c(1, 2, 11))
+
+  ##check results for each of the tested temperatures (10 to 20 °C)
+
+  results <- lapply(1:length(T), function(x){
+    temp$lifetimes[,,x]
+  })
+
+  expect_equal(round(results[[1]], digits = 3),  c("1.66" = 1115.541, "1.7" = 5747.042))
+  expect_equal(round(results[[2]], digits = 4),  c("1.66" = 878.0196, "1.7" = 4497.3585))
+  expect_equal(round(results[[3]], digits = 4),  c("1.66" = 692.2329, "1.7" = 3525.4738))
+  expect_equal(round(results[[4]], digits = 4),  c("1.66" = 546.6658, "1.7" = 2768.3216))
+  expect_equal(round(results[[5]], digits = 4),  c("1.66" = 432.4199, "1.7" = 2177.4436))
+  expect_equal(round(results[[6]], digits = 4),  c("1.66" = 342.6069, "1.7" = 1715.5406))
+  expect_equal(round(results[[7]], digits = 4),  c("1.66" = 271.8854, "1.7" = 1353.8523))
+  expect_equal(round(results[[8]], digits = 4),  c("1.66" = 216.1065, "1.7" = 1070.1642))
+  expect_equal(round(results[[9]], digits = 4),  c("1.66" = 172.0421, "1.7" = 847.2879))
+  expect_equal(round(results[[10]], digits = 4), c("1.66" = 137.1765, "1.7" = 671.9020))
+  expect_equal(round(results[[11]], digits = 4), c("1.66" = 109.5458, "1.7" = 533.6641))
+
+})
+
+##EXAMPLE 2
+##profiling of thermal life time for E and s and their standard error
+E <- c(1.600, 0.003)
+s <- c(1e+13,1e+011)
+T <- 20
+
+set.seed(1)
+temp <- calc_ThermalLifetime(
+  E = E,
+  s = s,
+  T = T,
+  profiling = TRUE,
+  output_unit = "Ma",
+  verbose = FALSE,
+  plot = FALSE
+)
+
+test_that("check class and length of output example 2", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp), c("RLum.Results", "RLum"))
+  expect_equal(length(temp), 2)
+
+})
+
+test_that("check values from output example 2", {
+  testthat::skip_on_cran()
+  expect_equal(is(temp$lifetimes), c("numeric", "vector"))
+  expect_equal(length(temp$lifetimes), 1000)
+  expect_equal(dim(temp$profiling_matrix), c(1000, 4))
+})
diff --git a/tests/testthat/test_calc_gSGC.R b/tests/testthat/test_calc_gSGC.R
new file mode 100644
index 0000000..34ff4ab
--- /dev/null
+++ b/tests/testthat/test_calc_gSGC.R
@@ -0,0 +1,28 @@
+context("calc_gSGC")
+
+set.seed(seed = 1)
+temp <- calc_gSGC(data = data.frame(
+  LnTn =  2.361, LnTn.error = 0.087,
+  Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091,
+  Dr1 = 34.4),
+  plot = FALSE,
+  verbose = FALSE
+  )
+
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_is(temp, class = "RLum.Results", info = NULL, label = NULL)
+  expect_is(temp$De, class = "data.frame", info = NULL, label = NULL)
+  expect_is(temp$De.MC, class = "list", info = NULL, label = NULL)
+  expect_equal(length(temp), 3)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+  expect_equal(round(sum(temp$De), digits = 2), 30.39)
+  expect_equal(round(sum(temp$De.MC[[1]]), 0), 10848)
+
+})
diff --git a/tests/testthat/test_convert_X2CSV.R b/tests/testthat/test_convert_X2CSV.R
new file mode 100644
index 0000000..41d75a2
--- /dev/null
+++ b/tests/testthat/test_convert_X2CSV.R
@@ -0,0 +1,25 @@
+context("convert_X2CSV")
+
+test_that("test convert functions", {
+  testthat::skip_on_cran()
+
+  ##test for errors
+  expect_error(convert_BIN2CSV(file = "", export = FALSE),
+               regexp = "[read_BIN2R()] File does not exist!",
+               fixed = TRUE)
+  expect_error(convert_Daybreak2CSV(file = "", export = FALSE),
+               regexp = "[read_Daybreak2R()] file name does not seem to exist.",
+               fixed = TRUE)
+  #expect_error(convert_PSL2CSV(file = "", export = FALSE))
+  expect_error(suppressWarnings(convert_XSYG2CSV(file = "", export = FALSE)))
+
+  ##test conversion itself
+    ##BIN2CSV
+    data(ExampleData.BINfileData, envir = environment())
+    expect_is(convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1), export = FALSE), "list")
+
+    ##XSYG2CSV
+    data(ExampleData.XSYG, envir = environment())
+    expect_is(convert_XSYG2CSV(OSL.SARMeasurement$Sequence.Object[1:10], export = FALSE), "list")
+
+})
diff --git a/tests/testthat/test_fit_CWCurve.R b/tests/testthat/test_fit_CWCurve.R
new file mode 100644
index 0000000..2ec117a
--- /dev/null
+++ b/tests/testthat/test_fit_CWCurve.R
@@ -0,0 +1,24 @@
+context("fit_CWCurve")
+
+data(ExampleData.CW_OSL_Curve, envir = environment())
+fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve,
+                   main = "CW Curve Fit",
+                   n.components.max = 4,
+                   log = "x",
+                   plot = FALSE)
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_equal(is(fit), c("RLum.Results", "RLum"))
+  expect_equal(length(fit), 3)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+  expect_equal(fit$data$n.components, 3)
+  expect_equal(round(fit$data$I01, digits = 3), 2387.617)
+  expect_equal(round(fit$data$lambda1, digits = 5), 4.59054)
+  expect_equal(round(fit$data$`pseudo-R^2`, digits = 4), 0.9995)
+
+})
diff --git a/tests/testthat/test_fit_LMCurve.R b/tests/testthat/test_fit_LMCurve.R
new file mode 100644
index 0000000..ff3d69e
--- /dev/null
+++ b/tests/testthat/test_fit_LMCurve.R
@@ -0,0 +1,55 @@
+context("fit_LWCurve")
+
+## Test 1 with NLS
+data(ExampleData.FittingLM, envir = environment())
+fit <- fit_LMCurve(values = values.curve,
+            values.bg = values.curveBG,
+            n.components = 3,
+            log = "x",
+            start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500)),
+            plot = FALSE)
+
+test_that("check class and length of output", {
+
+  expect_equal(is(fit), c("RLum.Results", "RLum"))
+  expect_equal(length(fit), 3)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  expect_equal(fit$data$n.components, 3)
+  expect_equal(round(fit$data$Im1, digits = 3), 169.44)
+  expect_equal(round(fit$data$xm1, digits = 5), 49.00643)
+  expect_equal(round(fit$data$b1, digits = 5), 1.66554)
+  expect_equal(round(fit$data$`pseudo-R^2`, digits = 4), 0.9437)
+
+})
+
+## Test 2 with LM
+data(ExampleData.FittingLM, envir = environment())
+fit <- fit_LMCurve(values = values.curve,
+                   values.bg = values.curveBG,
+                   n.components = 3,
+                   log = "x",
+                   fit.method = "LM",
+                   plot = FALSE)
+
+test_that("check class and length of output", {
+
+  expect_equal(is(fit), c("RLum.Results", "RLum"))
+  expect_equal(length(fit), 3)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+
+  expect_equal(fit$data$n.components, 3)
+  expect_equal(round(fit$data$Im1, digits = 3), 169.437)
+  expect_equal(round(fit$data$xm1, digits = 5), 49.00509)
+  expect_equal(round(fit$data$b1, digits = 5), 1.66563)
+  expect_equal(round(fit$data$`pseudo-R^2`, digits = 4), 0.9437)
+
+})
diff --git a/tests/testthat/test_get_RLum.R b/tests/testthat/test_get_RLum.R
new file mode 100644
index 0000000..3efbdda
--- /dev/null
+++ b/tests/testthat/test_get_RLum.R
@@ -0,0 +1,26 @@
+context("get_RLum")
+
+data(ExampleData.DeValues, envir = environment())
+temp <- calc_CentralDose(ExampleData.DeValues$CA1, plot = FALSE, verbose = FALSE)
+
+temp_RLumDataCurve <- set_RLum(class = "RLum.Data.Curve")
+temp_RLumDataImage <- set_RLum(class = "RLum.Data.Image")
+temp_RLumDataSpectrum <- set_RLum(class = "RLum.Data.Spectrum")
+temp_RLumAnalysis <- set_RLum(class = "RLum.Analysis")
+temp_RLumResults <- set_RLum(class = "RLum.Results")
+
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_is(get_RLum(temp), class = "data.frame")
+  expect_is(get_RLum(temp, data.object = "args"), class = "list")
+
+  ##test objects
+  expect_is(get_RLum(temp_RLumDataCurve), class = "matrix")
+  expect_is(get_RLum(temp_RLumDataImage), class = "RasterBrick")
+  expect_is(get_RLum(temp_RLumDataSpectrum), class = "matrix")
+  expect_null(get_RLum(temp_RLumAnalysis))
+  expect_null(get_RLum(temp_RLumResults))
+
+})
diff --git a/tests/testthat/test_merge_RLumDataCurve.R b/tests/testthat/test_merge_RLumDataCurve.R
new file mode 100644
index 0000000..1a49dc1
--- /dev/null
+++ b/tests/testthat/test_merge_RLumDataCurve.R
@@ -0,0 +1,22 @@
+context("merge_RLum.Data.Curve")
+
+test_that("Merge tests", {
+  testthat::skip_on_cran()
+
+  ##load example data
+  data(ExampleData.XSYG, envir = environment())
+  TL.curves  <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType = "TL (UVVIS)")
+  TL.curve.1 <- TL.curves[[1]]
+  TL.curve.3 <- TL.curves[[3]]
+
+  ##check for error
+  expect_error(merge_RLum.Data.Curve("", merge.method = "/"))
+
+  ##check various operations
+  expect_is(TL.curve.1 + TL.curve.3, "RLum.Data.Curve")
+  expect_is(TL.curve.1 - TL.curve.3, "RLum.Data.Curve")
+  expect_is(TL.curve.3 / TL.curve.1, "RLum.Data.Curve")
+  expect_warning(TL.curve.3 / TL.curve.1)
+  expect_is(TL.curve.1 * TL.curve.3, "RLum.Data.Curve")
+
+})
diff --git a/tests/testthat/test_merge_RisoeBINfileData.R b/tests/testthat/test_merge_RisoeBINfileData.R
new file mode 100644
index 0000000..2d4a74c
--- /dev/null
+++ b/tests/testthat/test_merge_RisoeBINfileData.R
@@ -0,0 +1,19 @@
+context("merge_RisoeBINfileData")
+
+##Full check
+test_that("Test merging", {
+  skip_on_cran()
+
+  ##expect error
+  expect_error(merge_Risoe.BINfileData(input.objects = "data"))
+  expect_error(merge_Risoe.BINfileData(input.objects = c("data", "data2")))
+  expect_error(merge_Risoe.BINfileData(input.objects = list("data", "data2")), regexp = "[merge_Risoe.BINfileData()] Input list does not contain Risoe.BINfileData objects!", fixed = TRUE)
+
+  ##expect success
+  data(ExampleData.BINfileData, envir = environment())
+  object1 <- CWOSL.SAR.Data
+  object2 <- CWOSL.SAR.Data
+  expect_is(merge_Risoe.BINfileData(c(object1, object2)), "Risoe.BINfileData")
+
+})
+
diff --git a/tests/testthat/test_names_RLum.R b/tests/testthat/test_names_RLum.R
new file mode 100644
index 0000000..c390efe
--- /dev/null
+++ b/tests/testthat/test_names_RLum.R
@@ -0,0 +1,10 @@
+context("names_RLum")
+
+test_that("Test whether function works", {
+  testthat::skip_on_cran()
+
+  data(ExampleData.RLum.Analysis, envir = environment())
+  expect_silent(names_RLum(IRSAR.RF.Data))
+  expect_is(names_RLum(IRSAR.RF.Data), "character")
+
+})
diff --git a/tests/testthat/test_plot_AbanicoPlot.R b/tests/testthat/test_plot_AbanicoPlot.R
new file mode 100644
index 0000000..e96fdcc
--- /dev/null
+++ b/tests/testthat/test_plot_AbanicoPlot.R
@@ -0,0 +1,166 @@
+context("plot_AbanicoPlot()")
+
+
+test_that("Test examples from the example page", {
+  testthat::skip_on_cran()
+
+   ## load example data and recalculate to Gray
+  data(ExampleData.DeValues, envir = environment())
+  ExampleData.DeValues <- ExampleData.DeValues$CA1
+
+  ## plot the example data straightforward
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues))
+
+  ## now with linear z-scale
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   log.z = FALSE))
+
+  ## now with output of the plot parameters
+  expect_is(plot_AbanicoPlot(data = ExampleData.DeValues,
+                            output = TRUE), "list")
+
+  ## now with adjusted z-scale limits
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   zlim = c(10, 200)))
+
+  ## now with adjusted x-scale limits
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   xlim = c(0, 20)))
+
+  ## now with rug to indicate individual values in KDE part
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   rug = TRUE))
+
+  ## now with a smaller bandwidth for the KDE plot
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   bw = 0.04))
+
+  ## now with a histogram instead of the KDE plot
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   hist = TRUE,
+                   kde = FALSE))
+
+  ## now with a KDE plot and histogram with manual number of bins
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   hist = TRUE,
+                   breaks = 20))
+
+  ## now with a KDE plot and a dot plot
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   dots = TRUE))
+
+  ## now with user-defined plot ratio
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   plot.ratio = 0.5))
+
+  ## now with user-defined central value
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   z.0 = 70))
+
+  ## now with median as central value
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   z.0 = "median"))
+
+  ## now with the 17-83 percentile range as definition of scatter
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   z.0 = "median",
+                   dispersion = "p17"))
+
+  ## now with a user-defined green line indicating the central dose (CAM)
+  CAM <- calc_CentralDose(ExampleData.DeValues,
+                          plot = FALSE)
+
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   line = CAM,
+                   line.col = "darkgreen",
+                   line.label = "CAM"))
+
+  ## now create plot with legend, colour, different points and smaller scale
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   legend = "Sample 1",
+                   col = "tomato4",
+                   bar.col = "peachpuff",
+                   pch = "R",
+                   cex = 0.8))
+
+  ## now without 2-sigma bar, polygon, grid lines and central value line
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   bar.col = FALSE,
+                   polygon.col = FALSE,
+                   grid.col = FALSE,
+                   y.axis = FALSE,
+                   lwd = 0))
+
+  ## now with direct display of De errors, without 2-sigma bar
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   bar.col = FALSE,
+                   ylab = "",
+                   y.axis = FALSE,
+                   error.bars = TRUE))
+
+  ## now with user-defined axes labels
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   xlab = c("Data error (%)",
+                            "Data precision"),
+                   ylab = "Scatter",
+                   zlab = "Equivalent dose [Gy]"))
+
+  ## now with minimum, maximum and median value indicated
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   stats = c("min", "max", "median")))
+
+  ## now with a brief statistical summary as subheader
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   summary = c("n", "in.2s")))
+
+  ## now with another statistical summary
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   summary = c("mean.weighted", "median"),
+                   summary.pos = "topleft"))
+
+  ## now a plot with two 2-sigma bars for one data set
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   bar = c(30, 100)))
+
+  ## now the data set is split into sub-groups, one is manipulated
+  data.1 <- ExampleData.DeValues[1:30,]
+  data.2 <- ExampleData.DeValues[31:62,] * 1.3
+  data.3 <- list(data.1, data.2)
+
+  ## now the two data sets are plotted in one plot
+  expect_silent(plot_AbanicoPlot(data = data.3))
+
+  ## now with some graphical modification
+  expect_silent(plot_AbanicoPlot(data = data.3,
+                   z.0 = "median",
+                   col = c("steelblue4", "orange4"),
+                   bar.col = c("steelblue3", "orange3"),
+                   polygon.col = c("steelblue1", "orange1"),
+                   pch = c(2, 6),
+                   angle = c(30, 50),
+                   summary = c("n", "in.2s", "median")))
+
+  ## create Abanico plot with predefined layout definition
+  expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues,
+                   layout = "journal"))
+
+  ## now with predefined layout definition and further modifications
+  expect_silent(plot_AbanicoPlot(data = data.3,
+                   z.0 = "median",
+                   layout = "journal",
+                   col = c("steelblue4", "orange4"),
+                   bar.col = adjustcolor(c("steelblue3", "orange3"),
+                                         alpha.f = 0.5),
+                   polygon.col = c("steelblue3", "orange3")))
+
+  ## for further information on layout definitions see documentation
+  ## of function get_Layout()
+
+  ## now with manually added plot content
+  ## create empty plot with numeric output
+  expect_is(plot_AbanicoPlot(data = ExampleData.DeValues,
+                         pch = NA,
+                         output = TRUE), "list")
+
+
+})
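
Because plot_AbanicoPlot() draws on the active graphics device, the calls above can also be sent to a throwaway device while the numerical output is kept; a minimal sketch, assuming the same ExampleData.DeValues object:

    pdf(tempfile(fileext = ".pdf"))   # plot goes to a temporary file
    abanico <- plot_AbanicoPlot(data = ExampleData.DeValues, output = TRUE)
    dev.off()
    str(abanico, max.level = 1)       # inspect the list returned with output = TRUE
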
diff --git a/tests/testthat/test_plot_Functions.R b/tests/testthat/test_plot_Functions.R
new file mode 100644
index 0000000..56fcd76
--- /dev/null
+++ b/tests/testthat/test_plot_Functions.R
@@ -0,0 +1,136 @@
+context("Test Various Plot Functions")
+
+
+test_that("test pure success of the plotting without warning or error", {
+  testthat::skip_on_cran()
+  ##distribution plots
+  data(ExampleData.DeValues, envir = environment())
+  ExampleData.DeValues <- ExampleData.DeValues$CA1
+
+  expect_silent(plot_RadialPlot(ExampleData.DeValues))
+  expect_silent(plot_KDE(ExampleData.DeValues))
+  expect_silent(plot_Histogram(ExampleData.DeValues))
+  expect_silent(plot_ViolinPlot(ExampleData.DeValues))
+
+
+  ##plot NRT
+  data("ExampleData.BINfileData", envir = environment())
+  data <- Risoe.BINfileData2RLum.Analysis(object = CWOSL.SAR.Data, pos = 8, ltype = "OSL")
+  allCurves <- get_RLum(data)
+  pos <- seq(1, 9, 2)
+  curves <- allCurves[pos]
+  expect_silent(plot_NRt(curves))
+
+  ##filter combinations
+  filter1 <- density(rnorm(100, mean = 450, sd = 20))
+  filter1 <- matrix(c(filter1$x, filter1$y/max(filter1$y)), ncol = 2)
+  filter2 <- matrix(c(200:799,rep(c(0,0.8,0),each = 200)), ncol = 2)
+  expect_silent(plot_FilterCombinations(filters = list(filter1, filter2)))
+
+  ##plot_DetPlot()
+  data(ExampleData.BINfileData, envir = environment())
+  object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+  expect_is(
+    plot_DetPlot(
+      object,
+      signal.integral.min = 1,
+      signal.integral.max = 3,
+      background.integral.min = 900,
+      background.integral.max = 1000,
+      n.channels = 5
+    ),
+    "RLum.Results"
+  )
+
+  ##plot DRT
+  data(ExampleData.DeValues, envir = environment())
+  expect_silent(plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
+                  given.dose = 2800, mtext = "Example data"))
+
+
+  ##plot RisoeBINFileData
+  data(ExampleData.BINfileData, envir = environment())
+  expect_silent(plot_Risoe.BINfileData(CWOSL.SAR.Data,position = 1))
+
+  ##various RLum plots
+
+    ##RLum.Data.Curve
+    data(ExampleData.CW_OSL_Curve, envir = environment())
+    temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
+    expect_silent(plot(temp))
+
+    ##RLum.Data.Image
+    data(ExampleData.RLum.Data.Image, envir = environment())
+    expect_silent(plot(ExampleData.RLum.Data.Image))
+
+    ##RLum.Data.Spectrum -------
+    data(ExampleData.XSYG, envir = environment())
+    expect_silent(plot(TL.Spectrum,
+                            plot.type="contour",
+                            xlim = c(310,750),
+                            ylim = c(0,300)))
+
+    expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum,
+                            plot.type="persp",
+                            xlim = c(310,750),
+                            ylim = c(0,100),
+                            bin.rows=10,
+                            bin.cols = 1)))
+
+   expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum,
+                            plot.type="multiple.lines",
+                            xlim = c(310,750),
+                            ylim = c(0,100),
+                            bin.rows=10,
+                            bin.cols = 1)))
+
+   expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+                           xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+                           bin.cols = 1)))
+
+
+   expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+                           xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+                           bin.cols = 1,
+                           type = "heatmap",
+                           showscale = TRUE)))
+
+   expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive",
+                                                          xlim = c(310,750), ylim = c(0,300), bin.rows=10,
+                                                          bin.cols = 1,
+                                                          type = "contour",
+                                                          showscale = TRUE)))
+
+   expect_error(plot(TL.Spectrum,
+                      plot.type="contour",
+                      xlim = c(310,750),
+                      ylim = c(0,300), bin.cols = 0))
+
+
+    ##RLum.Analysis
+    data(ExampleData.BINfileData, envir = environment())
+    temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
+    expect_silent(plot(
+      temp,
+      subset = list(recordType = "TL"),
+      combine = TRUE,
+      norm = TRUE,
+      abline = list(v = c(110))
+    ))
+
+    ##RLum.Results
+    grains<- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = FALSE, MC.iter = 100)
+    expect_silent(plot_RLum.Results(grains))
+
+
+
+})
+
+
+test_that("test for return values, if any", {
+  testthat::skip_on_cran()
+  data(ExampleData.DeValues, envir = environment())
+  output <- plot_AbanicoPlot(ExampleData.DeValues, output = TRUE)
+    expect_is(output, "list")
+    expect_length(output, 10)
+})
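
plot_FilterCombinations() expects each filter as a two-column matrix of wavelength (nm) and relative transmission (0-1), as in the matrices built in the test above; a minimal sketch with a hypothetical rectangular band-pass filter:

    wl <- 300:800
    bandpass <- matrix(c(wl, ifelse(wl >= 450 & wl <= 550, 0.9, 0)), ncol = 2)
    plot_FilterCombinations(filters = list(bandpass))
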
diff --git a/tests/testthat/test_plot_GrowthCurve.R b/tests/testthat/test_plot_GrowthCurve.R
new file mode 100644
index 0000000..2de732a
--- /dev/null
+++ b/tests/testthat/test_plot_GrowthCurve.R
@@ -0,0 +1,84 @@
+context("plot_GrowthCurve")
+
+set.seed(1)
+data(ExampleData.LxTxData, envir = environment())
+temp_EXP <-
+  plot_GrowthCurve(
+    LxTxData,
+    fit.method = "EXP",
+    output.plot = FALSE,
+    verbose = FALSE,
+    NumberIterations.MC = 10
+  )
+temp_LIN <-
+  plot_GrowthCurve(
+    LxTxData,
+    fit.method = "LIN",
+    output.plot = FALSE,
+    verbose = FALSE,
+    NumberIterations.MC = 10
+  )
+temp_EXPLIN <-
+  plot_GrowthCurve(
+    LxTxData,
+    fit.method = "EXP+LIN",
+    output.plot = FALSE,
+    verbose = FALSE,
+    NumberIterations.MC = 10
+  )
+temp_EXPEXP <-
+  plot_GrowthCurve(
+    LxTxData,
+    fit.method = "EXP+EXP",
+    output.plot = FALSE,
+    verbose = FALSE,
+    NumberIterations.MC = 10
+  )
+temp_QDR <-
+  plot_GrowthCurve(
+    LxTxData,
+    fit.method = "QDR",
+    output.plot = FALSE,
+    verbose = FALSE,
+    NumberIterations.MC = 10
+  )
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_is(temp_EXP, class = "RLum.Results", info = NULL, label = NULL)
+    expect_is(temp_EXP$Fit, class = "nls")
+
+  expect_is(temp_LIN, class = "RLum.Results", info = NULL, label = NULL)
+    expect_is(temp_LIN$Fit, class = "lm")
+
+  expect_is(temp_EXPLIN, class = "RLum.Results", info = NULL, label = NULL)
+   expect_is(temp_EXPLIN$Fit, class = "nls")
+
+  expect_is(temp_EXPEXP, class = "RLum.Results", info = NULL, label = NULL)
+    expect_is(temp_EXPEXP$Fit, class = "nls")
+
+  expect_is(temp_QDR, class = "RLum.Results", info = NULL, label = NULL)
+    expect_is(temp_QDR$Fit, class = "lm")
+
+})
+
+test_that("check values from output example", {
+ testthat::skip_on_cran()
+
+ expect_equivalent(round(temp_EXP$De[[1]], digits = 2), 1737.88)
+  expect_equal(round(sum(temp_EXP$De.MC, na.rm = TRUE), digits = 2), 17440.55)
+
+ expect_equivalent(round(temp_LIN$De[[1]], digits = 2), 1811.33)
+  expect_equal(round(sum(temp_LIN$De.MC, na.rm = TRUE), digits = 2),18238.02)
+
+ expect_equivalent(round(temp_EXPLIN$De[[1]], digits = 2), 1791.53)
+   expect_equal(round(sum(temp_EXPLIN$De.MC, na.rm = TRUE), digits = 2),17474.29)
+
+ expect_equivalent(round(temp_EXPEXP$De[[1]], digits = 2), 1787.15)
+  expect_equal(round(sum(temp_EXPEXP$De.MC, na.rm = TRUE), digits = 0), 7316)
+
+ expect_equivalent(round(temp_QDR$De[[1]], digits = 2), 1666.2)
+  expect_equal(round(sum(temp_QDR$De.MC, na.rm = TRUE), digits = 2), 14936.76)
+
+
+})
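
The fit results checked above can also be pulled out of the RLum.Results object with get_RLum(), using the same data.object names the $ accessor relies on; a minimal sketch, assuming temp_EXP from above:

    De_table <- get_RLum(temp_EXP, data.object = "De")    # equivalent dose table
    fit      <- get_RLum(temp_EXP, data.object = "Fit")   # the underlying nls object
    De_table[[1]]      # same value the test above rounds and compares
    summary(fit)
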
diff --git a/tests/testthat/test_read_BIN2R.R b/tests/testthat/test_read_BIN2R.R
new file mode 100644
index 0000000..ec19b57
--- /dev/null
+++ b/tests/testthat/test_read_BIN2R.R
@@ -0,0 +1,47 @@
+context("read_BIN2R")
+
+test_that("test the import of various BIN-file versions", {
+  testthat::skip_on_cran()
+
+  ##test for various errors
+  expect_error(read_BIN2R(file = ""), "[read_BIN2R()] File does not exist!", fixed = TRUE)
+
+  ##this test needs an internet connection ... check for it first
+  if(!httr::http_error("https://github.com/R-Lum/Luminescence/tree/master/tests/testdata")){
+
+    ##try to import every format by using the files on GitHub
+    ##V3
+    expect_is(
+      read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V3.bin",
+                 txtProgressBar = FALSE), class = "Risoe.BINfileData")
+
+    ##V4
+    expect_is(
+      read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V4.bin",
+                 txtProgressBar = FALSE), class = "Risoe.BINfileData")
+
+    ##V6
+    expect_is(
+      read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V6.binx",
+                 txtProgressBar = FALSE), class = "Risoe.BINfileData")
+
+    ##V7
+    expect_is(
+      read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V7.binx",
+                 txtProgressBar = FALSE), class = "Risoe.BINfileData")
+
+    ##V8
+    expect_is(
+      read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V8.binx",
+                 txtProgressBar = FALSE), class = "Risoe.BINfileData")
+
+    ##test further options
+
+    ##n.records and fastForward
+    expect_is(
+      read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V4.bin",
+                 txtProgressBar = FALSE, n.records = 1, fastForward = TRUE, verbose = FALSE), class = "list")
+
+  }
+
+})
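
For offline use the same importer reads local files, and the imported Risoe.BINfileData object is typically converted into RLum.Analysis objects afterwards; a minimal sketch in which "my_file.binx" stands in for a local path:

    bin <- read_BIN2R(file = "my_file.binx", txtProgressBar = FALSE)
    analyses <- Risoe.BINfileData2RLum.Analysis(bin, pos = 1)

    ## or do both steps at once while importing
    analyses_list <- read_BIN2R(file = "my_file.binx", fastForward = TRUE, verbose = FALSE)
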
diff --git a/tests/testthat/test_replicate_RLum.R b/tests/testthat/test_replicate_RLum.R
new file mode 100644
index 0000000..eacdc14
--- /dev/null
+++ b/tests/testthat/test_replicate_RLum.R
@@ -0,0 +1,12 @@
+context("replicate_RLum")
+
+test_that("Test replication of RLum-objects", {
+  skip_on_cran()
+
+  data(ExampleData.RLum.Analysis, envir = environment())
+  expect_silent(results <- rep(IRSAR.RF.Data[[1]], 5))
+
+  ##check
+  expect_equal(length(results),5)
+
+})
diff --git a/tests/testthat/test_smooth_RLum.R b/tests/testthat/test_smooth_RLum.R
new file mode 100644
index 0000000..74d4377
--- /dev/null
+++ b/tests/testthat/test_smooth_RLum.R
@@ -0,0 +1,25 @@
+context("smooth_RLum")
+
+data(ExampleData.CW_OSL_Curve, envir = environment())
+temp <-
+  set_RLum(
+      class = "RLum.Data.Curve",
+      recordType = "OSL",
+      data = as.matrix(ExampleData.CW_OSL_Curve)
+  )
+
+
+
+test_that("check class and length of output", {
+  testthat::skip_on_cran()
+  expect_is(temp, class = "RLum.Data.Curve", info = NULL, label = NULL)
+  expect_is(smooth_RLum(temp), class = "RLum.Data.Curve", info = NULL, label = NULL)
+
+})
+
+test_that("check values from output example", {
+  testthat::skip_on_cran()
+ expect_equivalent(round(mean(smooth_RLum(temp, k = 5)[,2], na.rm = TRUE), 0), 100)
+ expect_equivalent(round(mean(smooth_RLum(temp, k = 10)[,2], na.rm = TRUE), 0), 85)
+
+})
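
The argument k used above sets the window width of the running smoother; a rough base-R illustration of the idea (a plain centred running mean, not necessarily the package's exact implementation):

    y <- ExampleData.CW_OSL_Curve[, 2]            # raw count values
    k <- 5
    y_smooth <- stats::filter(y, rep(1 / k, k))   # centred running mean of width k
    head(cbind(raw = y, smoothed = as.numeric(y_smooth)))
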
diff --git a/tests/testthat/test_template_DRAC.R b/tests/testthat/test_template_DRAC.R
new file mode 100644
index 0000000..be98908
--- /dev/null
+++ b/tests/testthat/test_template_DRAC.R
@@ -0,0 +1,13 @@
+context("template_DRAC")
+
+##Full check
+test_that("Check template creation", {
+  skip_on_cran()
+
+  ##test success
+  expect_is(template_DRAC(), "DRAC.list")
+  expect_is(template_DRAC(notification = FALSE), "DRAC.list")
+  expect_is(template_DRAC(nrow = 10, notification = FALSE), "DRAC.list")
+
+})
+
diff --git a/tests/testthat/test_verify_SingleGrainData.R b/tests/testthat/test_verify_SingleGrainData.R
new file mode 100644
index 0000000..57193b4
--- /dev/null
+++ b/tests/testthat/test_verify_SingleGrainData.R
@@ -0,0 +1,14 @@
+context("Test verify_SingleGrainData")
+
+test_that("Various function tests", {
+  testthat::skip_on_cran()
+
+  data(ExampleData.XSYG, envir = environment())
+  output <- verify_SingleGrainData(OSL.SARMeasurement$Sequence.Object)
+
+  ##return value
+  expect_is(output, "RLum.Results")
+  expect_is(output$selection_full, "data.frame")
+
+})
+
diff --git a/tests/testthat/test_write_R2BIN.R b/tests/testthat/test_write_R2BIN.R
new file mode 100644
index 0000000..a8ff76b
--- /dev/null
+++ b/tests/testthat/test_write_R2BIN.R
@@ -0,0 +1,57 @@
+context("write_R2BIN")
+
+# Unit test for the write_R2BIN() function
+#
+# Problem: the tests are not allowed to write to the file system; therefore, this part has to be
+# run manually, but we can still test for some errors
+#
+# # Uncomment only to create new test data sets on the file system (for read_BIN2R())
+# data(ExampleData.BINfileData, envir = environment())
+#
+#   ##empty RisoeBINfileData object
+#   empty <- set_Risoe.BINfileData()
+#
+#   ##replace the raw by numeric
+#   CWOSL.SAR.Data@METADATA$VERSION <- as.numeric(CWOSL.SAR.Data@METADATA$VERSION)
+#   CWOSL.SAR.Data@METADATA[] <- lapply(CWOSL.SAR.Data@METADATA, function(x){
+#     if(is.factor(x)){
+#       as.character(x)
+#     }else{
+#       x
+#     }
+#   })
+#
+#   ##combining with the existing BIN-file object
+#   new <- as.data.frame(
+#     data.table::rbindlist(l = list(empty@METADATA, CWOSL.SAR.Data@METADATA), fill = TRUE),
+#     stringsAsFactors = FALSE)
+#
+#   ##new object
+#   new <- set_Risoe.BINfileData(METADATA = new, DATA = CWOSL.SAR.Data@DATA)
+#
+#   ##replace NA values
+#   new@METADATA[is.na(new@METADATA)] <- 0
+#
+#   ##replace RECTYPE
+#   new@METADATA$RECTYPE <- 1
+#
+#   ##reduce file size considerably, down to two records
+#   new <- subset(new, ID == 1:2)
+#
+#   ##create files
+#   path <- "tests/testdata/"
+#   write_R2BIN(object = new, file = paste0(path, "BINfile_V3.bin"), version = "03")
+#   write_R2BIN(object = new, file = paste0(path, "BINfile_V4.bin"), version = "04")
+#   write_R2BIN(object = new, file = paste0(path, "BINfile_V6.binx"), version = "06")
+#   write_R2BIN(object = new, file = paste0(path, "BINfile_V7.binx"), version = "07")
+#   write_R2BIN(object = new, file = paste0(path, "BINfile_V8.binx"), version = "08")
+
+test_that("write to empty connection", {
+  testthat::skip_on_cran()
+
+  ##catch errors
+  expect_error(write_R2BIN(object = "a"), "[write_R2BIN()] Input object is not of type Risoe.BINfileData!", fixed = TRUE)
+  expect_error(write_R2BIN(object = set_Risoe.BINfileData(), file = ""))
+
+})
+
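
When the writing step does need to be exercised locally, pointing the output to a temporary file keeps the source tree untouched; a minimal sketch, assuming an object new prepared as in the commented block above:

    tmp <- tempfile(fileext = ".binx")
    write_R2BIN(object = new, file = tmp, version = "06")

    ## re-import to verify the round trip
    chk <- read_BIN2R(file = tmp, txtProgressBar = FALSE)
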
diff --git a/tests/testthat/test_write_RLum2CSV.R b/tests/testthat/test_write_RLum2CSV.R
new file mode 100644
index 0000000..49f379a
--- /dev/null
+++ b/tests/testthat/test_write_RLum2CSV.R
@@ -0,0 +1,15 @@
+context("write_RLum2CSV")
+
+test_that("test errors and general export function", {
+  testthat::skip_on_cran()
+
+  ##test error
+  expect_error(write_RLum2CSV(object = "", export = FALSE),
+               regexp = "[write_RLum2CSV()] Object needs to be a member of the object class RLum!",
+               fixed = TRUE)
+
+  ##test export
+  data("ExampleData.portableOSL", envir = environment())
+  expect_is(write_RLum2CSV(ExampleData.portableOSL, export = FALSE), "list")
+
+})
diff --git a/tests/testthat/test_zzz.R b/tests/testthat/test_zzz.R
new file mode 100644
index 0000000..59efaf5
--- /dev/null
+++ b/tests/testthat/test_zzz.R
@@ -0,0 +1,16 @@
+context("zzz")
+
+test_that("Test zzz functions ... they should still work", {
+  testthat::skip_on_cran()
+
+  ##get right answer
+  expect_equal(get_rightAnswer(), 46)
+  expect_equal(get_rightAnswer("test"), 46)
+
+  ##get quote
+  expect_silent(get_Quote())
+
+  ##tune data
+  expect_warning(tune_Data(1:10))
+
+})

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/r-cran-luminescence.git


